max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 7 | 12900 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mwtab.mwschema
~~~~~~~~~~~~~~
This module provides schema definitions for different sections of the
``mwTab`` Metabolomics Workbench format.
"""
import sys
from schema import Schema, Optional, Or
# Python 2 compatibility: rebind ``str`` to ``unicode`` so every ``str``
# reference in the schemas below validates unicode text; on Python 3 the
# guard is false and builtin ``str`` (already unicode) is used as-is.
if sys.version_info.major == 2:
    str = unicode
# Schema for the METABOLOMICS WORKBENCH header block: version/creation date
# are mandatory, the various workbench identifiers are optional.
metabolomics_workbench_schema = Schema(
    {
        "VERSION": str,
        "CREATED_ON": str,
        Optional("STUDY_ID"): str,
        Optional("ANALYSIS_ID"): str,
        Optional("PROJECT_ID"): str,
        Optional("HEADER"): str,
        Optional("DATATRACK_ID"): str
    }
)
# Schema for the PROJECT section: title/summary, institute and the primary
# contact fields are mandatory; everything else is optional metadata.
project_schema = Schema(
    {
        "PROJECT_TITLE": str,
        Optional("PROJECT_TYPE"): str,
        "PROJECT_SUMMARY": str,
        "INSTITUTE": str,
        Optional("DEPARTMENT"): str,
        Optional("LABORATORY"): str,
        "LAST_NAME": str,
        "FIRST_NAME": str,
        "ADDRESS": str,
        "EMAIL": str,
        "PHONE": str,
        Optional("FUNDING_SOURCE"): str,
        Optional("PROJECT_COMMENTS"): str,
        Optional("PUBLICATIONS"): str,
        Optional("CONTRIBUTORS"): str,
        Optional("DOI"): str
    }
)
# Schema for the STUDY section; mirrors project_schema's mandatory contact
# fields and adds optional subject-count statistics.
study_schema = Schema(
    {
        "STUDY_TITLE": str,
        Optional("STUDY_TYPE"): str,
        "STUDY_SUMMARY": str,
        "INSTITUTE": str,
        Optional("DEPARTMENT"): str,
        Optional("LABORATORY"): str,
        "LAST_NAME": str,
        "FIRST_NAME": str,
        "ADDRESS": str,
        "EMAIL": str,
        "PHONE": str,
        Optional("NUM_GROUPS"): str,
        Optional("TOTAL_SUBJECTS"): str,
        Optional("NUM_MALES"): str,
        Optional("NUM_FEMALES"): str,
        Optional("STUDY_COMMENTS"): str,
        Optional("PUBLICATIONS"): str,  # assumed
        Optional("SUBMIT_DATE"): str  # assumed
    }
)
# Schema for the SUBJECT section: only type and species are mandatory; the
# optional keys cover the human-, animal-, cell- and plant-specific fields.
subject_schema = Schema(
    {
        "SUBJECT_TYPE": str,
        "SUBJECT_SPECIES": str,
        Optional("TAXONOMY_ID"): str,
        Optional("GENOTYPE_STRAIN"): str,
        Optional("AGE_OR_AGE_RANGE"): str,
        Optional("WEIGHT_OR_WEIGHT_RANGE"): str,
        Optional("HEIGHT_OR_HEIGHT_RANGE"): str,
        Optional("GENDER"): str,
        Optional("HUMAN_RACE"): str,
        Optional("HUMAN_ETHNICITY"): str,
        Optional("HUMAN_TRIAL_TYPE"): str,
        Optional("HUMAN_LIFESTYLE_FACTORS"): str,
        Optional("HUMAN_MEDICATIONS"): str,
        Optional("HUMAN_PRESCRIPTION_OTC"): str,
        Optional("HUMAN_SMOKING_STATUS"): str,
        Optional("HUMAN_ALCOHOL_DRUG_USE"): str,
        Optional("HUMAN_NUTRITION"): str,
        Optional("HUMAN_INCLUSION_CRITERIA"): str,
        Optional("HUMAN_EXCLUSION_CRITERIA"): str,
        Optional("ANIMAL_ANIMAL_SUPPLIER"): str,
        Optional("ANIMAL_HOUSING"): str,
        Optional("ANIMAL_LIGHT_CYCLE"): str,
        Optional("ANIMAL_FEED"): str,
        Optional("ANIMAL_WATER"): str,
        Optional("ANIMAL_INCLUSION_CRITERIA"): str,
        Optional("CELL_BIOSOURCE_OR_SUPPLIER"): str,
        Optional("CELL_STRAIN_DETAILS"): str,
        Optional("SUBJECT_COMMENTS"): str,
        Optional("CELL_PRIMARY_IMMORTALIZED"): str,
        Optional("CELL_PASSAGE_NUMBER"): str,
        Optional("CELL_COUNTS"): str,
        Optional("SPECIES_GROUP"): str
    }
)
# Schema for SUBJECT_SAMPLE_FACTORS: a list of records, one per sample,
# each pairing subject/sample ids with a factors dict and optional extras
# (any additional string key/value is accepted via ``Optional(str)``).
subject_sample_factors_schema = Schema(
    [
        {
            "Subject ID": str,
            "Sample ID": str,
            "Factors": dict,
            Optional("Additional sample data"): {
                Optional("RAW_FILE_NAME"): str,
                Optional(str): str
            }
        }
    ]
)
# Schema for the COLLECTION section: only the summary is mandatory.
collection_schema = Schema(
    {
        "COLLECTION_SUMMARY": str,
        Optional("COLLECTION_PROTOCOL_ID"): str,
        Optional("COLLECTION_PROTOCOL_FILENAME"): str,
        Optional("COLLECTION_PROTOCOL_COMMENTS"): str,
        Optional("SAMPLE_TYPE"): str,  # assumed optional due to large number of files without
        Optional("COLLECTION_METHOD"): str,
        Optional("COLLECTION_LOCATION"): str,
        Optional("COLLECTION_FREQUENCY"): str,
        Optional("COLLECTION_DURATION"): str,
        Optional("COLLECTION_TIME"): str,
        Optional("VOLUMEORAMOUNT_COLLECTED"): str,
        Optional("STORAGE_CONDITIONS"): str,
        Optional("COLLECTION_VIALS"): str,
        Optional("STORAGE_VIALS"): str,
        Optional("COLLECTION_TUBE_TEMP"): str,
        Optional("ADDITIVES"): str,
        Optional("BLOOD_SERUM_OR_PLASMA"): str,
        Optional("TISSUE_CELL_IDENTIFICATION"): str,
        Optional("TISSUE_CELL_QUANTITY_TAKEN"): str
    }
)
# Schema for the TREATMENT section: summary mandatory; the optional keys
# group into generic treatment, animal, human, cell and plant fields.
treatment_schema = Schema(
    {
        "TREATMENT_SUMMARY": str,
        Optional("TREATMENT_PROTOCOL_ID"): str,
        Optional("TREATMENT_PROTOCOL_FILENAME"): str,
        Optional("TREATMENT_PROTOCOL_COMMENTS"): str,
        Optional("TREATMENT"): str,
        Optional("TREATMENT_COMPOUND"): str,
        Optional("TREATMENT_ROUTE"): str,
        Optional("TREATMENT_DOSE"): str,
        Optional("TREATMENT_DOSEVOLUME"): str,
        Optional("TREATMENT_DOSEDURATION"): str,
        Optional("TREATMENT_VEHICLE"): str,
        Optional("ANIMAL_VET_TREATMENTS"): str,
        Optional("ANIMAL_ANESTHESIA"): str,
        Optional("ANIMAL_ACCLIMATION_DURATION"): str,
        Optional("ANIMAL_FASTING"): str,
        Optional("ANIMAL_ENDP_EUTHANASIA"): str,
        Optional("ANIMAL_ENDP_TISSUE_COLL_LIST"): str,
        Optional("ANIMAL_ENDP_TISSUE_PROC_METHOD"): str,
        Optional("ANIMAL_ENDP_CLINICAL_SIGNS"): str,
        Optional("HUMAN_FASTING"): str,
        Optional("HUMAN_ENDP_CLINICAL_SIGNS"): str,
        Optional("CELL_STORAGE"): str,
        Optional("CELL_GROWTH_CONTAINER"): str,
        Optional("CELL_GROWTH_CONFIG"): str,
        Optional("CELL_GROWTH_RATE"): str,
        Optional("CELL_INOC_PROC"): str,
        Optional("CELL_MEDIA"): str,
        Optional("CELL_ENVIR_COND"): str,
        Optional("CELL_HARVESTING"): str,
        Optional("PLANT_GROWTH_SUPPORT"): str,
        Optional("PLANT_GROWTH_LOCATION"): str,
        Optional("PLANT_PLOT_DESIGN"): str,
        Optional("PLANT_LIGHT_PERIOD"): str,
        Optional("PLANT_HUMIDITY"): str,
        Optional("PLANT_TEMP"): str,
        Optional("PLANT_WATERING_REGIME"): str,
        Optional("PLANT_NUTRITIONAL_REGIME"): str,
        Optional("PLANT_ESTAB_DATE"): str,
        Optional("PLANT_HARVEST_DATE"): str,
        Optional("PLANT_GROWTH_STAGE"): str,
        Optional("PLANT_METAB_QUENCH_METHOD"): str,
        Optional("PLANT_HARVEST_METHOD"): str,
        Optional("PLANT_STORAGE"): str,
        Optional("CELL_PCT_CONFLUENCE"): str,
        Optional("CELL_MEDIA_LASTCHANGED"): str
    }
)
# Schema for the SAMPLEPREP section: only the summary is mandatory.
sampleprep_schema = Schema(
    {
        "SAMPLEPREP_SUMMARY": str,
        Optional("SAMPLEPREP_PROTOCOL_ID"): str,
        Optional("SAMPLEPREP_PROTOCOL_FILENAME"): str,
        Optional("SAMPLEPREP_PROTOCOL_COMMENTS"): str,
        Optional("PROCESSING_METHOD"): str,
        Optional("PROCESSING_STORAGE_CONDITIONS"): str,
        Optional("EXTRACTION_METHOD"): str,
        Optional("EXTRACT_CONCENTRATION_DILUTION"): str,
        Optional("EXTRACT_ENRICHMENT"): str,
        Optional("EXTRACT_CLEANUP"): str,
        Optional("EXTRACT_STORAGE"): str,
        Optional("SAMPLE_RESUSPENSION"): str,
        Optional("SAMPLE_DERIVATIZATION"): str,
        Optional("SAMPLE_SPIKING"): str,
        Optional("ORGAN"): str,
        Optional("ORGAN_SPECIFICATION"): str,
        Optional("CELL_TYPE"): str,
        Optional("SUBCELLULAR_LOCATION"): str
    }
)
# Schema for the CHROMATOGRAPHY section: type, instrument and column name
# are mandatory; the rest are optional method-parameter fields.
chromatography_schema = Schema(
    {
        Optional("CHROMATOGRAPHY_SUMMARY"): str,
        "CHROMATOGRAPHY_TYPE": str,
        "INSTRUMENT_NAME": str,
        "COLUMN_NAME": str,
        Optional("FLOW_GRADIENT"): str,
        Optional("FLOW_RATE"): str,
        Optional("COLUMN_TEMPERATURE"): str,
        Optional("METHODS_FILENAME"): str,
        Optional("SOLVENT_A"): str,
        Optional("SOLVENT_B"): str,
        Optional("METHODS_ID"): str,
        Optional("COLUMN_PRESSURE"): str,
        Optional("INJECTION_TEMPERATURE"): str,
        Optional("INTERNAL_STANDARD"): str,
        Optional("INTERNAL_STANDARD_MT"): str,
        Optional("RETENTION_INDEX"): str,
        Optional("RETENTION_TIME"): str,
        Optional("SAMPLE_INJECTION"): str,
        Optional("SAMPLING_CONE"): str,
        Optional("ANALYTICAL_TIME"): str,
        Optional("CAPILLARY_VOLTAGE"): str,
        Optional("MIGRATION_TIME"): str,
        Optional("OVEN_TEMPERATURE"): str,
        Optional("PRECONDITIONING"): str,
        Optional("RUNNING_BUFFER"): str,
        Optional("RUNNING_VOLTAGE"): str,
        Optional("SHEATH_LIQUID"): str,
        Optional("TIME_PROGRAM"): str,
        Optional("TRANSFERLINE_TEMPERATURE"): str,
        Optional("WASHING_BUFFER"): str,
        Optional("WEAK_WASH_SOLVENT_NAME"): str,
        Optional("WEAK_WASH_VOLUME"): str,
        Optional("STRONG_WASH_SOLVENT_NAME"): str,
        Optional("STRONG_WASH_VOLUME"): str,
        Optional("TARGET_SAMPLE_TEMPERATURE"): str,
        Optional("SAMPLE_LOOP_SIZE"): str,
        Optional("SAMPLE_SYRINGE_SIZE"): str,
        Optional("RANDOMIZATION_ORDER"): str,
        Optional("CHROMATOGRAPHY_COMMENTS"): str
    }
)
# Schema for the ANALYSIS section: only the analysis type is mandatory.
analysis_schema = Schema(
    {
        "ANALYSIS_TYPE": str,
        Optional("LABORATORY_NAME"): str,
        Optional("OPERATOR_NAME"): str,
        Optional("DETECTOR_TYPE"): str,
        Optional("SOFTWARE_VERSION"): str,
        Optional("ACQUISITION_DATE"): str,
        Optional("ANALYSIS_PROTOCOL_FILE"): str,
        Optional("ACQUISITION_PARAMETERS_FILE"): str,
        Optional("PROCESSING_PARAMETERS_FILE"): str,
        Optional("DATA_FORMAT"): str,
        # not specified in mwTab specification (assumed)
        Optional("ACQUISITION_ID"): str,
        Optional("ACQUISITION_TIME"): str,
        Optional("ANALYSIS_COMMENTS"): str,
        Optional("ANALYSIS_DISPLAY"): str,
        Optional("INSTRUMENT_NAME"): str,
        Optional("INSTRUMENT_PARAMETERS_FILE"): str,
        Optional("NUM_FACTORS"): str,
        Optional("NUM_METABOLITES"): str,
        Optional("PROCESSED_FILE"): str,
        Optional("RANDOMIZATION_ORDER"): str,
        Optional("RAW_FILE"): str,
    }
)
# Schema for the MS section: instrument identity, MS type, ion mode and
# comments are mandatory; MS_RESULTS_FILE may be a plain string or a
# parsed dict, hence the Or().
ms_schema = Schema(
    {
        "INSTRUMENT_NAME": str,
        "INSTRUMENT_TYPE": str,
        "MS_TYPE": str,
        "ION_MODE": str,
        "MS_COMMENTS": str,  # changed to required
        Optional("CAPILLARY_TEMPERATURE"): str,
        Optional("CAPILLARY_VOLTAGE"): str,
        Optional("COLLISION_ENERGY"): str,
        Optional("COLLISION_GAS"): str,
        Optional("DRY_GAS_FLOW"): str,
        Optional("DRY_GAS_TEMP"): str,
        Optional("FRAGMENT_VOLTAGE"): str,
        Optional("FRAGMENTATION_METHOD"): str,
        Optional("GAS_PRESSURE"): str,
        Optional("HELIUM_FLOW"): str,
        Optional("ION_SOURCE_TEMPERATURE"): str,
        Optional("ION_SPRAY_VOLTAGE"): str,
        Optional("IONIZATION"): str,
        Optional("IONIZATION_ENERGY"): str,
        Optional("IONIZATION_POTENTIAL"): str,
        Optional("MASS_ACCURACY"): str,
        Optional("PRECURSOR_TYPE"): str,
        Optional("REAGENT_GAS"): str,
        Optional("SOURCE_TEMPERATURE"): str,
        Optional("SPRAY_VOLTAGE"): str,
        Optional("ACTIVATION_PARAMETER"): str,
        Optional("ACTIVATION_TIME"): str,
        Optional("ATOM_GUN_CURRENT"): str,
        Optional("AUTOMATIC_GAIN_CONTROL"): str,
        Optional("BOMBARDMENT"): str,
        Optional("CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
        Optional("CDL_TEMPERATURE"): str,
        Optional("DATAFORMAT"): str,
        Optional("DESOLVATION_GAS_FLOW"): str,
        Optional("DESOLVATION_TEMPERATURE"): str,
        Optional("INTERFACE_VOLTAGE"): str,
        Optional("IT_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
        Optional("LASER"): str,
        Optional("MATRIX"): str,
        Optional("NEBULIZER"): str,
        Optional("OCTPOLE_VOLTAGE"): str,
        Optional("PROBE_TIP"): str,
        Optional("RESOLUTION_SETTING"): str,
        Optional("SAMPLE_DRIPPING"): str,
        Optional("SCAN_RANGE_MOVERZ"): str,
        Optional("SCANNING"): str,
        Optional("SCANNING_CYCLE"): str,
        Optional("SCANNING_RANGE"): str,
        Optional("SKIMMER_VOLTAGE"): str,
        Optional("TUBE_LENS_VOLTAGE"): str,
        Optional("MS_RESULTS_FILE"): Or(str, dict)
    }
)
# Schema for the NMR section: instrument identity, experiment type and
# spectrometer frequency are mandatory; acquisition/processing parameters
# and binned-data descriptors are optional.
nmr_schema = Schema(
    {
        "INSTRUMENT_NAME": str,
        "INSTRUMENT_TYPE": str,
        "NMR_EXPERIMENT_TYPE": str,
        Optional("NMR_COMMENTS"): str,
        Optional("FIELD_FREQUENCY_LOCK"): str,
        Optional("STANDARD_CONCENTRATION"): str,
        "SPECTROMETER_FREQUENCY": str,
        Optional("NMR_PROBE"): str,
        Optional("NMR_SOLVENT"): str,
        Optional("NMR_TUBE_SIZE"): str,
        Optional("SHIMMING_METHOD"): str,
        Optional("PULSE_SEQUENCE"): str,
        Optional("WATER_SUPPRESSION"): str,
        Optional("PULSE_WIDTH"): str,
        Optional("POWER_LEVEL"): str,
        Optional("RECEIVER_GAIN"): str,
        Optional("OFFSET_FREQUENCY"): str,
        Optional("PRESATURATION_POWER_LEVEL"): str,
        Optional("CHEMICAL_SHIFT_REF_CPD"): str,
        Optional("TEMPERATURE"): str,
        Optional("NUMBER_OF_SCANS"): str,
        Optional("DUMMY_SCANS"): str,
        Optional("ACQUISITION_TIME"): str,
        Optional("RELAXATION_DELAY"): str,
        Optional("SPECTRAL_WIDTH"): str,
        Optional("NUM_DATA_POINTS_ACQUIRED"): str,
        Optional("REAL_DATA_POINTS"): str,
        Optional("LINE_BROADENING"): str,
        Optional("ZERO_FILLING"): str,
        Optional("APODIZATION"): str,
        Optional("BASELINE_CORRECTION_METHOD"): str,
        Optional("CHEMICAL_SHIFT_REF_STD"): str,
        Optional("BINNED_INCREMENT"): str,
        Optional("BINNED_DATA_NORMALIZATION_METHOD"): str,
        Optional("BINNED_DATA_PROTOCOL_FILE"): str,
        Optional("BINNED_DATA_CHEMICAL_SHIFT_RANGE"): str,
        Optional("BINNED_DATA_EXCLUDED_RANGE"): str
    }
)
# Schema for a data table: a list of rows keyed either by "Metabolite" or
# by "Bin range(ppm)" (exactly one of the two per row, hence only_one=True);
# all remaining columns are free-form string key/value pairs.
data_schema = Schema(
    [
        {
            Or("Metabolite", "Bin range(ppm)", only_one=True): str,
            Optional(str): str,
        },
    ]
)
# Schema for the EXTENDED_MS_METABOLITE_DATA rows: each row carries a
# metabolite name, a sample_id, and arbitrary extra string columns.
extended_schema = Schema(
    [
        {
            "Metabolite": str,
            Optional(str): str,
            "sample_id": str
        },
    ]
)
# Schema for the (MS or NMR) metabolite data block: measurement units plus
# the Data and Metabolites tables (both validated by data_schema), with an
# optional Extended table.
ms_metabolite_data_schema = Schema(
    {
        "Units": str,
        "Data": data_schema,
        "Metabolites": data_schema,
        Optional("Extended"): extended_schema
    }
)
# Schema for the NMR_BINNED_DATA block: units plus one binned data table.
nmr_binned_data_schema = Schema(
    {
        "Units": str,
        "Data": data_schema
    }
)
# Maps mwTab section names to their validation schemas.  Note that MS and
# NMR metabolite data sections share the same schema.
section_schema_mapping = {
    "METABOLOMICS WORKBENCH": metabolomics_workbench_schema,
    "PROJECT": project_schema,
    "STUDY": study_schema,
    "ANALYSIS": analysis_schema,
    "SUBJECT": subject_schema,
    "SUBJECT_SAMPLE_FACTORS": subject_sample_factors_schema,
    "COLLECTION": collection_schema,
    "TREATMENT": treatment_schema,
    "SAMPLEPREP": sampleprep_schema,
    "CHROMATOGRAPHY": chromatography_schema,
    "MS": ms_schema,
    "NM": nmr_schema,  # NOTE(review): key is "NM", not "NMR" — confirm this matches the section key produced by the parser
    "MS_METABOLITE_DATA": ms_metabolite_data_schema,
    "NMR_METABOLITE_DATA": ms_metabolite_data_schema,
    "NMR_BINNED_DATA": nmr_binned_data_schema,
}
| 2.078125 | 2 |
functions/asmm_xml.py | EUFAR/asmm-eufar | 0 | 12901 | <reponame>EUFAR/asmm-eufar<filename>functions/asmm_xml.py
import datetime
import xml.dom.minidom
import logging
from PyQt5 import QtCore, QtWidgets
from functions.button_functions import add_read
NAMESPACE_URI = 'http://www.eufar.net/ASMM'
def _append_checked_user_boxes(doc, layout, tag, parent):
    """Append one <tag> child to *parent* per checked QCheckBox found in *layout*.

    Used for the user-defined checkboxes that each tab's grid layout may
    contain in addition to the predefined code checkboxes.
    """
    for i in range(layout.count()):
        widget = layout.itemAt(i).widget()
        if isinstance(widget, QtWidgets.QCheckBox) and widget.isChecked():
            add_element(doc, tag, parent, widget.text())


def create_asmm_xml(self, out_file_name):
    """Serialize the current state of the GUI form into an ASMM XML file.

    Walks every tab of the main window (flight information, contact info,
    scientific aims, geographical region, atmospheric features, cloud
    types, particles, surfaces, altitudes, flight types, satellite
    coordination, surface observations, comments), builds a DOM document
    and writes it pretty-printed to *out_file_name*.  On success the
    ``saved``/``modified`` flags on *self* are updated.

    :param self: the main-window object holding all form widgets.
    :param out_file_name: path of the XML file to write.
    """
    logging.debug('asmm_xml.py - create_asmm_xml - out_file_name ' + out_file_name)
    doc = xml.dom.minidom.Document()
    doc_root = add_element(doc, "MissionMetadata", doc)
    doc_root.setAttribute("xmlns:asmm", NAMESPACE_URI)
    current_date = datetime.date.isoformat(datetime.date.today())
    # Keep the original creation date when re-saving an existing document.
    if not self.create_date:
        self.create_date = current_date
    add_element(doc, "CreationDate", doc_root, self.create_date)
    add_element(doc, "RevisionDate", doc_root, current_date)

    ############################
    # Flight Information
    ############################
    flightInformation = add_element(doc, "FlightInformation", doc_root)
    add_element(doc, "FlightNumber", flightInformation, self.flightNumber_ln.text())
    add_element(doc, "Date", flightInformation, self.date_dt.date().toString(QtCore.Qt.ISODate))
    add_element(doc, "ProjectAcronym", flightInformation, self.projectAcronym_ln.text())
    add_element(doc, "MissionScientist", flightInformation, self.missionSci_ln.text())
    add_element(doc, "FlightManager", flightInformation, self.flightManager_ln.text())
    operator = self.operator_cb.currentText()
    aircraft = self.aircraft_cb.currentText()
    country = ''
    manufacturer = ''
    registration = ''
    if operator == 'Other...':
        # Operator/aircraft typed free-form by the user.
        operator = self.newOperator_ln.text()
        aircraft = self.newAircraft_ln.text()
        registration = self.newRegistration_ln.text()
        manufacturer = self.newManufacturer_ln.text()
        if self.newCountry_cb.currentText() != 'Make a choice...':
            country = self.newCountry_cb.currentText()
    elif operator != 'Make a choice...':
        if aircraft != 'Make a choice...':
            # The combo text may read "<aircraft> - <registration>"; split it.
            index = aircraft.find(' - ')
            if index != -1:
                registration = aircraft[index + 3:]
                if len(registration) > 3:
                    aircraft = aircraft[0:index]
            # Resolve manufacturer/country (and possibly registration) from
            # the aircraft table, matching on registration when one was
            # parsed, otherwise on the aircraft name.
            for i in range(len(self.new_operators_aircraft)):
                if registration != '' and len(registration) > 3:
                    if registration == self.new_operators_aircraft[i][2]:
                        index = self.new_operators_aircraft[i][1].find(', ')
                        manufacturer = self.new_operators_aircraft[i][1][: index]
                        country = self.new_operators_aircraft[i][3]
                        break
                else:
                    index = self.new_operators_aircraft[i][1].find(', ')
                    aircraft_from_table = self.new_operators_aircraft[i][1][index + 2:]
                    if aircraft == aircraft_from_table:
                        manufacturer = self.new_operators_aircraft[i][1][: index]
                        country = self.new_operators_aircraft[i][3]
                        registration = self.new_operators_aircraft[i][2]
                        break
        else:
            aircraft = ''
    else:
        operator = ''
        aircraft = ''
    # Store the country code rather than the display name.
    for key, value in self.new_country_code.items():
        if value == country:
            country = key
            break
    add_element(doc, "Platform", flightInformation, aircraft)
    add_element(doc, "Operator", flightInformation, operator)
    add_element(doc, "OperatorCountry", flightInformation, country)
    add_element(doc, "Manufacturer", flightInformation, manufacturer)
    add_element(doc, "RegistrationNumber", flightInformation, registration)
    # Localisation is only meaningful when both combos carry a real choice.
    if (self.location_cb.currentText() == "Make a choice..."
            or self.detailList.currentText() == "Make a choice..."):
        add_element(doc, "Localisation", flightInformation, "")
    else:
        add_element(doc, "Localisation", flightInformation, self.detailList.currentText())

    ###########################
    # Metadata Contact Info
    ###########################
    contactInfo = add_element(doc, "ContactInfo", doc_root)
    add_element(doc, "ContactName", contactInfo, self.contactName_ln.text())
    if self.contact_cb.currentText() == 'Make a choice...':
        add_element(doc, "ContactRole", contactInfo, '')
    else:
        add_element(doc, "ContactRole", contactInfo, self.contact_cb.currentText())
    add_element(doc, "ContactEmail", contactInfo, self.contactEmail_ln.text())

    ############################
    # Scientific Aims
    ############################
    scientificAims = add_element(doc, "ScientificAims", doc_root)
    add_check_elements(doc, self.scientific_aims_check_dict, "SA_Code", scientificAims)
    if self.sa_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_5, "SA_User", scientificAims)
    add_element(doc, "SA_Other", scientificAims, self.SAOtherTextBox.toPlainText())

    ############################
    # Geographical Region
    ############################
    geographicalRegion = add_element(doc, "GeographicalRegion", doc_root)
    geographicBoundingBox = add_element(doc, "GeographicBoundingBox", geographicalRegion)
    add_element(doc, "westBoundLongitude", geographicBoundingBox, self.westBoundLongitudeLine.text())
    add_element(doc, "eastBoundLongitude", geographicBoundingBox, self.eastBoundLongitudeLine.text())
    add_element(doc, "northBoundLatitude", geographicBoundingBox, self.northBoundLatitudeLine.text())
    add_element(doc, "southBoundLatitude", geographicBoundingBox, self.southBoundLatitudeLine.text())
    add_element(doc, "minAltitude", geographicBoundingBox, self.minAltitudeLine.text())
    add_element(doc, "maxAltitude", geographicBoundingBox, self.maxAltitudeLine.text())
    add_check_elements(doc, self.geographical_region_check_dict, "GR_Code", geographicalRegion)
    if self.gr_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_8, "GR_User", geographicalRegion)
    add_element(doc, "GR_Other", geographicalRegion, self.GROtherTextBox.toPlainText())

    ############################
    # Atmospheric Features
    ############################
    atmosphericFeatures = add_element(doc, "AtmosFeatures", doc_root)
    add_check_elements(doc, self.atmospheric_features_check_dict, "AF_Code", atmosphericFeatures)
    if self.af_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_9, "AF_User", atmosphericFeatures)
    add_element(doc, "AF_Other", atmosphericFeatures, self.AFOtherTextBox.toPlainText())

    ############################
    # Cloud Types
    ############################
    cloudTypes = add_element(doc, "CloudTypes", doc_root)
    add_check_elements(doc, self.cloud_types_check_dict, "CT_Code", cloudTypes)
    if self.ct_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_10, "CT_User", cloudTypes)
    add_element(doc, "CT_Other", cloudTypes, self.CTOtherTextBox.toPlainText())

    ############################
    # Particles Sampled
    ############################
    particlesSampled = add_element(doc, "ParticlesSampled", doc_root)
    add_check_elements(doc, self.particles_sampled_check_dict, "PS_Code", particlesSampled)
    if self.ps_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_11, "PS_User", particlesSampled)
    add_element(doc, "PS_Other", particlesSampled, self.PSOtherTextBox.toPlainText())

    ############################
    # Surfaces Overflown
    ############################
    surfacesOverflown = add_element(doc, "SurfacesOverflown", doc_root)
    add_check_elements(doc, self.surfaces_overflown_check_dict, "SO_Code", surfacesOverflown)
    if self.so_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_13, "SO_User", surfacesOverflown)
    add_element(doc, "SO_Other", surfacesOverflown, self.SOOtherTextBox.toPlainText())

    ############################
    # Altitude Ranges
    ############################
    altitudeRanges = add_element(doc, "AltitudeRanges", doc_root)
    add_check_elements(doc, self.altitude_ranges_check_dict, "AR_Code", altitudeRanges)
    if self.ar_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_14, "AR_User", altitudeRanges)
    add_element(doc, "AR_Other", altitudeRanges, self.AROtherTextBox.toPlainText())

    ############################
    # Flight Types
    ############################
    flightTypes = add_element(doc, "FlightTypes", doc_root)
    add_check_elements(doc, self.flight_types_check_dict, "FT_Code", flightTypes)
    if self.fm_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_15, "FT_User", flightTypes)
    add_element(doc, "FT_Other", flightTypes, self.FTOtherTextBox.toPlainText())

    ############################
    # Satellite coordination
    ############################
    satelliteCoordination = add_element(doc, "SatelliteCoordination", doc_root)
    add_check_elements(doc, self.satellite_coordination_check_dict, "SC_Code", satelliteCoordination)
    if self.sc_ck_list:
        _append_checked_user_boxes(doc, self.gridLayout_25, "SC_User", satelliteCoordination)
    add_element(doc, "SC_Other", satelliteCoordination, self.SCOtherTextBox.toPlainText())

    ############################
    # Surface Observations
    ############################
    surfaceObs = add_element(doc, "SurfaceObs", doc_root)
    for item in self.ground_site_list:
        add_element(doc, "GroundSite", surfaceObs, item)
    for item in self.research_vessel_list:
        add_element(doc, "ResearchVessel", surfaceObs, item)
    for item in self.arm_site_list:
        add_element(doc, "ArmSite", surfaceObs, item)
    for item in self.arm_mobile_list:
        add_element(doc, "ArmMobile", surfaceObs, item)

    ############################
    # Other Comments
    ############################
    if self.OtherCommentsTextBox.toPlainText():
        add_element(doc, "OtherComments", doc_root, self.OtherCommentsTextBox.toPlainText())

    ############################
    # File Creation
    ############################
    # Context manager guarantees the handle is closed even if the write fails
    # (the original open/write/close sequence leaked on error).
    with open(out_file_name, 'w') as f:
        f.write(doc.toprettyxml())
    self.saved = True
    self.modified = False
    logging.debug('asmm_xml.py - create_asmm_xml - file created successfully')
def _read_checked_section(self, doc, element_name, prefix, check_dict, old_check_dict,
                          other_widget, user_key=None):
    """Restore one checkbox section (<prefix>_Code / _Other / _User elements).

    Falls back to *old_check_dict* when the stored codes index past the
    current dictionary (files written by older program versions).
    *user_key* overrides the prefix passed to ``add_read`` for user-defined
    entries (the FlightTypes tab registers them under "FM").
    """
    section = get_element(doc, element_name)
    try:
        set_check_values(check_dict, section, prefix + "_Code")
    except IndexError:
        set_check_values(old_check_dict, section, prefix + "_Code")
    set_text_value(other_widget, section, prefix + "_Other")
    for item in get_element_values(section, prefix + "_User"):
        add_read(self, user_key or prefix, item)


def read_asmm_xml(self, in_file_name):
    """Populate the GUI form from an existing ASMM XML file.

    Resets all fields, parses *in_file_name*, then restores every tab:
    flight information, contact info, the nine checkbox sections, surface
    observations and free-text comments.

    :param self: the main-window object holding all form widgets.
    :param in_file_name: path of the XML file to read.
    """
    logging.debug('asmm_xml.py - read_asmm_xml - out_file_name ' + in_file_name)
    self.reset_all_fields()
    # Parse inside a context manager — the original handle was never closed.
    with open(in_file_name, 'r') as f:
        doc = xml.dom.minidom.parse(f)

    ############################
    # Flight Information
    ############################
    self.create_date = get_element_value(doc, "CreationDate")
    flightInformation = get_element(doc, "FlightInformation")
    set_text_value(self.flightNumber_ln, flightInformation, "FlightNumber")
    date = get_element_value(flightInformation, "Date")
    self.date_dt.setDate(QtCore.QDate.fromString(date, QtCore.Qt.ISODate))
    set_text_value(self.projectAcronym_ln, flightInformation, "ProjectAcronym")
    set_text_value(self.missionSci_ln, flightInformation, "MissionScientist")
    set_text_value(self.flightManager_ln, flightInformation, "FlightManager")
    operator = get_element_value(flightInformation, "Operator")
    aircraft = get_element_value(flightInformation, "Platform")
    registration = get_element_value(flightInformation, "RegistrationNumber")
    aircraft_found = False
    if registration:
        # Known registration: select the matching operator/aircraft combos.
        for i in range(len(self.new_operators_aircraft)):
            if registration == self.new_operators_aircraft[i][2]:
                aircraft_found = True
                self.operator_cb.setCurrentIndex(self.operator_cb.findText(operator))
                self.operator_changed()
                index = self.aircraft_cb.findText(aircraft)
                if index != -1:
                    self.aircraft_cb.setCurrentIndex(index)
                else:
                    index = self.aircraft_cb.findText(aircraft + ' - ' + registration)
                    self.aircraft_cb.setCurrentIndex(index)
                break
    if not aircraft_found:
        # Unknown aircraft (or no registration stored): switch the operator
        # combo to 'Other...' and fill the free-text fields instead.
        self.operator_cb.setCurrentIndex(1)
        self.operator_changed()
        self.newOperator_ln.setText(operator)
        self.newAircraft_ln.setText(aircraft)
        self.newRegistration_ln.setText(registration)
        self.newManufacturer_ln.setText(get_element_value(flightInformation, "Manufacturer"))
        country = get_element_value(flightInformation, "OperatorCountry")
        if country:
            # Guard against findText() == -1, which would clear the combo.
            index = self.newCountry_cb.findText(country)
            if index != -1:
                self.newCountry_cb.setCurrentIndex(index)
    combo_text = get_element_value(flightInformation, "Localisation")
    if combo_text is not None:
        # The stored value belongs to one of the four location lists; pick
        # the matching category and repopulate the detail combo accordingly.
        for label, values in (("Countries", self.countries),
                              ("Continents", self.continents),
                              ("Oceans", self.oceans),
                              ("Regions", self.regions)):
            if combo_text in values:
                self.location_cb.setCurrentIndex(self.location_cb.findText(label))
                self.detailList.clear()
                self.detailList.setEnabled(True)
                self.detailList.addItems(values)
                self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
                break

    #############################
    # Metadata Contact Info
    #############################
    contactInfo = get_element(doc, "ContactInfo")
    set_text_value(self.contactName_ln, contactInfo, "ContactName")
    set_text_value(self.contactEmail_ln, contactInfo, "ContactEmail")
    combo_text = get_element_value(contactInfo, "ContactRole")
    if combo_text is not None:
        self.contact_cb.setCurrentIndex(self.contact_cb.findText(combo_text))

    #############################
    # Scientific Aims
    #############################
    _read_checked_section(self, doc, "ScientificAims", "SA",
                          self.scientific_aims_check_dict,
                          self.old_scientific_aims_check_dict, self.SAOtherTextBox)

    #############################
    # Geographical Region
    #############################
    geographicalRegion = get_element(doc, "GeographicalRegion")
    geographicBoundingBox = get_element(geographicalRegion, "GeographicBoundingBox")
    set_text_value_coord(self, self.westBoundLongitudeLine, geographicBoundingBox, "westBoundLongitude")
    set_text_value_coord(self, self.eastBoundLongitudeLine, geographicBoundingBox, "eastBoundLongitude")
    set_text_value_coord(self, self.northBoundLatitudeLine, geographicBoundingBox, "northBoundLatitude")
    set_text_value_coord(self, self.southBoundLatitudeLine, geographicBoundingBox, "southBoundLatitude")
    set_text_value_coord(self, self.minAltitudeLine, geographicBoundingBox, "minAltitude")
    set_text_value_coord(self, self.maxAltitudeLine, geographicBoundingBox, "maxAltitude")
    _read_checked_section(self, doc, "GeographicalRegion", "GR",
                          self.geographical_region_check_dict,
                          self.old_geographical_region_check_dict, self.GROtherTextBox)

    #############################
    # Remaining checkbox sections
    #############################
    _read_checked_section(self, doc, "AtmosFeatures", "AF",
                          self.atmospheric_features_check_dict,
                          self.old_atmospheric_features_check_dict, self.AFOtherTextBox)
    _read_checked_section(self, doc, "CloudTypes", "CT",
                          self.cloud_types_check_dict,
                          self.old_cloud_types_check_dict, self.CTOtherTextBox)
    _read_checked_section(self, doc, "ParticlesSampled", "PS",
                          self.particles_sampled_check_dict,
                          self.old_particles_sampled_check_dict, self.PSOtherTextBox)
    _read_checked_section(self, doc, "SurfacesOverflown", "SO",
                          self.surfaces_overflown_check_dict,
                          self.old_surfaces_overflown_check_dict, self.SOOtherTextBox)
    _read_checked_section(self, doc, "AltitudeRanges", "AR",
                          self.altitude_ranges_check_dict,
                          self.old_altitude_ranges_check_dict, self.AROtherTextBox)
    # FlightTypes stores codes under "FT_*" but registers user entries as "FM".
    _read_checked_section(self, doc, "FlightTypes", "FT",
                          self.flight_types_check_dict,
                          self.old_flight_types_check_dict, self.FTOtherTextBox,
                          user_key="FM")
    _read_checked_section(self, doc, "SatelliteCoordination", "SC",
                          self.satellite_coordination_check_dict,
                          self.old_satellite_coordination_check_dict, self.SCOtherTextBox)

    #############################
    # Surface Observations
    #############################
    surfaceObservations = get_element(doc, "SurfaceObs")
    self.ground_site_list = get_element_values(surfaceObservations, "GroundSite")
    self.groundListWidget.addItems(self.ground_site_list)
    self.research_vessel_list = get_element_values(surfaceObservations, "ResearchVessel")
    self.vesselListWidget.addItems(self.research_vessel_list)
    self.arm_site_list = get_element_values(surfaceObservations, "ArmSite")
    self.armListWidget.addItems(self.arm_site_list)
    self.arm_mobile_list = get_element_values(surfaceObservations, "ArmMobile")
    self.armMobileListWidget.addItems(self.arm_mobile_list)

    ##############################
    # Other Comments
    ##############################
    set_text_value(self.OtherCommentsTextBox, doc, "OtherComments")
    # Log message fixed: it previously claimed 'create_asmm_xml'.
    logging.debug('asmm_xml.py - read_asmm_xml - file read successfully')
def get_element(parent, element_name):
    """Return the first descendant of *parent* named *element_name* in the ASMM namespace.

    Raises IndexError when no such element exists.
    """
    logging.debug('asmm_xml.py - get_element - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    matches = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    return matches[0]
def get_element_value(parent, element_name):
    """Return the stripped text of the first matching element, or None.

    None is returned both when no element matches and when the first match
    carries no text node.
    """
    logging.debug('asmm_xml.py - get_element_value - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    matches = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    if not matches:
        return None
    for child in matches[0].childNodes:
        if child.nodeType == child.TEXT_NODE:
            return child.data.strip()
    return None
def get_element_values(parent, element_name):
    """Return the stripped text values of every matching descendant element.

    Fix: elements with no child node at all (e.g. an empty ``<Tag/>``) used
    to raise IndexError on ``childNodes[0]``; they are now skipped, so a
    partially-filled document no longer aborts the whole read.
    """
    logging.debug('asmm_xml.py - get_element_values - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    value_list = []
    for element in parent.getElementsByTagNameNS(NAMESPACE_URI, element_name):
        if element.childNodes:
            value_list.append(element.childNodes[0].data.strip())
    return value_list
def set_check_values(check_dict, parent, element_name):
    # Tick every check-box widget whose code value appears under *element_name*.
    # NOTE(review): find_key raises IndexError (it never returns None) when a
    # code has no matching widget; the callers deliberately catch IndexError to
    # fall back to their "old" code dictionaries, so the `is not None` guard
    # below never filters anything in practice.
    logging.debug('asmm_xml.py - set_check_values - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    elements = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    for element in elements:
        check_widget = find_key(check_dict, element.childNodes[0].data.strip())
        if check_widget is not None:
            check_widget.setChecked(True)
def set_text_value(text_widget, parent, element_name):
    """Copy the element's text into *text_widget*; leave the widget alone when empty/missing."""
    logging.debug('asmm_xml.py - set_text_value - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    value = get_element_value(parent, element_name)
    if value:
        text_widget.setText(value)
def set_text_value_coord(self, text_widget, parent, element_name):
    """Like set_text_value, but strips the coordinate unit suffix before writing the widget."""
    logging.debug('asmm_xml.py - set_text_value_coord - parent ' + str(parent) + ' ; element_name ' + str(element_name))
    value = get_element_value(parent, element_name)
    if value:
        text_widget.setText(clean_coordinate_string(self, value))
def add_element(doc, element_name, parent, value=None):
    """Create an ``asmm:``-prefixed element, optionally give it a text child,
    append it to *parent*, and return the new element."""
    logging.debug('asmm_xml.py - add_element - parent ' + str(parent) + ' ; element_name ' + str(element_name) + ' ; value ' + str(value))
    element = doc.createElementNS(NAMESPACE_URI, "asmm:" + element_name)
    if value:
        element.appendChild(doc.createTextNode(value))
    parent.appendChild(element)
    return element
def add_check_elements(doc, check_dict, code_name, parent):
    """Append one *code_name* element for every checked widget in *check_dict*.

    *check_dict* maps check-box widgets to the code string written to the XML.
    (The old ``iter(...)`` wrapper around ``dict.items()`` was redundant --
    ``items()`` is already iterable -- and has been dropped.)
    """
    logging.debug('asmm_xml.py - add_check_elements - parent ' + str(parent) + ' ; element_name ' + str(code_name))
    for widget, code in check_dict.items():
        if widget.isChecked():
            add_element(doc, code_name, parent, code)
def find_key(dic, val):
    """Return the first key in *dic* whose value equals *val* (reverse lookup).

    Raises IndexError when no value matches.  Callers in this module rely on
    that IndexError to fall back to their legacy code dictionaries, so do NOT
    change this to return None.  (The redundant ``iter()`` wrapper around
    ``dict.items()`` was removed; behavior is unchanged.)
    """
    return [k for k, v in dic.items() if v == val][0]
def clean_coordinate_string(self, string):
    """Strip the first recognised unit suffix from *string*.

    ``self.coordinate_units_list`` maps unit markers to a sign; when the
    matched marker's sign is negative, a leading '-' is prepended.  Only the
    first marker found (in dict order) is applied; unknown strings are
    returned unchanged.
    """
    logging.debug('asmm_xml.py - clean_coordinate_string - string ' + string)
    for marker, sign in self.coordinate_units_list.items():
        position = string.find(marker)
        if position != -1:
            string = string[:position]
            if sign < 0:
                string = '-' + string
            break
    return string
| 2.375 | 2 |
github/models.py | pyprism/Hiren-Git-Commit-Reminder | 0 | 12902 | <reponame>pyprism/Hiren-Git-Commit-Reminder
from django.db import models
# Create your models here.
class Hiren(models.Model):
    # OAuth access token for the linked account (presumably GitHub, given the
    # app name -- TODO confirm against the view that sets it).
    access_token = models.CharField(max_length=200)
    # True once the token has been successfully authorized.
    authorized = models.BooleanField(default=False)
| 2.078125 | 2 |
linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 1 | 12903 | #!/usr/bin/python
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
====================
Unique ID generation
====================
The methods of the idGen class are used to generate unique IDs in various forms
(numbers, strings, etc) which are used to give microprocesses and other Axon
objects a unique identifier and name.
* Every Axon.Microprocess.microprocess gets a unique ID
* Axon.ThreadedComponent.threadedcomponent uses unique IDs to identify threads
Generating a new unique ID
--------------------------
Do not use the idGen class defined in this module directly. Instead, use any
of these module methods to obtain a unique ID:
* **Axon.idGen.newId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.strId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.numId()** - returns a unique identifier as a number
* **Axon.idGen.tupleId(thing)** - returns both the numeric and string versions
of a new unique id as a tuple (where the string version is based on the class
name of the object provided)
Calling tupleId(thing) is *not* equivalent to calling numId() then strId(thing)
because doing that would return two different id values!
Examples::
>>> x=Component.component()
>>> idGen.newId(x)
'Component.component_4'
>>> idGen.strId(x)
'Component.component_5'
>>> idGen.numId()
6
>>> idGen.tupleId(x)
(7, 'Component.component_7')
"""
import debug;
# Module-wide debug hook.  Debug(...) is only ever called inside `assert`
# statements below, so all debug output disappears when Python runs with -O.
debugger = debug.debug()
debugger.useConfig()
Debug = debugger.debug
# idGen - A class to provide Unique Identifiers
#
# Ids can provide be provided as numerical, string or a tuple.
#
# numerical ids are integers allocated on a "next integer" basis.
# eg object 1, apple 2, orange 3. (Not object 1, apple 2, orange 3)
#
# string ids consist of the '__str__' of the object, with the numerical
# id tacked on the end.
#
# tuple ids consists : '(the numerical id, the string id)'
#
class idGen(object):
    """\
    Unique ID creator.

    Use numId(), strId(), and tupleId() methods to obtain unique IDs.  The
    counter is a class attribute, so every instance draws from one shared,
    monotonically increasing sequence.
    """
    lowestAllocatedId = 0   # shared counter; last id handed out

    def nextId(self):
        """\
        **INTERNAL**
        Bump the shared counter and return its new value.
        """
        idGen.lowestAllocatedId += 1
        return idGen.lowestAllocatedId
    next = nextId # pseudonym

    def idToString(self,thing,aNumId):
        """\
        **INTERNAL**
        Combine str() of the object's class with the numeric id to form a string id.
        """
        # str(thing.__class__) looks like <class '__main__.foo'>; slicing
        # [8:][:-2] keeps just the __main__.foo part.
        return "%s_%s" % (str(thing.__class__)[8:][:-2], aNumId)

    def numId(self):
        """Allocates & returns the next available id"""
        newIdNum = self.nextId()
        assert Debug("idGen.numId", 1, "idGen.numId:", newIdNum)
        return newIdNum

    def strId(self,thing):
        """\
        Allocates & returns the next available id combined with the object's
        class name, in string form
        """
        newIdStr = self.idToString(thing, self.nextId())
        assert Debug("idGen.strId", 1, "idGen.strId:", newIdStr)
        return newIdStr

    def tupleId(self,thing):
        """\
        Allocates one new id and returns it as a (number, string) pair, where
        the string part combines the id with the object's class name.
        """
        newIdNum = self.nextId()
        newIdStr = self.idToString(thing, newIdNum)
        assert Debug("idGen.tupleId", 1, "idGen.tupleId:", newIdNum, newIdStr)
        return newIdNum, newIdStr
# Module-level convenience functions.  Each is bound to a fresh idGen
# instance, but since the counter lives on the class, they all draw from the
# same global id sequence.  Note newId is an alias of strId (returns a string).
newId = idGen().strId
strId=idGen().strId
numId=idGen().numId
tupleId=idGen().tupleId
if __name__ == '__main__':
    # Quick demo: each call allocates a new id from the shared counter.
    # Fix: the old Python-2-only `print x` statements are now parenthesized;
    # a single-argument print(x) behaves identically under Python 2 and also
    # parses under Python 3.
    class foo: pass
    class bar: pass
    class bibble: pass
    print(newId(foo()))
    print(newId(bar()))
    print(newId(bibble()))
| 1.84375 | 2 |
pyCEvNS/flux.py | athompson-tamu/pyCEvNS | 0 | 12904 | """
flux related class and functions
"""
from scipy.integrate import quad
import pandas as pd
from .helper import LinearInterp, polar_to_cartesian, lorentz_boost, lorentz_matrix
from .oscillation import survival_solar
from .parameters import *
def _invs(ev):
return 1/ev**2
class FluxBaseContinuous:
    """
    Base class for a continuous neutrino flux tabulated on an energy grid.

    ``ev`` is the grid of neutrino energies (assumed sorted ascending) and
    ``flux`` the flux values on that grid; calls interpolate linearly between
    grid points and integrals use the trapezoid rule per bin.
    """
    def __init__(self, ev, flux, norm=1):
        # :param ev: sorted array of neutrino energies
        # :param flux: flux values on the ``ev`` grid
        # :param norm: overall normalization applied to every result
        self.norm = norm
        self.ev = ev
        self.fx = flux
        self.ev_min = self.ev[0]
        self.ev_max = self.ev[-1]
        self.binw = self.ev[1:] - self.ev[:-1]
        # Per-bin trapezoid areas cached by weight function; the ``None``
        # key holds the unweighted flux areas.
        self.precalc = {None: self.binw*(self.fx[1:]+self.fx[:-1])/2}
    def __call__(self, ev):
        # Linear interpolation of the flux at ``ev``; 0 outside the grid.
        if ev == self.ev_min:
            return self.fx[0] * self.norm
        if ev == self.ev_max:
            return self.fx[-1] * self.norm
        if self.ev_min < ev < self.ev_max:
            idx = self.ev.searchsorted(ev)
            # l1/l2: distances to the bracketing grid points; h1/h2: fluxes there.
            l1 = ev - self.ev[idx-1]
            l2 = self.ev[idx] - ev
            h1 = self.fx[idx-1]
            h2 = self.fx[idx]
            return (l1*h2 + l2*h1) / (l1 + l2) * self.norm
        return 0
    def integrate(self, ea, eb, weight_function=None):
        # Trapezoidal integral of weight_function(ev)*flux(ev) over [ea, eb],
        # clipped to the tabulated range, times ``norm``.
        # NOTE(review): weight_function is used as a cache key -- pass named
        # functions, not fresh lambdas, or the cache grows without bound.
        if eb <= ea:
            return 0
        res = 0
        if weight_function not in self.precalc:
            # Cache per-bin trapezoid areas of the weighted flux.
            weighted = weight_function(self.ev)*self.fx
            self.precalc[weight_function] = self.binw * (weighted[1:]+weighted[:-1]) / 2
        eb = min(eb, self.ev_max)
        ea = max(ea, self.ev_min)
        idxmin = self.ev.searchsorted(ea, side='right')
        idxmax = self.ev.searchsorted(eb, side='left')
        if idxmin == idxmax:
            # Both bounds fall inside one bin: single trapezoid between the
            # interpolated endpoint values.
            l1 = ea - self.ev[idxmin - 1]
            l2 = self.ev[idxmin] - ea
            h1 = self.fx[idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
                if weight_function is not None else self.fx[idxmin - 1]
            h2 = self.fx[idxmin] * weight_function(self.ev[idxmin]) \
                if weight_function is not None else self.fx[idxmin]
            ha = (l1*h2+l2*h1)/(l1+l2)
            l1 = eb - self.ev[idxmax - 1]
            l2 = self.ev[idxmax] - eb
            hb = (l1*h2+l2*h1)/(l1+l2)
            return (ha + hb) * (eb - ea) / 2 * self.norm
        # Whole interior bins...
        res += np.sum(self.precalc[weight_function][idxmin:idxmax-1])
        # ...plus the partial bin at the lower edge...
        l1 = ea - self.ev[idxmin-1]
        l2 = self.ev[idxmin] - ea
        h1 = self.fx[idxmin-1]*weight_function(self.ev[idxmin-1]) \
            if weight_function is not None else self.fx[idxmin-1]
        h2 = self.fx[idxmin]*weight_function(self.ev[idxmin]) \
            if weight_function is not None else self.fx[idxmin]
        res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
        # ...and the partial bin at the upper edge.
        l1 = eb - self.ev[idxmax - 1]
        l2 = self.ev[idxmax] - eb
        h1 = self.fx[idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
            if weight_function is not None else self.fx[idxmax-1]
        h2 = self.fx[idxmax] * weight_function(self.ev[idxmax]) \
            if weight_function is not None else self.fx[idxmax]
        res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
        return res * self.norm
class Flux:
    """
    flux class,
    flux at source

    Wraps several built-in source spectra (reactor, SNS/COHERENT prompt and
    delayed, solar chains) as well as user-supplied tabulated fluxes, and
    provides the flux integrals (fint, fintinv, fintinvs) used in recoil-rate
    calculations.
    """
    def __init__(self, fl_name, delimiter=',', fl_unc=0):
        """
        initializing flux, can take in user provided flux
        restrictions: user provided data must have 7 columns,
        first column is neutrino energy in MeV,
        other columns are neutrino flux in cm^2/s/MeV, they are enu, munu, taunu, enubar, munubar, taunubar
        :param fl_name: name of the flux or path to the file or array of neutrino flux
        :param delimiter: delimiter of the input file, default is ','
        :param fl_unc: uncertainty of flux
        """
        if isinstance(fl_name, str):
            self.fl_name = fl_name.lower()
        else:
            self.fl_name = 'default'
        if self.fl_name == 'reactor':
            self.evMin = 0.0
            self.evMax = 30 # MeV
            self.flUn = 0.02
            fpers = 3.0921 * (10 ** 16) # antineutrinos per fission
            # NOTE(review): judging by magnitudes, fpers looks like a per-second
            # rate and nuperf like a per-fission yield -- the inline comments may
            # be swapped; confirm against the reactor reference.
            nuperf = 6.14102
            self.__nuflux1m = nuperf * fpers / (4 * np.pi) * (meter_by_mev ** 2)
        elif self.fl_name in ['sns', 'prompt', 'delayed']:
            self.evMin = 0
            self.evMax = 52 # MeV
            self.flUn = 0.1
            # Overall SNS flux normalization, converted to natural units.
            self.__norm = 1.13 * (10 ** 11) * (meter_by_mev ** 2)
        elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
            # Tabulated solar spectra shipped with the package.
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + self.fl_name + '.csv'), delimiter=',')
            self.flUn = 0
            self.evMin = f[0, 0]
            self.evMax = f[-1, 0]
            self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
        else:
            # User-supplied 7-column table (or ndarray) with all six flavors.
            if isinstance(fl_name, np.ndarray):
                f = fl_name
            else:
                f = np.genfromtxt(fl_name, delimiter=delimiter)
            self.evMin = np.amin(f[:, 0])
            self.evMax = np.amax(f[:, 0])
            self.flUn = fl_unc
            self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
            self.__numu = LinearInterp(f[:, 0], f[:, 2] * ((100 * meter_by_mev) ** 2))
            self.__nutau = LinearInterp(f[:, 0], f[:, 3] * ((100 * meter_by_mev) ** 2))
            self.__nuebar = LinearInterp(f[:, 0], f[:, 4] * ((100 * meter_by_mev) ** 2))
            self.__numubar = LinearInterp(f[:, 0], f[:, 5] * ((100 * meter_by_mev) ** 2))
            self.__nutaubar = LinearInterp(f[:, 0], f[:, 6] * ((100 * meter_by_mev) ** 2))
    def flux(self, ev, flavor='e', f=None, **kwargs):
        """
        differential neutrino flux at the detector, unit MeV^-3*s^-1
        :param ev: nuetrino energy
        :param flavor: nuetrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
            the first argument must be neutrino energy,
            the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: neutrino flux
        """
        if self.fl_name == 'reactor':
            # Phys.Rev.D39, 11 Vogel
            # 5.323608902707208 = Integrate[Exp[.870 - .16*e - .091*e^2], {e, 0, 10}]
            # reactor neutrino is actually anti-neutrino, this may cause problem when doing electron scattering
            if flavor == 'ebar':
                if f is not None:
                    return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
                        f(ev, nui='ebar', nuf=flavor, **kwargs)
                return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * self.__nuflux1m
            elif flavor[-1] == 'r':
                # Other antineutrino flavors: only via oscillation from ebar.
                if f is not None:
                    return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
                        f(ev, nui='ebar', nuf=flavor, **kwargs)
                return 0
            else:
                return 0
        elif self.fl_name in ['sns', 'delayed']:
            # Delayed SNS spectrum: Michel-like nu_e and nubar_mu shapes.
            if flavor[-1] != 'r':
                if f is not None:
                    return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm * \
                        f(ev, nui='e', nuf=flavor, **kwargs)
                return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm \
                    if flavor == 'e' else 0
            else:
                if f is not None:
                    return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm * \
                        f(ev, nui='mubar', nuf=flavor, **kwargs)
                return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm if flavor == 'mubar' else 0
        elif self.fl_name == 'prompt':
            # Prompt flux is a monoenergetic 29 MeV line; it is added in the
            # integral methods (fint/fintinv/fintinvs), not here.
            return 0
        elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
            if flavor[-1] != 'r':
                if f is None:
                    f = survival_solar
                return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs)
            return 0
        else:
            # User-supplied table: sum oscillated contributions of all flavors.
            if flavor[-1] != 'r':
                if f is None:
                    if flavor == 'e':
                        return self.__nue(ev)
                    elif flavor == 'mu':
                        return self.__numu(ev)
                    elif flavor == 'tau':
                        return self.__nutau(ev)
                    else:
                        return 0
                return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs) + \
                    self.__numu(ev) * f(ev, nui='mu', nuf=flavor, **kwargs) + \
                    self.__nutau(ev) * f(ev, nui='tau', nuf=flavor, **kwargs)
            else:
                if f is None:
                    if flavor == 'ebar':
                        return self.__nuebar(ev)
                    elif flavor == 'mubar':
                        return self.__numubar(ev)
                    elif flavor == 'taubar':
                        return self.__nutaubar(ev)
                    else:
                        return 0
                return self.__nuebar(ev) * f(ev, nui='ebar', nuf=flavor, **kwargs) + \
                    self.__numubar(ev) * f(ev, nui='mubar', nuf=flavor, **kwargs) + \
                    self.__nutaubar(ev) * f(ev, nui='taubar', nuf=flavor, **kwargs)
    def fint(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
            the first argument must be neutrino energy,
            the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        # Minimum neutrino energy kinematically able to produce recoil er.
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def fx(ev):
            # Integrand: the (possibly oscillated) differential flux.
            return self.flux(ev, flavor, f, **kwargs)
        if not isinstance(emin, np.ndarray):
            res = quad(fx, emin, self.evMax)[0]  # no need to check range, because outside evMin and evMax are 0
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            # Vectorized path: integrate separately for each target mass.
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(fx, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
                        if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
                        if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin[i] <= 29 else 0
        return res
    def fintinv(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux/ev integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
            the first argument must be neutrino energy,
            the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        # Minimum neutrino energy kinematically able to produce recoil er.
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def finv(ev):
            """
            flux/ev
            """
            return self.flux(ev, flavor, f, **kwargs) / ev
        if not isinstance(emin, np.ndarray):
            res = quad(finv, emin, self.evMax)[0]
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439 \
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613 \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm / 29 if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            # Vectorized path: integrate separately for each target mass.
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(finv, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
                        1.439 if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
                        0.8613 if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm / 29 if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) \
                            if emin[i] <= 29 else 0
        return res
    def fintinvs(self, er, m, flavor='e', f=None, **kwargs):
        """
        flux/ev^2 integration over the range that can produce a recoil energy er
        :param er: recoil energy
        :param m: mass of the target, it can be an array
        :param flavor: neutrino flavor
        :param f: function that convolves with neutrino flux, typically neutrino oscillation,
            the first argument must be neutrino energy,
            the last two arguments must be input flavor nui and out put flavor nuf
        :param kwargs: parameters with keys that goes into function f
        :return: the result of integration, it can be an array
        """
        # Minimum neutrino energy kinematically able to produce recoil er.
        emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
        def finvs(ev):
            """
            flux/ev^2
            """
            return self.flux(ev, flavor, f, **kwargs) / (ev ** 2)
        if not isinstance(emin, np.ndarray):
            res = quad(finvs, emin, self.evMax)[0]
            if self.fl_name == 'solar':
                if f is None:
                    f = survival_solar
                # pep
                res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439**2\
                    if emin < 1.439 else 0
                # be7
                res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613**2 \
                    if emin < 0.8613 else 0
            elif self.fl_name in ['sns', 'prompt']:
                if f is None and flavor == 'mu':
                    # prompt neutrino
                    res += self.__norm / 29**2 if emin <= 29 else 0
                elif f is not None and flavor[-1] != 'r':
                    res += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
        else:
            # Vectorized path: integrate separately for each target mass.
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(finvs, emin[i], self.evMax)[0]
                if self.fl_name == 'solar':
                    if f is None:
                        f = survival_solar
                    # pep
                    res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
                        1.439**2 if emin[i] < 1.439 else 0
                    # be7
                    res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
                        0.8613**2 if emin[i] < 0.8613 else 0
                elif self.fl_name in ['sns', 'prompt']:
                    if f is None and flavor == 'mu':
                        # prompt neutrino
                        res[i] += self.__norm / 29**2 if emin[i] <= 29 else 0
                    elif f is not None and flavor[-1] != 'r':
                        res[i] += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) \
                            if emin[i] <= 29 else 0
        return res
class NeutrinoFluxFactory:
    """
    Factory that builds :class:`NeutrinoFlux` objects for a set of named
    sources (solar chains, COHERENT/SNS, JSNS2, DUNE beams, atmospheric).

    Fixes relative to the previous revision:
    * ``'solar_pep'`` and ``'solar_be7'`` were advertised in ``flux_list`` but
      ``get()`` only matched ``'pep'``/``'be7'``, so requesting the advertised
      names silently returned ``None``; both spellings are now accepted.
    * ``'jsns'`` had a fully implemented branch in ``get()`` but was missing
      from ``flux_list``, making it unreachable; it is now listed.
    """
    def __init__(self):
        self.flux_list = ['solar', 'solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp',
                          'solar_pep', 'solar_be7', 'coherent', 'coherent_prompt', 'coherent_delayed',
                          'far_beam_nu', 'far_beam_nubar', 'atmospheric', 'jsns', 'jsns_prompt', 'jsns_delayed',
                          'jsns_prompt_continuous', 'near_beam_nu', 'near_beam_nubar',]

    def print_available(self):
        """Print the list of flux names accepted by :meth:`get`."""
        print(self.flux_list)

    def interp_flux(self, nrg, data):
        """Linearly interpolate a 2-column (energy, flux) table at energy ``nrg``."""
        return np.interp(nrg, data[:,0], data[:,1])

    def get(self, flux_name, **kwargs):
        """
        Build and return the :class:`NeutrinoFlux` for ``flux_name``.

        :param flux_name: one of the names in ``self.flux_list``
        :param kwargs: extra options; ``npoints`` for 'coherent_delayed',
            ``zenith`` (required) for 'atmospheric'
        :raises Exception: when ``flux_name`` is unknown or ``zenith`` is
            missing/invalid for the atmospheric flux
        """
        if flux_name not in self.flux_list:
            print('flux name not in current list: ', self.flux_list)
            raise Exception('flux not found.')
        if flux_name in ['solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp']:
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name[6:] + '.csv'), delimiter=',')
            return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]})
        if flux_name == 'solar':
            f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name + '.csv'), delimiter=',')
            # Continuous spectrum plus the monoenergetic pep and be7 lines.
            return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]}, delta_fluxes={'e': [(1.439, 1.44e8), (0.8613, 5e9)]})
        if flux_name in ('solar_pep', 'pep'):
            return NeutrinoFlux(delta_fluxes={'e': [(1.439, 1.44e8), ]})
        if flux_name in ('solar_be7', 'be7'):
            return NeutrinoFlux(delta_fluxes={'e': [(0.8613, 5e9), ]})
        if flux_name == 'coherent':
            def de(evv):
                return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
            def dmubar(evv):
                return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
            ev = np.linspace(0.001, 52, 100)
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)},
                                delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7)) ## default unit is /(cm^2*s)
        if flux_name == 'coherent_delayed':
            def de(evv):
                return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
            def dmubar(evv):
                return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
            ev = np.linspace(0.001, 52, kwargs['npoints'] if 'npoints' in kwargs else 100)
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)}, norm=1.13 * (10 ** 7))
        if flux_name == 'coherent_prompt':
            return NeutrinoFlux(delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7))
        if flux_name == 'jsns':
            nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
            nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
            nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
            # Normalize each tabulated spectrum to a unit-area PDF.
            norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
            norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
            norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
            def numuPDF(energy):
                return self.interp_flux(energy, nu_mu) / norm_nu_mu
            def nuePDF(energy):
                return self.interp_flux(energy, nu_e) / norm_nu_e
            def nubarmuPDF(energy):
                return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev), 'mu': numuPDF(ev)},
                                delta_fluxes={'mu': [(29, 1),(236, 0.013)]}, norm=4.9 * (10 ** 7)) ## default unit is /(cm^2*s)
        if flux_name == 'jsns_delayed':
            nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
            nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
            norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
            norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
            def nuePDF(energy):
                return self.interp_flux(energy, nu_e) / norm_nu_e
            def nubarmuPDF(energy):
                return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev)}, norm=3 * (10 ** 7))
        if flux_name == 'jsns_prompt':
            return NeutrinoFlux(delta_fluxes={'mu': [(29, 1),(236, 0.013)]}, norm=1.85 * (10 ** 7))
        if flux_name == 'jsns_prompt_continuous':
            nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
            norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
            def numuPDF(energy):
                return self.interp_flux(energy, nu_mu) / norm_nu_mu
            edges = np.arange(0, 302, 2)  # energy bin edges
            ev = (edges[:-1] + edges[1:]) / 2
            return NeutrinoFlux(continuous_fluxes={'ev': ev, 'mu': numuPDF(ev)},
                                norm=1.85 * (10 ** 4))
        if flux_name == 'far_beam_nu':
            far_beam_txt = 'data/dune_beam_fd_nu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'far_beam_nubar':
            far_beam_txt = 'data/dune_beam_fd_antinu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'near_beam_nu':
            far_beam_txt = 'data/dune_beam_nd_nu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'near_beam_nubar':
            far_beam_txt = 'data/dune_beam_nd_antinu_flux_120GeVoptimized.txt'
            f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
            nu = {'ev': f_beam[:, 0],
                  'e': f_beam[:, 1],
                  'mu': f_beam[:, 2],
                  'ebar': f_beam[:, 4],
                  'mubar': f_beam[:, 5]}
            return NeutrinoFlux(continuous_fluxes=nu)
        if flux_name == 'atmospheric':
            if 'zenith' not in kwargs:
                raise Exception('please specify zenith angle')
            zen = np.round(kwargs['zenith'], decimals=3)
            zen_list = np.round(np.linspace(-0.975, 0.975, 40), decimals=3)
            if zen not in zen_list:
                print('available choice of zenith angle: ', zen_list)
                raise Exception('zenith angle not available')
            # Each zenith bin occupies a contiguous 61-row slice of the table.
            idx = (0.975 - zen) / 0.05 * 61
            f_atmos = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/atmos.txt'), delimiter=',')
            nu = {'ev': f_atmos[int(round(idx)):int(round(idx))+61, 0],
                  'e': f_atmos[int(round(idx)):int(round(idx))+61, 2],
                  'mu': f_atmos[int(round(idx)):int(round(idx))+61, 3],
                  'ebar': f_atmos[int(round(idx)):int(round(idx))+61, 5],
                  'mubar': f_atmos[int(round(idx)):int(round(idx))+61, 6]}
            return NeutrinoFlux(continuous_fluxes=nu)
class NeutrinoFlux:
def __init__(self, continuous_fluxes=None, delta_fluxes=None, norm=1):
self.norm = norm * ((100 * meter_by_mev) ** 2)
self.ev_min = None
self.ev_max = None
if continuous_fluxes is None:
self.nu = None
elif isinstance(continuous_fluxes, dict):
self.ev = continuous_fluxes['ev']
sorted_idx = np.argsort(self.ev)
self.ev = self.ev[sorted_idx]
self.ev_min = self.ev[0]
self.ev_max = self.ev[-1]
if self.ev_min == 0:
raise Exception('flux with neutrino energy equal to zeros is not supported. '
'please consider using a small value for your lower bound.')
self.nu = {'e': continuous_fluxes['e'][sorted_idx] if 'e' in continuous_fluxes else None,
'mu': continuous_fluxes['mu'][sorted_idx] if 'mu' in continuous_fluxes else None,
'tau': continuous_fluxes['tau'][sorted_idx] if 'tau' in continuous_fluxes else None,
'ebar': continuous_fluxes['ebar'][sorted_idx] if 'ebar' in continuous_fluxes else None,
'mubar': continuous_fluxes['mubar'][sorted_idx] if 'mubar' in continuous_fluxes else None,
'taubar': continuous_fluxes['taubar'][sorted_idx] if 'taubar' in continuous_fluxes else None}
self.binw = self.ev[1:] - self.ev[:-1]
self.precalc = {None: {flr: self.binw*(flx[1:]+flx[:-1])/2 if flx is not None else None for flr, flx in self.nu.items()}}
else:
raise Exception('only support dict as input.')
if delta_fluxes is None:
self.delta_nu = None
elif isinstance(delta_fluxes, dict):
self.delta_nu = {'e': delta_fluxes['e'] if 'e' in delta_fluxes else None,
'mu': delta_fluxes['mu'] if 'mu' in delta_fluxes else None,
'tau': delta_fluxes['tau'] if 'tau' in delta_fluxes else None,
'ebar': delta_fluxes['ebar'] if 'ebar' in delta_fluxes else None,
'mubar': delta_fluxes['mubar'] if 'mubar' in delta_fluxes else None,
'taubar': delta_fluxes['taubar'] if 'taubar' in delta_fluxes else None}
for flavor in self.delta_nu: # grab the maximum energy of the delta fluxes
if self.delta_nu[flavor] is None:
continue
energies = [self.delta_nu[flavor][i][0] for i in range(len(self.delta_nu[flavor]))]
if self.ev_max is None or max(energies) > self.ev_max:
self.ev_max = max(energies)
else:
raise Exception("'delta_fluxes' must be a dictionary of a list of tuples! e.g. {'e': [(12, 4), (14, 15)], ...}")
def __call__(self, ev, flavor):
if self.nu is None or self.nu[flavor] is None:
return 0
if ev == self.ev_min:
return self.nu[flavor][0] * self.norm
if ev == self.ev_max:
return self.nu[flavor][-1] * self.norm
if self.ev_min < ev < self.ev_max:
idx = self.ev.searchsorted(ev)
l1 = ev - self.ev[idx - 1]
l2 = self.ev[idx] - ev
h1 = self.nu[flavor][idx - 1]
h2 = self.nu[flavor][idx]
return (l1*h2+l2*h1)/(l1+l2) * self.norm
return 0
def integrate(self, ea, eb, flavor, weight_function=None):
"""
Please avoid using lambda as your weight_function!!!
:param ea:
:param eb:
:param flavor:
:param weight_function:
:return:
"""
if eb <= ea:
return 0
res = 0
if self.delta_nu is not None and self.delta_nu[flavor] is not None:
for deltas in self.delta_nu[flavor]:
if ea < deltas[0] <= eb: # self.ev_max should be included with <=
res += deltas[1] if weight_function is None else deltas[1]*weight_function(deltas[0])
if self.nu is not None and self.nu[flavor] is not None:
if weight_function not in self.precalc:
weight = weight_function(self.ev)
self.precalc[weight_function] = {flr: self.binw*((flx*weight)[1:]+(flx*weight)[:-1])/2
if flx is not None else None for flr, flx in self.nu.items()}
eb = min(eb, self.ev_max)
ea = max(ea, self.ev_min)
idxmin = self.ev.searchsorted(ea, side='right')
idxmax = self.ev.searchsorted(eb, side='left')
if idxmin == idxmax:
l1 = ea - self.ev[idxmin - 1]
l2 = self.ev[idxmin] - ea
h1 = self.nu[flavor][idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
if weight_function is not None else self.nu[flavor][idxmin - 1]
h2 = self.nu[flavor][idxmin] * weight_function(self.ev[idxmin]) \
if weight_function is not None else self.nu[flavor][idxmin]
ha = (l1*h2+l2*h1)/(l1+l2)
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
hb = (l1*h2+l2*h1)/(l1+l2)
return (ha + hb) * (eb - ea) / 2 * self.norm
res += np.sum(self.precalc[weight_function][flavor][idxmin:idxmax-1])
l1 = ea - self.ev[idxmin-1]
l2 = self.ev[idxmin] - ea
h1 = self.nu[flavor][idxmin-1]*weight_function(self.ev[idxmin-1]) \
if weight_function is not None else self.nu[flavor][idxmin-1]
h2 = self.nu[flavor][idxmin]*weight_function(self.ev[idxmin]) \
if weight_function is not None else self.nu[flavor][idxmin]
res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
h1 = self.nu[flavor][idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
if weight_function is not None else self.nu[flavor][idxmax-1]
h2 = self.nu[flavor][idxmax] * weight_function(self.ev[idxmax]) \
if weight_function is not None else self.nu[flavor][idxmax]
res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
return res * self.norm
    def change_parameters(self):
        """Placeholder hook for updating flux parameters; intentionally a no-op in this class."""
        pass
class DMFlux:
    """
    Dark matter flux at COHERENT

    Monte-Carlo samples dark photons produced in pi- absorption at the target,
    decays each into a pair of dark-matter particles, and records the arrival
    time and energy of every particle that can geometrically reach the detector.
    """
    def __init__(self, dark_photon_mass, life_time, coupling_quark, dark_matter_mass,
                 detector_distance=19.3, pot_mu=0.75, pot_sigma=0.25, size=100000, mono_energy=None):
        """
        initialize and generate flux
        :param dark_photon_mass: dark photon mass
        :param life_time: life time of dark photon in rest frame, unit in micro second
        :param coupling_quark: dark photon coupling to quarks
        :param dark_matter_mass: mass of dark matter, unit in MeV
        :param detector_distance: distance from the detector to the Hg target
        :param pot_mu: mean of guassian distribution of proton on target, unit in micro second
        :param pot_sigma: std of guassian distribution of proton on target, unit in micro second
        :param size: size of sampling dark photons
        :param mono_energy: if given, use a narrow mono-energetic flux at this
            energy instead of running the full simulation
        """
        self.dp_m = dark_photon_mass
        self.dm_m = dark_matter_mass
        self.epsi_quark = coupling_quark
        # lengths/times below are converted to natural units via meter_by_mev and c_light
        self.det_dist = detector_distance / meter_by_mev
        self.dp_life = life_time * 1e-6 * c_light / meter_by_mev
        self.pot_mu = pot_mu * 1e-6 * c_light / meter_by_mev
        self.pot_sigma = pot_sigma * 1e-6 * c_light / meter_by_mev
        if mono_energy is None:
            self.timing, self.energy = self._generate(size)
        else:
            self.timing, self.energy = self._mono_flux(mono_energy, pot_mu)
        self.ed_min = self.energy.min()
        self.ed_max = self.energy.max()
        # Flux normalization; the accepted-fraction factor timing.shape[0]*2/size accounts
        # for the two DM particles per dark photon and the geometric acceptance.
        # NOTE(review): 0.23*1e20 looks like POT/day times a pi- production fraction
        # (cf. pot_rate * pion_rate in DMFluxFromPiMinusAbsorption) -- TODO confirm.
        self.dm_norm = self.epsi_quark**2*0.23*1e20 / (4*np.pi*(detector_distance**2)*24*3600) * (meter_by_mev**2) * \
                       self.timing.shape[0] * 2 / size
    def _generate(self, size=1000000):
        """
        generate dark matter flux at COHERENT
        :param size: size of sampling dark photons
        :return: time and energy histogram of dark matter
        """
        dp_m = self.dp_m
        # two-body kinematics of pi- + p -> A' + n: dark photon energy in the lab frame
        dp_e = ((massofpi+massofp)**2 - massofn**2 + dp_m**2)/(2*(massofpi+massofp))
        dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
        dp_v = dp_p / dp_e
        gamma = dp_e / dp_m
        tau = self.dp_life * gamma  # dilated lifetime in the lab frame
        tf = np.random.normal(self.pot_mu, self.pot_sigma, size)  # POT
        t = np.random.exponential(tau, size)  # life time of each dark photon
        cs = np.random.uniform(-1, 1, size)  # direction of each dark photon
        # in rest frame
        estar = dp_m / 2
        pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
        pstarx = pstar * cs
        pstary = pstar * np.sqrt(1 - cs ** 2)
        # boost to lab frame
        elab = gamma * (estar + dp_v * pstarx)
        plabx = gamma * (pstarx + dp_v * estar)
        plaby = pstary
        vx = plabx / elab
        vy = plaby / elab
        timing = []
        energy = []
        for i in range(size):
            # solve |decay point + v * t_dm| = det_dist for the DM travel time t_dm
            # (quadratic a*t^2 + b*t + cc = 0); both roots are considered.
            a = vx[i] ** 2 + vy[i] ** 2
            b = 2 * vx[i] * t[i] * dp_v
            cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
            if b ** 2 - 4 * a * cc >= 0:
                if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
                    timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
                    energy.append(elab[i])
                if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
                    timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
                    energy.append(elab[i])
        # convert timing from natural units back to microseconds
        return np.array(timing) / c_light * meter_by_mev * 1e6, np.array(energy)
    def _mono_flux(self, e_chi, t_trig, size=1000):
        """Narrow gaussian timing/energy flux around (t_trig, e_chi) for mono-energetic tests."""
        return np.random.normal(loc=t_trig, scale=0.01*t_trig, size=size), np.random.normal(loc=e_chi, scale=0.005*e_chi, size=size)
    def flux(self, ev):
        """
        dark matter flux
        :param ev: dark matter energy
        :return: dark matter flux (flat within [ed_min, ed_max], zero outside)
        """
        return 1/(self.ed_max-self.ed_min)*self.dm_norm if self.ed_min <= ev <= self.ed_max else 0
    def fint(self, er, m, **kwargs):
        """
        flux/(ex^2-mx^2) integration
        :param er: recoil energy in MeV
        :param m: target nucleus mass in MeV
        :param kwargs: other argument
        :return: flux/(ex^2-mx^2) integration
        """
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        # NOTE(review): the kinematic threshold computed above is deliberately discarded --
        # multiplying by 0.0 zeroes emin while preserving its scalar/ndarray shape, which
        # the isinstance branch below relies on.  Confirm this lower bound is intended.
        emin = 0.0 * emin
        def integrand(ex):
            return self.flux(ex)/(ex**2 - self.dm_m**2)
        if not isinstance(emin, np.ndarray):
            res = quad(integrand, emin, self.ed_max)[0]
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(integrand, emin[i], self.ed_max)[0]
        return res
    def fint1(self, er, m, **kwargs):
        """
        flux*ex/(ex^2-mx^2) integration
        :param er: recoil energy in MeV
        :param m: target nucleus mass in MeV
        :param kwargs: other argument
        :return: flux*ex/(ex^2-mx^2) integration
        """
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        # NOTE(review): threshold zeroed on purpose, see fint().
        emin = 0.0 * emin
        def integrand(ex):
            return self.flux(ex) * ex / (ex ** 2 - self.dm_m ** 2)
        if not isinstance(emin, np.ndarray):
            res = quad(integrand, emin, self.ed_max)[0]
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(integrand, emin[i], self.ed_max)[0]
        return res
    def fint2(self, er, m, **kwargs):
        """
        flux*ex^2/(ex^2-mx^2) integration
        :param er: recoil energy in MeV
        :param m: target nucleus mass in MeV
        :param kwargs: other argument
        :return: flux*ex^2/(ex^2-mx^2) integration
        """
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        # NOTE(review): threshold zeroed on purpose, see fint().
        emin = 0.0 * emin
        def integrand(ex):
            return self.flux(ex) * ex**2 / (ex ** 2 - self.dm_m ** 2)
        if not isinstance(emin, np.ndarray):
            res = quad(integrand, emin, self.ed_max)[0]
        else:
            res = np.zeros_like(emin)
            for i in range(emin.shape[0]):
                res[i] = quad(integrand, emin[i], self.ed_max)[0]
        return res
class DMFluxIsoPhoton(FluxBaseContinuous):
    """
    Dark matter flux from dark photons produced by photons at the target.

    Each entry of ``photon_distribution`` ([photon energy, event count]) is
    converted into dark photons that decay to dark-matter pairs; the arrival
    time, energy and weight of every DM particle that can reach the detector
    are recorded and binned into the energy histogram held by the
    FluxBaseContinuous base class.
    """
    def __init__(self, photon_distribution, dark_photon_mass, coupling, dark_matter_mass, life_time=0.001,
                 detector_distance=19.3, pot_rate=5e20, pot_sample=100000, brem_suppress=True,
                 pot_mu=0.7, pot_sigma=0.15, sampling_size=100, nbins=20, verbose=False):
        """
        :param photon_distribution: iterable of [photon energy, event count] pairs
        :param dark_photon_mass: dark photon mass, in MeV
        :param coupling: dark photon coupling
        :param dark_matter_mass: dark matter mass, in MeV
        :param life_time: dark photon rest-frame life time, in micro seconds
        :param detector_distance: target-detector distance, in meters
        :param pot_rate: protons on target per day
        :param pot_sample: number of POT represented by photon_distribution
        :param brem_suppress: apply bremsstrahlung phase-space suppression
        :param pot_mu: mean of the POT timing gaussian, in micro seconds
        :param pot_sigma: std of the POT timing gaussian, in micro seconds
        :param sampling_size: dark photons sampled per photon entry
        :param nbins: number of energy histogram bins
        :param verbose: print per-event progress information
        """
        self.nbins = nbins
        self.photon_flux = photon_distribution
        self.dp_m = dark_photon_mass
        self.dm_m = dark_matter_mass
        self.epsilon = coupling
        self.life_time = life_time  # input in mus, internal in s
        self.det_dist = detector_distance  # meters
        self.pot_rate = pot_rate  # the number of POT/day in the experiment
        self.pot_mu = pot_mu
        self.pot_sigma = pot_sigma
        self.pot_sample = pot_sample  # the number of POT in photon_distribution
        self.time = []
        self.energy = []
        self.weight = []
        self.norm = 1
        self.sampling_size = sampling_size
        self.supp = brem_suppress  # add phase space suppression
        self.verbose = verbose
        for photon_events in photon_distribution:
            if self.verbose:
                print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
            self._generate_single(photon_events, self.sampling_size)
        normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
                        / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
        self.norm = normalization
        self.weight = [x * self.norm for x in self.weight]
        self.timing = np.array(self.time) * 1e6  # seconds -> micro seconds
        hist, bin_edges = np.histogram(self.energy, bins=nbins, weights=self.weight, density=True)
        super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
    def getScaledWeights(self):
        """Return the event weights rescaled to per-second, per-m^2 units."""
        wgt = self.weight
        wgt = [x * self.norm * 24 * 3600 / (meter_by_mev**2) for x in wgt]
        return wgt
    def simulate(self):
        """Re-run the full sampling and rebuild the histogram (e.g. after a parameter change)."""
        self.time = []
        self.energy = []
        self.weight = []
        normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
                        / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
        self.norm = normalization
        for photon_events in self.photon_flux:
            if self.verbose:
                print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
            self._generate_single(photon_events, self.sampling_size)
        self.weight = [x * self.norm for x in self.weight]
        self.timing = np.array(self.time) * 1e6
        hist, bin_edges = np.histogram(self.energy, bins=self.nbins, weights=self.weight, density=True)
        super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
    def _generate_single(self, photon_events, nsamples):
        """Sample nsamples dark photons for one [energy, count] photon entry."""
        # Initiate photon position, energy and momentum.
        if photon_events[0]**2 < self.dp_m**2:
            return  # photon energy below dark photon mass: production kinematically forbidden
        dp_m = self.dp_m
        dp_e = photon_events[0]
        dp_p = np.sqrt(dp_e ** 2 - self.dp_m ** 2)
        dp_momentum = np.array([dp_e, 0, 0, dp_p])
        # dark photon to dark matter
        dm_m = self.dm_m
        dm_e = self.dp_m / 2
        dm_p = np.sqrt(dm_e ** 2 - dm_m ** 2)
        # Directional sampling.
        dp_wgt = photon_events[1] / nsamples  # Event weight
        # Brem suppression
        if self.supp:
            # NOTE(review): empirical fit constants below -- most likely electron
            # energy producing this dark photon, and the resulting suppression factor.
            el_e = 1.0773*dp_e + 13.716
            supp_fact = min(1, 1154 * np.exp(-24.42 * np.power(dp_m/el_e, 0.3174)))
            dp_wgt *= supp_fact
        t = np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6, nsamples)
        t_dp = np.random.exponential(1e-6 * self.life_time * dp_momentum[0] / dp_m, nsamples)
        t += t_dp
        csd = np.random.uniform(-1, 1, nsamples)
        phid = np.random.uniform(0, 2 * np.pi, nsamples)
        boost_matr = lorentz_matrix(np.array([-dp_momentum[1] / dp_momentum[0],
                                              -dp_momentum[2] / dp_momentum[0],
                                              -dp_momentum[3] / dp_momentum[0]]))
        pos_z = c_light * t_dp * dp_momentum[3] / dp_momentum[0]  # position is along z by construction
        for i in range(nsamples):
            dm_momentum = np.array([dm_e, dm_p * np.sqrt(1 - csd[i] ** 2) * np.cos(phid[i]),
                                    dm_p * np.sqrt(1 - csd[i] ** 2) * np.sin(phid[i]), dm_p * csd[i]])
            dm_momentum = boost_matr @ dm_momentum
            # dark matter arrives at detector, assuming azimuthal symmetric
            # append the time and energy spectrum of the DM.
            # DM particle 1
            v = dm_momentum[1:] / dm_momentum[0] * c_light
            a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
            b = 2*v[2]*pos_z[i]  # dot product is along z by construction
            c = pos_z[i]**2 - self.det_dist ** 2
            if b ** 2 - 4 * a * c >= 0:
                t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append(dm_momentum[0])
                    self.weight.append(dp_wgt)
                t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append(dm_momentum[0])
                    self.weight.append(dp_wgt)
            # DM particle 2 (the recoil partner carries the remaining four-momentum)
            v = (dp_momentum - dm_momentum)[1:] / (dp_momentum - dm_momentum)[0] * c_light
            a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
            # bug fix: was the redundant double assignment "b = b = ..."
            b = 2*v[2]*pos_z[i]
            c = pos_z[i]**2 - self.det_dist ** 2
            if b ** 2 - 4 * a * c >= 0:
                t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append((dp_momentum - dm_momentum)[0])
                    self.weight.append(dp_wgt)
                t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
                if t_dm >= 0:
                    if self.verbose:
                        print("adding weight", dp_wgt)
                    self.time.append(t[i]+t_dm)
                    self.energy.append((dp_momentum - dm_momentum)[0])
                    self.weight.append(dp_wgt)
    def fint(self, er, m):
        """Integral of flux/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
        return res
    def fint1(self, er, m):
        """Integral of flux*ev/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
        return res
    def fint2(self, er, m):
        """Integral of flux*ev^2/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
        return res
    def f0(self, ev):
        """Propagator weight 1/(ev^2 - dm_m^2)."""
        return 1/(ev**2 - self.dm_m**2)
    def f1(self, ev):
        """Propagator weight ev/(ev^2 - dm_m^2)."""
        return ev/(ev**2 - self.dm_m**2)
    def f2(self, ev):
        """Propagator weight ev^2/(ev^2 - dm_m^2)."""
        return ev**2 / (ev**2 - self.dm_m**2)
class DMFluxFromPiMinusAbsorption:
r"""
Dark matter flux from pi^- + p -> A^\prime + n -> \chi + \chi + n
"""
def __init__(self, dark_photon_mass, coupling_quark, dark_matter_mass, life_time=0.001,
detector_distance=19.3, pot_rate=5e20, pot_mu=0.7, pot_sigma=0.15, pion_rate=18324/500000,
sampling_size=100000):
"""
initialize and generate flux
default values are COHERENT experiment values
:param dark_photon_mass: dark photon mass
:param life_time: life time of dark photon in rest frame, unit in micro second
:param coupling_quark: dark photon coupling to quarks divided by electron charge
:param dark_matter_mass: mass of dark matter, unit in MeV
:param detector_distance: distance from the detector to the target
:param pot_rate: proton on target rate, unit POT/day
:param pot_mu: mean of guassian distribution of proton on target, unit in micro second
:param pot_sigma: std of guassian distribution of proton on target, unit in micro second
:param pion_rate: pi^- production rate
:param sampling_size: size of sampling dark photons
"""
self.dp_m = dark_photon_mass
self.dm_m = dark_matter_mass
self.epsi_quark = coupling_quark
self.det_dist = detector_distance / meter_by_mev
self.life_time = life_time # input in mus, internal in s
self.pot_mu = pot_mu
self.pot_sigma = pot_sigma
self.pot_rate = pot_rate
self.pion_rate = pion_rate
self.sampling_size = sampling_size
self.timing = []
self.energy = []
self.ed_min = None
self.ed_max = None
self.norm = None
self.simulate()
self.ev_min = self.ed_min
self.ev_max = self.ed_max
def get_lifetime(self, g, m):
return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
def simulate(self):
"""
generate dark matter flux
"""
# First check that the dp mass is less than the pi- mass.
if self.dp_m > massofpi:
self.norm = 0.0
return
dp_m = self.dp_m
dp_e = ((massofpi + massofp) ** 2 - massofn ** 2 + dp_m ** 2) / (2 * (massofpi + massofp))
dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
dp_v = dp_p / dp_e
gamma = dp_e / dp_m
tau = (self.life_time * 1e-6 * c_light / meter_by_mev) * gamma
tf = np.random.normal(self.pot_mu * 1e-6 * c_light / meter_by_mev,
self.pot_sigma * 1e-6 * c_light / meter_by_mev,
self.sampling_size) # POT
t = np.random.exponential(tau, self.sampling_size) # life time of each dark photon
cs = np.random.uniform(-1, 1, self.sampling_size) # direction of each dark photon
# in rest frame
estar = dp_m / 2
pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
pstarx = pstar * cs
pstary = pstar * np.sqrt(1 - cs ** 2)
# boost to lab frame
elab = gamma * (estar + dp_v * pstarx)
plabx = gamma * (pstarx + dp_v * estar)
plaby = pstary
vx = plabx / elab
vy = plaby / elab
timing = []
energy = []
for i in range(self.sampling_size):
a = vx[i] ** 2 + vy[i] ** 2
b = 2 * vx[i] * t[i] * dp_v
cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
if b ** 2 - 4 * a * cc >= 0:
if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
self.timing = np.array(timing) / c_light * meter_by_mev * 1e6
self.energy = np.array(energy)
self.ed_min = min(energy)
self.ed_max = max(energy)
self.ev_min = self.ed_min
self.ev_max = self.ed_max
self.norm = self.epsi_quark ** 2 * self.pot_rate * self.pion_rate / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * \
self.timing.shape[0] * 2 / self.sampling_size
def __call__(self, ev):
"""
dark matter flux, the spectrum is flat because of isotropic
:param ev: dark matter energy
:return: dark matter flux
"""
return 1 / (self.ed_max - self.ed_min) * self.norm if self.ed_min <= ev <= self.ed_max else 0
def integrate(self, ea, eb, weight_function=None):
"""
adaptive quadrature can achieve almost linear time on simple weight function, no need to do precalculation
:param ea: lowerbound
:param eb: upperbound
:param weight_function: weight function
:return: integration of the flux, weighted by the weight function
"""
if eb <= ea:
return 0
eb = min(eb, self.ed_max)
ea = max(ea, self.ed_min)
if weight_function is None:
return (eb - ea) / (self.ed_max - self.ed_min) * self.norm
return quad(weight_function, ea, eb, epsrel=1e-3)[0] / (self.ed_max - self.ed_min) * self.norm
def change_parameters(self, dark_photon_mass=None, life_time=None, coupling_quark=None, dark_matter_mass=None,
detector_distance=None, pot_rate=None, pot_mu=None, pot_sigma=None, pion_rate=None, sampling_size=None):
self.dp_m = dark_photon_mass if dark_photon_mass is not None else self.dp_m
self.dp_life = life_time * 1e-6 * c_light / meter_by_mev if life_time is not None else self.dp_life
self.epsi_quark = coupling_quark if coupling_quark is not None else self.epsi_quark
self.dm_m = dark_matter_mass if dark_matter_mass is not None else self.dm_m
self.det_dist = detector_distance / meter_by_mev if detector_distance is not None else self.det_dist
self.pot_rate = pot_rate if pot_rate is not None else self.pot_rate
self.pot_mu = pot_mu * 1e-6 * c_light / meter_by_mev if pot_mu is not None else self.pot_mu
self.pot_sigma = pot_sigma * 1e-6 * c_light / meter_by_mev if pot_sigma is not None else self.pot_sigma
self.pion_rate = self.pion_rate if pion_rate is not None else self.pion_rate
self.sampling_size = sampling_size if sampling_size is not None else self.sampling_size
self.simulate()
def fint(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
return res
def fint1(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
return res
def fint2(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
return res
def f0(self, ev):
return 1/(ev**2 - self.dm_m**2)
def f1(self, ev):
return ev/(ev**2 - self.dm_m**2)
def f2(self, ev):
return ev**2 / (ev**2 - self.dm_m**2)
class DMFluxFromPi0Decay(FluxBaseContinuous):
    """
    Dark matter flux from pi0 -> gamma + A' -> gamma + chi + chi.

    z direction is the direction of the beam
    """
    def __init__(self, pi0_distribution, dark_photon_mass, coupling_quark, dark_matter_mass, meson_mass=massofpi0, life_time=0.001,
                 detector_distance=19.3, detector_direction=0, detector_width=0.1, pot_rate=5e20, pot_mu=0.7,
                 pot_sigma=0.15, pion_rate=52935/500000, nbins=20):
        """
        :param pi0_distribution: iterable of pi0 events, each [azimuth, cos(zenith), kinetic energy]
        :param dark_photon_mass: dark photon mass, in MeV
        :param coupling_quark: dark photon coupling to quarks
        :param dark_matter_mass: dark matter mass, in MeV
        :param meson_mass: decaying meson mass (defaults to the pi0 mass)
        :param life_time: dark photon rest-frame life time, in micro seconds
        :param detector_distance: target-detector distance, in meters
        :param detector_direction: cos(zenith) of the detector center
        :param detector_width: angular width of the detector, in cos(zenith)
        :param pot_rate: protons on target per day
        :param pot_mu: mean of the POT timing gaussian, in micro seconds
        :param pot_sigma: std of the POT timing gaussian, in micro seconds
        :param pion_rate: pi0 production rate per POT
        :param nbins: number of energy histogram bins
        """
        self.pi0_distribution = pi0_distribution
        self.dp_m = dark_photon_mass
        self.life_time = life_time
        self.epsilon = coupling_quark  # input in mus, internal in s
        # bug fix: dark_matter_mass was assigned twice; keep a single assignment
        self.dm_m = dark_matter_mass
        self.meson_mass = meson_mass
        self.det_dist = detector_distance
        self.det_direc = detector_direction
        self.det_width = detector_width
        self.pot_rate = pot_rate
        self.pot_mu = pot_mu
        self.pot_sigma = pot_sigma
        self.pion_rate = pion_rate
        self.time = []
        self.energy = []
        self.nbins = nbins
        for pi0_events in pi0_distribution:  # must be in the form [azimuth, cos(zenith), kinetic energy]
            self._generate_single(pi0_events)
        self.timing = np.array(self.time)*1e6  # seconds -> micro seconds
        hist, bin_edges = np.histogram(self.energy, bins=nbins, density=True)
        # phase-space factor; the Heaviside zeroes the flux when the dark photon is
        # heavier than the meson
        ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
        super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist,
                         norm=ps_factor*pot_rate*pion_rate*len(self.time)/len(pi0_distribution)/
                              (2*np.pi*(min(1.0, detector_direction+detector_width/2)-max(-1.0, detector_direction-detector_width/2))*detector_distance**2*24*3600)
                              *(meter_by_mev**2))
    def get_lifetime(self, g, m):
        """Dark photon lifetime for coupling g and mass m (converted via mev_per_hz)."""
        return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
    def simulate(self):
        """Re-run the full sampling and rebuild the histogram (e.g. after a parameter change)."""
        self.time = []
        self.energy = []
        for pi0_events in self.pi0_distribution:  # must be in the form [azimuth, cos(zenith), kinetic energy]
            self._generate_single(pi0_events)
        self.timing = np.array(self.time)*1e6
        hist, bin_edges = np.histogram(self.energy, bins=self.nbins, density=True)
        ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
        norm = ps_factor * self.pot_rate * self.pion_rate * \
               len(self.time)/len(self.pi0_distribution)/ \
               (2*np.pi*(min(1.0, self.det_direc+self.det_width/2)-max(-1.0, self.det_direc-self.det_width/2))*self.det_dist**2*24*3600)*(meter_by_mev**2)
        super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist, norm=norm)
    def _generate_single(self, pi0_events):
        """Propagate one pi0 event through decay to DM and record detector hits."""
        if self.dp_m > self.meson_mass:
            return  # dark photon heavier than the meson: decay forbidden
        pos = np.zeros(3)
        t = 0
        t += np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6)
        pi_e = self.meson_mass + pi0_events[2]
        pi_p = np.sqrt(pi_e**2 - self.meson_mass**2)
        pi_v = pi_p / pi_e
        # 8.4e-17 s: pi0 rest-frame lifetime, time-dilated by pi_e/m
        t_pi = np.random.exponential(8.4e-17*pi_e/self.meson_mass)
        pos += pi_v * polar_to_cartesian(pi0_events[:2]) * t_pi * c_light
        t += t_pi
        # pi0 to dark photon
        dp_m = self.dp_m
        dp_e = (self.meson_mass**2 + dp_m**2)/(2*self.meson_mass)
        dp_p = (self.meson_mass**2 - dp_m**2)/(2*self.meson_mass)
        cs = np.random.uniform(-1, 1)
        phi = np.random.uniform(0, 2*np.pi)
        dp_momentum = np.array([dp_e, dp_p*np.sqrt(1-cs**2)*np.cos(phi), dp_p*np.sqrt(1-cs**2)*np.sin(phi), dp_p*cs])
        dp_momentum = lorentz_boost(dp_momentum, -pi_v*polar_to_cartesian(pi0_events[:2]))
        t_dp = np.random.exponential((self.life_time*1e-6)*dp_momentum[0]/dp_m)
        pos += c_light*t_dp*np.array([dp_momentum[1]/dp_momentum[0], dp_momentum[2]/dp_momentum[0], dp_momentum[3]/dp_momentum[0]])
        t += t_dp
        # dark photon to dark matter
        dm_m = self.dm_m
        dm_e = dp_m / 2
        dm_p = np.sqrt(dm_e**2 - dm_m**2)
        csd = np.random.uniform(-1, 1)
        phid = np.random.uniform(0, 2*np.pi)
        dm_momentum = np.array([dm_e, dm_p*np.sqrt(1-csd**2)*np.cos(phid), dm_p*np.sqrt(1-csd**2)*np.sin(phid), dm_p*csd])
        dm_momentum = lorentz_boost(dm_momentum, np.array([-dp_momentum[1]/dp_momentum[0],
                                                           -dp_momentum[2]/dp_momentum[0],
                                                           -dp_momentum[3]/dp_momentum[0]]))
        # dark matter arrives at detector, assuming azimuthal symmetric
        v = dm_momentum[1:]/dm_momentum[0]*c_light
        a = np.sum(v**2)
        b = 2*np.sum(v*pos)
        c = np.sum(pos**2) - self.det_dist**2
        if b**2 - 4*a*c >= 0:
            t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0:  # angular acceptance cut intentionally disabled (see simulate norm)
                self.time.append(t+t_dm)
                self.energy.append(dm_momentum[0])
            t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0:
                self.time.append(t+t_dm)
                self.energy.append(dm_momentum[0])
        # the second DM particle carries the remaining four-momentum
        v = (dp_momentum-dm_momentum)[1:]/(dp_momentum-dm_momentum)[0]*c_light
        a = np.sum(v**2)
        b = 2*np.sum(v*pos)
        c = np.sum(pos**2) - self.det_dist**2
        if b**2 - 4*a*c >= 0:
            t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0:
                self.time.append(t+t_dm)
                self.energy.append((dp_momentum-dm_momentum)[0])
            t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
            if t_dm >= 0:
                self.time.append(t+t_dm)
                self.energy.append((dp_momentum-dm_momentum)[0])
    def to_pandas(self):
        """Return the sampled (time, energy) pairs as a DataFrame (pandas imported at module level)."""
        return pd.DataFrame({'time': self.time, 'energy': self.energy})
    def fint(self, er, m):
        """Integral of flux/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
        return res
    def fint1(self, er, m):
        """Integral of flux*ev/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
        return res
    def fint2(self, er, m):
        """Integral of flux*ev^2/(ev^2 - dm_m^2) above the recoil threshold for target mass m."""
        if np.isscalar(m):
            m = np.array([m])
        emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        res = np.zeros_like(emin)
        for i in range(emin.shape[0]):
            res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
        return res
    def f0(self, ev):
        """Propagator weight 1/(ev^2 - dm_m^2)."""
        return 1/(ev**2 - self.dm_m**2)
    def f1(self, ev):
        """Propagator weight ev/(ev^2 - dm_m^2)."""
        return ev/(ev**2 - self.dm_m**2)
    def f2(self, ev):
        """Propagator weight ev^2/(ev^2 - dm_m^2)."""
        return ev**2 / (ev**2 - self.dm_m**2)
| 2.625 | 3 |
enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 7 | 12905 | # Author : <NAME> "blackdaemon"
# Email : <EMAIL>
#
# Copyright (c) 2010, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.minimessages
#
# ----------------------------------------------------------------------------
"""
An Enso plugin that makes all mini-messages related commands available.
Commands:
hide mini messages
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from xml.sax.saxutils import escape as xml_escape
import enso.messages
from enso.commands import CommandManager, CommandObject
from enso.commands.factories import ArbitraryPostfixFactory
from enso.contrib.scriptotron.ensoapi import EnsoApi
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.messages import MessageManager, TimedMiniMessage
ensoapi = EnsoApi()
# ----------------------------------------------------------------------------
# The 'hide mini messages' command
# ---------------------------------------------------------------------------
class HideMiniMessagesCommand(CommandObject):
    """
    The 'hide mini messages' command.

    Dismisses every mini message currently displayed on screen.
    """
    NAME = "hide mini messages"
    DESCRIPTION = "Hides all mini messages."
    def __init__(self):
        super(HideMiniMessagesCommand, self).__init__()
        self.setDescription(self.DESCRIPTION)
        self.setName(self.NAME)
    @safetyNetted
    def run(self):
        # Ask the message manager to finish (dismiss) all outstanding messages.
        message_manager = MessageManager.get()
        message_manager.finishMessages()
# ----------------------------------------------------------------------------
# The 'show mini message' testing command
# ---------------------------------------------------------------------------
class ShowMiniMessageCommand(CommandObject):
    """
    The 'show mini message {text}' command.

    The postfix may be "text", "timeout,text", or empty; an empty text is
    replaced by a random snippet of lorem ipsum.
    """
    LOREMIPSUM = u"Lorem ipsum dolor sit amet, consectetur adipiscing elit. "\
    "Nunc fringilla ipsum dapibus mi porta et laoreet turpis porta. Class aptent "\
    "taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. "\
    "Duis commodo massa nec arcu mollis auctor. Nunc et orci quis lacus suscipit "\
    "dictum eu vitae est. Donec neque massa, pretium sed venenatis sed, consequat "\
    "quis est. Proin auctor consequat euismod. Praesent iaculis placerat libero eu "\
    "gravida. Curabitur ullamcorper velit sit amet tortor fermentum fringilla. "\
    "Pellentesque non lectus mauris, a iaculis ipsum. Cum sociis natoque penatibus "\
    "et magnis dis parturient montes, nascetur ridiculus mus. Vivamus mauris nibh, "\
    "ultrices in accumsan in, bibendum sed mi. Ut ut nunc a mi vestibulum luctus. "\
    "Sed ornare euismod justo a condimentum."
    def __init__(self, postfix):
        """
        :param postfix: raw command postfix, optionally "timeout,text"
        """
        super(ShowMiniMessageCommand, self).__init__()
        self._postfix = postfix
        self._msgmanager = MessageManager.get()
    @safetyNetted
    def run(self):
        """Parse the postfix and display a (possibly timed) mini message."""
        import random
        text = self._postfix
        if text and "," in text:
            # bug fix: split only on the first comma so the message text itself
            # may contain commas (plain split() raised ValueError on >2 parts)
            timeout, text = text.split(",", 1)
            timeout = max(int(timeout), 0)
        else:
            timeout = None
        if not text:
            # no text given: pick a random 5-10 word snippet of lorem ipsum
            pos = random.randint(0, self.LOREMIPSUM.count(" ") - 10 + 1)
            cnt = random.randint(5, 10)
            words = self.LOREMIPSUM.split()
            text = " ".join(words[pos:pos + cnt])
            # ellipses mark a snippet that starts/ends mid-sentence
            if text[0].upper() != text[0]:
                text = "..." + text
            if text[-1] != ".":
                text = text + "..."
        if timeout:
            caption = "test message (timed %ds)" % timeout
        else:
            caption = "test message"
        # escape both strings so they are safe to embed in the message XML
        msg = xml_escape(text)
        caption = xml_escape(caption)
        if caption:
            xmltext = u"<p>%s</p><caption>%s</caption>" % (msg, caption)
        else:
            xmltext = u"<p>%s</p>" % (msg)
        msg = TimedMiniMessage(
            primaryXml=None,
            miniXml=xmltext,
            waitTime=timeout
        )
        self._msgmanager.newMessage(msg)
class ShowMiniMessageFactory(ArbitraryPostfixFactory):
    """
    Factory producing "show mini message {timeout,text}" command objects
    for an arbitrary user-supplied postfix.
    """
    PREFIX = "show mini message "
    DESCRIPTION = "Show mini message with given timeout and text, both optional."
    HELP_TEXT = "{timeout,text}"
    NAME = "%s%s" % (PREFIX, HELP_TEXT)
    def _generateCommandObj(self, postfix):
        # Build the command and stamp it with this factory's metadata.
        command = ShowMiniMessageCommand(postfix)
        command.setDescription(self.DESCRIPTION)
        command.setName(self.NAME)
        command.setHelp(self.HELP_TEXT)
        return command
class ShowRecentMessageCommand(CommandObject):
    """
    The 'show recent message' command.

    Re-displays the most recent message, or reports that none exists.
    """
    NAME = "show recent message"
    DESCRIPTION = "Show recent message."
    def __init__(self):
        super(ShowRecentMessageCommand, self).__init__()
        self.setDescription(self.DESCRIPTION)
        self.setName(self.NAME)
    @safetyNetted
    def run(self):
        # displayRecentMessage() reports whether anything was shown.
        shown = enso.messages.displayRecentMessage()
        if not shown:
            ensoapi.display_message(u"No recent messages.")
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
    """Register every command provided by this plugin with the command manager."""
    command_manager = CommandManager.get()
    # Instantiate and register each command/factory in turn.
    for name, make_command in (
        (HideMiniMessagesCommand.NAME, HideMiniMessagesCommand),
        (ShowMiniMessageFactory.NAME, ShowMiniMessageFactory),
        (ShowRecentMessageCommand.NAME, ShowRecentMessageCommand),
    ):
        command_manager.registerCommand(name, make_command())
# vim:set tabstop=4 shiftwidth=4 expandtab:
| 1.117188 | 1 |
article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 0 | 12906 | <gh_stars>0
import unittest
from unittest.mock import Mock
import json
from processor import scrape_article
import main
class ArticleCurationTestCase(unittest.TestCase):
    """Integration tests for article scraping and the HTTP endpoint handlers.

    NOTE(review): these tests contact live external sites, so failures may be
    caused by network conditions rather than the code under test.
    """
    def test_article_fetch(self):
        """scrape_article should return a populated text/title/img_url dict."""
        scraped = scrape_article(
            url='https://www.cnn.com/2019/03/25/us/yale-rescinds-student-admissions-scandal/index.html')
        self.assertGreater(len(scraped["text"].split()), 150)
        self.assertIn("Yale rescinds", scraped["title"])
        self.assertIn("http", scraped["img_url"])
        # Tricky url, tests if the extended newspaper component works
        scraped = scrape_article(
            url='http://www.physiciansnewsnetwork.com/ximed/study-hospital-physician-vertical-integration-has-little-impact-on-quality/article_257c41a0-3a11-11e9-952b-97cc981efd76.html')
        self.assertGreater(len(scraped["text"].split()), 150)
        self.assertIn("http", scraped["img_url"])
    def test_article_fetch_endpoint(self):
        """
        Test the actual endpoint by simulating the request object
        :return:
        """
        payload = {
            "article_url": "https://techcrunch.com/2019/05/01/alexa-in-skill-purchasing-which-lets-developers-make-money-from-voice-apps-launches-internationally"
        }
        request = Mock(get_json=Mock(return_value=payload), args=payload)
        body, status, headers = main.fetch_article(request)
        self.assertEqual(status, 200)
        self.assertGreater(len(json.loads(body)["text"].split()), 150)
        # Testing a bad url, see error message
        payload = {
            "article_url": "https://example.com/test123"
        }
        request = Mock(get_json=Mock(return_value=payload), args=payload)
        body, status, headers = main.fetch_article(request)
        self.assertEqual(status, 500)
    def test_download_rss_endpoint(self):
        """download_rss should succeed and return a non-trivial payload."""
        payload = {
            "rss_url": "http://rss.cnn.com/rss/cnn_topstories.rss"
        }
        request = Mock(get_json=Mock(return_value=payload), args=payload)
        body, status, headers = main.download_rss(request)
        self.assertEqual(status, 200)
        self.assertGreater(len(json.loads(body)), 1)
    def test_fetch_rss_endpoint(self):
        """fetch_rss should return data for known feeds and 404 for unknown ones."""
        payload = {
            "rss_url": "http://rss.cnn.com/rss/cnn_topstories.rss"
        }
        request = Mock(get_json=Mock(return_value=payload), args=payload)
        body, status, headers = main.fetch_rss(request)
        self.assertEqual(status, 200)
        self.assertGreater(len(json.loads(body)), 1)
        # Test case when rss not in DB
        payload = {
            "rss_url": "http://www.example.com/example.rss"
        }
        request = Mock(get_json=Mock(return_value=payload), args=payload)
        body, status, headers = main.fetch_rss(request)
        self.assertEqual(status, 404)
    # def test_get_article_dicts_from_rss_cache(self):
    #
    #     start = time.time()
    #     for i in range(1000):
    #         article_dicts = get_article_dicts_from_rss('http://rss.cnn.com/rss/cnn_topstories.rss')
    #
    #     end = time.time()
    #     total_time = end - start
    #
    #     # Make less than 10 sec, so cache works
    #     self.assertLess(total_time, 10)
    #
    # def test_get_article_dicts_from_rss(self):
    #
    #     article_dicts = get_article_dicts_from_rss('http://rss.cnn.com/rss/cnn_topstories.rss')
    #     self.assertGreater(len(article_dicts), 0)
    #
    #     for article in article_dicts:
    #         self.assertIn("http", article["img_url"])
    #
    #         # Make sure title has more than 0 characters
    #         self.assertGreater(len(article["title"]), 0)
| 3.265625 | 3 |
test.py | chdre/noise-randomized | 0 | 12907 | import unittest
class PerlinTestCase(unittest.TestCase):
    """Checks on the pnoise1/2/3 functions: output stays in [-1, 1] for wide
    input sweeps and all octave counts, and ``base`` changes the output."""

    def test_perlin_1d_range(self):
        from noise import pnoise1
        for i in range(-10000, 10000):
            x = i * 0.49
            n = pnoise1(x)
            self.assertTrue(-1.0 <= n <= 1.0, (x, n))

    def test_perlin_1d_octaves_range(self):
        # The range guarantee must hold for every octave count 1..10.
        from noise import pnoise1
        for i in range(-1000, 1000):
            for o in range(10):
                x = i * 0.49
                n = pnoise1(x, octaves=o + 1)
                self.assertTrue(-1.0 <= n <= 1.0, (x, n))

    def test_perlin_1d_base(self):
        # Equal bases must agree; differing bases must produce different noise.
        from noise import pnoise1
        self.assertEqual(pnoise1(0.5), pnoise1(0.5, base=0))
        self.assertNotEqual(pnoise1(0.5), pnoise1(0.5, base=5))
        self.assertNotEqual(pnoise1(0.5, base=5), pnoise1(0.5, base=1))

    def test_perlin_2d_range(self):
        from noise import pnoise2
        for i in range(-10000, 10000):
            x = i * 0.49
            y = -i * 0.67
            n = pnoise2(x, y)
            self.assertTrue(-1.0 <= n <= 1.0, (x, y, n))

    def test_perlin_2d_octaves_range(self):
        from noise import pnoise2
        for i in range(-1000, 1000):
            for o in range(10):
                x = -i * 0.49
                y = i * 0.67
                n = pnoise2(x, y, octaves=o + 1)
                # NOTE(review): failure message omits y — confirm intent.
                self.assertTrue(-1.0 <= n <= 1.0, (x, n))

    def test_perlin_2d_base(self):
        from noise import pnoise2
        x, y = 0.73, 0.27
        self.assertEqual(pnoise2(x, y), pnoise2(x, y, base=0))
        self.assertNotEqual(pnoise2(x, y), pnoise2(x, y, base=5))
        self.assertNotEqual(pnoise2(x, y, base=5), pnoise2(x, y, base=1))

    def test_perlin_3d_range(self):
        from noise import pnoise3
        for i in range(-10000, 10000):
            x = -i * 0.49
            y = i * 0.67
            z = -i * 0.727
            n = pnoise3(x, y, z)
            self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))

    def test_perlin_3d_octaves_range(self):
        from noise import pnoise3
        for i in range(-1000, 1000):
            x = i * 0.22
            y = -i * 0.77
            z = -i * 0.17
            for o in range(10):
                n = pnoise3(x, y, z, octaves=o + 1)
                self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))

    def test_perlin_3d_base(self):
        from noise import pnoise3
        x, y, z = 0.1, 0.7, 0.33
        self.assertEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=0))
        self.assertNotEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=5))
        self.assertNotEqual(pnoise3(x, y, z, base=5), pnoise3(x, y, z, base=1))
class SimplexTestCase(unittest.TestCase):
    """Checks on randomize and the snoise2/3/4 functions: output stays in
    [-1, 1] for wide input sweeps and all octave counts 1..10."""

    def test_randomize(self):
        # randomize(period, seed) should report success (truthy return).
        from noise import randomize
        self.assertTrue(randomize(4096,23490))

    def test_simplex_2d_range(self):
        from noise import snoise2
        for i in range(-10000, 10000):
            x = i * 0.49
            y = -i * 0.67
            n = snoise2(x, y)
            self.assertTrue(-1.0 <= n <= 1.0, (x, y, n))

    def test_simplex_2d_octaves_range(self):
        from noise import snoise2
        for i in range(-1000, 1000):
            for o in range(10):
                x = -i * 0.49
                y = i * 0.67
                n = snoise2(x, y, octaves=o + 1)
                # NOTE(review): failure message omits y — confirm intent.
                self.assertTrue(-1.0 <= n <= 1.0, (x, n))

    def test_simplex_3d_range(self):
        from noise import snoise3
        for i in range(-10000, 10000):
            x = i * 0.31
            y = -i * 0.7
            z = i * 0.19
            n = snoise3(x, y, z)
            self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))

    def test_simplex_3d_octaves_range(self):
        from noise import snoise3
        for i in range(-1000, 1000):
            x = -i * 0.12
            y = i * 0.55
            z = i * 0.34
            for o in range(10):
                n = snoise3(x, y, z, octaves=o + 1)
                self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, o+1, n))

    def test_simplex_4d_range(self):
        from noise import snoise4
        for i in range(-10000, 10000):
            x = i * 0.88
            y = -i * 0.11
            z = -i * 0.57
            w = i * 0.666
            n = snoise4(x, y, z, w)
            self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, n))

    def test_simplex_4d_octaves_range(self):
        from noise import snoise4
        for i in range(-1000, 1000):
            x = -i * 0.12
            y = i * 0.55
            z = i * 0.34
            w = i * 0.21
            for o in range(10):
                n = snoise4(x, y, z, w, octaves=o + 1)
                self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, o+1, n))
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 2.875 | 3 |
pipelines/controllers/datasets.py | platiagro/pipeline-generator | 1 | 12908 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import platiagro
import pandas as pd
from werkzeug.exceptions import NotFound
from pipelines.database import db_session
from pipelines.models import Operator
from pipelines.models.utils import raise_if_experiment_does_not_exist
def get_dataset_name(experiment_id, operator_id,):
    """Look up the dataset name used by an experiment.

    The dataset is read from the given operator's parameters; when that
    operator does not declare one, the sibling operators of the same
    experiment are searched instead.

    Args:
        experiment_id(str): the experiment uuid
        operator_id(str): the operator uuid

    Returns:
        Dataset name

    Raises:
        NotFound: when the operator does not exist or no dataset can be
            located anywhere in the experiment.
    """
    raise_if_experiment_does_not_exist(experiment_id)

    operator = Operator.query.get(operator_id)
    if operator is None:
        raise NotFound("The specified operator does not exist")

    dataset = operator.parameters.get('dataset')
    if dataset is not None:
        return dataset

    # Fall back to the remaining operators of the same experiment.
    siblings = db_session.query(Operator) \
        .filter_by(experiment_id=experiment_id) \
        .filter(Operator.uuid != operator_id) \
        .all()
    for sibling in siblings:
        dataset = sibling.parameters.get('dataset')
        if dataset:
            break

    if dataset is None:
        raise NotFound()
    return dataset
def get_dataset_pagination(application_csv,
                           name,
                           operator_id,
                           page,
                           page_size,
                           run_id):
    """Retrieve a dataset, optionally paginated and/or rendered as CSV.

    Args:
        application_csv(bool): if is to return dataset as csv
        name(str): the dataset name
        operator_id(str): the operator uuid
        page_size(int) : record numbers (-1 disables pagination)
        page(int): page number
        run_id (str): the run id.

    Returns:
        The dataset, either as a CSV string or as a "split"-oriented dict.

    Raises:
        NotFound: when the dataset (or its run metadata) is missing.
    """
    try:
        metadata = platiagro.stat_dataset(name=name, operator_id=operator_id)
        # A dataset without "run_id" metadata cannot be loaded per-run,
        # so treat it the same as a missing dataset.
        if "run_id" not in metadata:
            raise FileNotFoundError()
        dataset = platiagro.load_dataset(name=name,
                                         operator_id=operator_id,
                                         run_id=run_id)
    except FileNotFoundError as e:
        raise NotFound(str(e))

    paginate = page_size != -1

    if not paginate and application_csv:
        # Whole dataset as CSV: render straight from the DataFrame.
        return dataset.to_csv(index=False)

    content = dataset.to_dict(orient="split")
    del content["index"]

    if not paginate:
        return content

    paged = pagination_datasets(page=page, page_size=page_size, dataset=content)
    if application_csv:
        frame = pd.DataFrame(columns=paged['columns'], data=paged['data'])
        return frame.to_csv(index=False)
    return paged
def pagination_datasets(page, page_size, dataset):
    """Return a single page of a "split"-oriented dataset dict.

    Args:
        page_size(int) : record numbers per page
        page(int): page number (1-based)
        dataset(dict): data to be paged, with "columns" and "data" keys

    Returns:
        dict with the original ``columns``, the requested slice of rows
        in ``data`` and the total number of rows in ``total``.

    Raises:
        NotFound: when the requested page holds no records.
    """
    rows = dataset['data']
    start = (page - 1) * page_size
    # A negative start (page <= 0) would silently index from the tail of
    # the list; treat such pages as empty instead.
    page_rows = rows[start:start + page_size] if start >= 0 else []
    if not page_rows:
        raise NotFound("The informed page does not contain records")
    return {
        'columns': dataset['columns'],
        'data': page_rows,
        'total': len(rows)
    }
| 2.484375 | 2 |
tests/test_message.py | jfkinslow/flask-mailing | 0 | 12909 | <reponame>jfkinslow/flask-mailing
import pytest
from flask_mailing.schemas import Message, MultipartSubtypeEnum
from flask_mailing.msg import MailMsg
import os
# Payload written into the attachment fixture files used by the tests below.
CONTENT = "file test content"
def test_initialize():
    """Message keeps the subject it was constructed with."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    assert message.subject == "test subject"
def test_recipients_properly_initialized():
    """An empty recipients list survives construction unchanged."""
    message = Message(
        subject="test subject",
        recipients=[],
        body="test",
        subtype="plain"
    )

    assert message.recipients == []
def test_add_recipient_method():
    """add_recipient() appends an address to the recipients list."""
    message = Message(
        subject="test subject",
        recipients=[],
        body="test",
        subtype="plain"
    )
    message.add_recipient("<EMAIL>")

    assert message.recipients == ["<EMAIL>"]
def test_sendto_properly_set():
    """All addressing fields (to/cc/bcc/reply_to) keep their entries."""
    msg = Message(subject="subject", recipients=["<EMAIL>", "<EMAIL>"],
                  cc=["<EMAIL>"], bcc=["<EMAIL>"], reply_to=["<EMAIL>"])

    assert len(msg.recipients) == 2
    assert len(msg.cc) == 1
    assert len(msg.bcc) == 1
    assert len(msg.reply_to) == 1
def test_plain_message():
    """The plain-text body is stored as given."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    assert message.body == "test"
def test_charset():
    """Messages default to the utf-8 charset."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    assert message.charset == "utf-8"
def test_message_str():
    """The body attribute is a plain str instance."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    assert type(message.body) == str
def test_plain_message_with_attachments():
    """A path passed via ``attachments=`` is registered on the message."""
    directory = os.getcwd()
    attachement = directory + "/files/attachement.txt"

    msg = Message(subject="testing",
                  recipients=["<EMAIL>"],
                  attachments=[attachement],
                  body="test mail body")
    # NOTE(review): the file is created only after the Message is built —
    # presumably attachment paths are read lazily at send time; confirm.
    with open(attachement, "w") as file:
        file.write(CONTENT)

    assert len(msg.attachments) == 1
def test_plain_message_with_attach_method():
    """attach() adds raw bytes content as a named attachment."""
    directory = os.getcwd()
    attachement = directory + "/files/attachement_1.txt"

    msg = Message(subject="testing",
                  recipients=["<EMAIL>"],
                  body="test mail body")

    with open(attachement, "w") as file:
        file.write(CONTENT)

    with open(attachement, "rb") as fp:
        msg.attach("attachement_1.txt", fp.read())

    assert len(msg.attachments) == 1
def test_empty_subject_header():
    """An empty subject is accepted and kept empty."""
    message = Message(
        subject="",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    assert len(message.subject) == 0
def test_bcc():
    """The bcc list is stored as given."""
    msg = Message(subject="subject", recipients=[],
                  bcc=["<EMAIL>"])

    assert len(msg.bcc) == 1
    assert msg.bcc == ["<EMAIL>"]
def test_replyto():
    """The reply_to list is stored as given."""
    msg = Message(subject="subject", recipients=[],
                  reply_to=["<EMAIL>"])

    assert len(msg.reply_to) == 1
    assert msg.reply_to == ["<EMAIL>"]
def test_cc():
    """The cc list is stored as given."""
    msg = Message(subject="subject", recipients=[],
                  cc=["<EMAIL>"])

    assert len(msg.cc) == 1
    assert msg.cc == ["<EMAIL>"]
def test_multipart_subtype():
    """Messages default to the "mixed" multipart subtype."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )
    assert message.multipart_subtype == MultipartSubtypeEnum.mixed
@pytest.mark.asyncio
async def test_msgid_header():
    """The rendered MIME object carries a generated Message-ID header."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    msg = MailMsg(**message.dict())
    msg_object = await msg._message('<EMAIL>')
    assert msg_object['Message-ID'] is not None
@pytest.mark.asyncio
async def test_message_charset():
    """The rendered MIME object keeps the utf-8 charset."""
    message = Message(
        subject="test subject",
        recipients=["<EMAIL>"],
        body="test",
        subtype="plain"
    )

    msg = MailMsg(**message.dict())
    msg_object = await msg._message('<EMAIL>')
    assert msg_object._charset is not None
    assert msg_object._charset == "utf-8"
utils/checks.py | JDJGInc/JDBot | 12 | 12910 | import discord
def check(ctx):
    """Build a predicate that is true only for messages sent by ctx's author."""
    return lambda m: m.author == ctx.author
def Membercheck(ctx):
    """Build a predicate that is true only for messages sent by the guild's
    own member object (``ctx.guild.me``)."""
    return lambda m: m.author == ctx.guild.me
def warn_permission(ctx, Member):
    # Whether ctx.author may warn Member in a guild channel: requires
    # manage_messages, a strictly higher top role, and at least Member's
    # guild permissions. DM invocations are always allowed.
    # NOTE(review): returns None (falsy) for channel types that are neither
    # TextChannel nor DMChannel (e.g. threads) — confirm that is intended.
    if isinstance(ctx.channel, discord.TextChannel):
        return ctx.author.guild_permissions.manage_messages and ctx.author.top_role > Member.top_role and ctx.author.guild_permissions >= Member.guild_permissions

    #bug with user with same permissions maybe and other stuff(seems fixed for right now, leaving note just in case.)

    if isinstance(ctx.channel, discord.DMChannel):
        return True
def cleanup_permission(ctx):
    # Whether ctx.author may run cleanup: manage_messages in guild channels,
    # always permitted in DMs.
    # NOTE(review): like warn_permission, other channel types fall through
    # and return None (falsy) — confirm that is intended.
    if isinstance(ctx.channel, discord.TextChannel):
        return ctx.author.guild_permissions.manage_messages

    if isinstance(ctx.channel, discord.DMChannel):
        return True
def mutual_guild_check(ctx, user):
    """Return True when ctx.author and user share at least one guild."""
    shared = set(ctx.author.mutual_guilds) & set(user.mutual_guilds)
    return bool(shared)
async def filter_commands(ctx, command_list):
    """Return the commands from command_list that ctx is allowed to run.

    Each command's ``can_run`` check is awaited; a command whose check
    raises is treated as not runnable instead of aborting the filter.
    """
    async def _can_run(cmd):
        # One command's permission check failing must not abort the filter.
        try:
            return await cmd.can_run(ctx)
        # Catch Exception rather than bare ``except`` so CancelledError /
        # KeyboardInterrupt still propagate. (The helper is also renamed so
        # it no longer shadows the module-level ``check`` function.)
        except Exception:
            return False

    return [cmd for cmd in command_list if await _can_run(cmd)]
scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 1 | 12911 | <reponame>stormstudios/rigbits
"""Rigbits eye rigger tool"""
import json
import traceback
from functools import partial
import mgear.core.pyqt as gqt
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from mgear.core import meshNavigation, curve, applyop, node, primitive, icon
from mgear.core import transform, utils, attribute, skin, string
from mgear.vendor.Qt import QtCore, QtWidgets
from pymel.core import datatypes
from mgear import rigbits
##########################################################
# Eye rig constructor
##########################################################
def eyeRig(eyeMesh,
edgeLoop,
blinkH,
namePrefix,
offset,
rigidLoops,
falloffLoops,
headJnt,
doSkin,
parent=None,
ctlName="ctl",
sideRange=False,
customCorner=False,
intCorner=None,
extCorner=None,
ctlGrp=None,
defGrp=None):
"""Create eyelid and eye rig
Args:
eyeMesh (TYPE): Description
edgeLoop (TYPE): Description
blinkH (TYPE): Description
namePrefix (TYPE): Description
offset (TYPE): Description
rigidLoops (TYPE): Description
falloffLoops (TYPE): Description
headJnt (TYPE): Description
doSkin (TYPE): Description
parent (None, optional): Description
ctlName (str, optional): Description
sideRange (bool, optional): Description
customCorner (bool, optional): Description
intCorner (None, optional): Description
extCorner (None, optional): Description
ctlGrp (None, optional): Description
defGrp (None, optional): Description
Returns:
TYPE: Description
"""
# Checkers
if edgeLoop:
edgeLoopList = [pm.PyNode(e) for e in edgeLoop.split(",")]
else:
pm.displayWarning("Please set the edge loop first")
return
if eyeMesh:
try:
eyeMesh = pm.PyNode(eyeMesh)
except pm.MayaNodeError:
pm.displayWarning("The object %s can not be found in the "
"scene" % (eyeMesh))
return
else:
pm.displayWarning("Please set the eye mesh first")
if doSkin:
if not headJnt:
pm.displayWarning("Please set the Head Jnt or unCheck "
"Compute Topological Autoskin")
return
# Initial Data
bboxCenter = meshNavigation.bboxCenter(eyeMesh)
extr_v = meshNavigation.getExtremeVertexFromLoop(edgeLoopList, sideRange)
upPos = extr_v[0]
lowPos = extr_v[1]
inPos = extr_v[2]
outPos = extr_v[3]
edgeList = extr_v[4]
vertexList = extr_v[5]
# Detect the side L or R from the x value
if inPos.getPosition(space='world')[0] < 0.0:
side = "R"
inPos = extr_v[3]
outPos = extr_v[2]
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = npw - bboxCenter
else:
side = "L"
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = bboxCenter - npw
# Manual Vertex corners
if customCorner:
if intCorner:
try:
if side == "R":
inPos = pm.PyNode(extCorner)
else:
inPos = pm.PyNode(intCorner)
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % intCorner)
return
else:
pm.displayWarning("Please set the internal eyelid corner")
return
if extCorner:
try:
normalPos = pm.PyNode(extCorner)
npw = normalPos.getPosition(space='world')
if side == "R":
outPos = pm.PyNode(intCorner)
normalVec = npw - bboxCenter
else:
outPos = pm.PyNode(extCorner)
normalVec = bboxCenter - npw
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % extCorner)
return
else:
pm.displayWarning("Please set the external eyelid corner")
return
# Check if we have prefix:
if namePrefix:
namePrefix = string.removeInvalidCharacter(namePrefix)
else:
pm.displayWarning("Prefix is needed")
return
def setName(name, ind=None):
namesList = [namePrefix, side, name]
if ind is not None:
namesList[1] = side + str(ind)
name = "_".join(namesList)
return name
if pm.ls(setName("root")):
pm.displayWarning("The object %s already exist in the scene. Please "
"choose another name prefix" % setName("root"))
return
# Eye root
eye_root = primitive.addTransform(None, setName("root"))
eyeCrv_root = primitive.addTransform(eye_root, setName("crvs"))
# Eyelid Main crvs
try:
upEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, upPos, inPos, outPos)
upCrv = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upperEyelid"), parent=eyeCrv_root)
upCrv_ctl = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upCtl_crv"), parent=eyeCrv_root)
pm.rebuildCurve(upCrv_ctl, s=2, rt=0, rpo=True, ch=False)
lowEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, lowPos, inPos, outPos)
lowCrv = curve.createCurveFromOrderedEdges(
lowEyelid, inPos, setName("lowerEyelid"), parent=eyeCrv_root)
lowCrv_ctl = curve.createCurveFromOrderedEdges(
lowEyelid,
inPos,
setName("lowCtl_crv"),
parent=eyeCrv_root)
pm.rebuildCurve(lowCrv_ctl, s=2, rt=0, rpo=True, ch=False)
except UnboundLocalError:
if customCorner:
pm.displayWarning("This error is maybe caused because the custom "
"Corner vertex is not part of the edge loop")
pm.displayError(traceback.format_exc())
return
upBlink = curve.createCurveFromCurve(
upCrv, setName("upblink_crv"), nbPoints=30, parent=eyeCrv_root)
lowBlink = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_crv"), nbPoints=30, parent=eyeCrv_root)
upTarget = curve.createCurveFromCurve(
upCrv, setName("upblink_target"), nbPoints=30, parent=eyeCrv_root)
lowTarget = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_target"), nbPoints=30, parent=eyeCrv_root)
midTarget = curve.createCurveFromCurve(
lowCrv, setName("midBlink_target"), nbPoints=30, parent=eyeCrv_root)
rigCrvs = [upCrv,
lowCrv,
upCrv_ctl,
lowCrv_ctl,
upBlink,
lowBlink,
upTarget,
lowTarget,
midTarget]
for crv in rigCrvs:
crv.attr("visibility").set(False)
# localBBOX
localBBox = eyeMesh.getBoundingBox(invisible=True, space='world')
wRadius = abs((localBBox[0][0] - localBBox[1][0]))
dRadius = abs((localBBox[0][1] - localBBox[1][1]) / 1.7)
# Groups
if not ctlGrp:
ctlGrp = "rig_controllers_grp"
try:
ctlSet = pm.PyNode(ctlGrp)
except pm.MayaNodeError:
pm.sets(n=ctlGrp, em=True)
ctlSet = pm.PyNode(ctlGrp)
if not defGrp:
defGrp = "rig_deformers_grp"
try:
defset = pm.PyNode(defGrp)
except pm.MayaNodeError:
pm.sets(n=defGrp, em=True)
defset = pm.PyNode(defGrp)
# Calculate center looking at
averagePosition = ((upPos.getPosition(space='world')
+ lowPos.getPosition(space='world')
+ inPos.getPosition(space='world')
+ outPos.getPosition(space='world'))
/ 4)
if side == "R":
negate = False
offset = offset
over_offset = dRadius
else:
negate = False
over_offset = dRadius
if side == "R" and sideRange or side == "R" and customCorner:
axis = "z-x"
# axis = "zx"
else:
axis = "z-x"
t = transform.getTransformLookingAt(
bboxCenter,
averagePosition,
normalVec,
axis=axis,
negate=negate)
over_npo = primitive.addTransform(
eye_root, setName("center_lookatRoot"), t)
over_ctl = icon.create(over_npo,
setName("over_%s" % ctlName),
t,
icon="square",
w=wRadius,
d=dRadius,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, over_offset),
color=4)
node.add_controller_tag(over_ctl)
attribute.add_mirror_config_channels(over_ctl)
attribute.setKeyableAttributes(
over_ctl,
params=["tx", "ty", "tz", "ro", "rx", "ry", "rz", "sx", "sy", "sz"])
if side == "R":
over_npo.attr("rx").set(over_npo.attr("rx").get() * -1)
over_npo.attr("ry").set(over_npo.attr("ry").get() + 180)
over_npo.attr("sz").set(-1)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=over_ctl)
center_lookat = primitive.addTransform(
over_ctl, setName("center_lookat"), t)
# Tracking
# Eye aim control
t_arrow = transform.getTransformLookingAt(bboxCenter,
averagePosition,
upPos.getPosition(space='world'),
axis="zy", negate=False)
radius = abs((localBBox[0][0] - localBBox[1][0]) / 1.7)
arrow_npo = primitive.addTransform(eye_root, setName("aim_npo"), t_arrow)
arrow_ctl = icon.create(arrow_npo,
setName("aim_%s" % ctlName),
t_arrow,
icon="arrow",
w=1,
po=datatypes.Vector(0, 0, radius),
color=4)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=arrow_ctl)
attribute.setKeyableAttributes(arrow_ctl, params=["rx", "ry", "rz"])
# tracking custom trigger
if side == "R":
tt = t_arrow
else:
tt = t
aimTrigger_root = primitive.addTransform(
center_lookat, setName("aimTrigger_root"), tt)
aimTrigger_lvl = primitive.addTransform(
aimTrigger_root, setName("aimTrigger_lvl"), tt)
aimTrigger_lvl.attr("tz").set(1.0)
aimTrigger_ref = primitive.addTransform(
aimTrigger_lvl, setName("aimTrigger_ref"), tt)
aimTrigger_ref.attr("tz").set(0.0)
# connect trigger with arrow_ctl
pm.parentConstraint(arrow_ctl, aimTrigger_ref, mo=True)
# Controls lists
upControls = []
trackLvl = []
# upper eyelid controls
upperCtlNames = ["inCorner", "upInMid", "upMid", "upOutMid", "outCorner"]
cvs = upCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
# if side == "R":
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % upperCtlNames[i]),
t)
npoBase = npo
if i == 2:
# we add an extra level to input the tracking ofset values
npo = primitive.addTransform(npo,
setName("%s_trk" % upperCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (upperCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
node.add_controller_tag(ctl, over_ctl)
upControls.append(ctl)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
# adding parent average contrains to odd controls
for i, ctl in enumerate(upControls):
if utils.is_odd(i):
pm.parentConstraint(upControls[i - 1],
upControls[i + 1],
ctl.getParent(),
mo=True)
# lower eyelid controls
lowControls = [upControls[0]]
lowerCtlNames = ["inCorner",
"lowInMid",
"lowMid",
"lowOutMid",
"outCorner"]
cvs = lowCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
# we skip the first and last point since is already in the uper eyelid
if i in [0, 4]:
continue
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % lowerCtlNames[i]),
t)
npoBase = npo
if i == 2:
# we add an extra level to input the tracking ofset values
npo = primitive.addTransform(npo,
setName("%s_trk" % lowerCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (lowerCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
lowControls.append(ctl)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
# mirror behaviout on R side controls
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
for lctl in reversed(lowControls[1:]):
node.add_controller_tag(lctl, over_ctl)
lowControls.append(upControls[-1])
# adding parent average contrains to odd controls
for i, ctl in enumerate(lowControls):
if utils.is_odd(i):
pm.parentConstraint(lowControls[i - 1],
lowControls[i + 1],
ctl.getParent(),
mo=True)
# Connecting control crvs with controls
applyop.gear_curvecns_op(upCrv_ctl, upControls)
applyop.gear_curvecns_op(lowCrv_ctl, lowControls)
# adding wires
w1 = pm.wire(upCrv, w=upBlink)[0]
w2 = pm.wire(lowCrv, w=lowBlink)[0]
w3 = pm.wire(upTarget, w=upCrv_ctl)[0]
w4 = pm.wire(lowTarget, w=lowCrv_ctl)[0]
# adding blendshapes
bs_upBlink = pm.blendShape(upTarget,
midTarget,
upBlink,
n="blendShapeUpBlink")
bs_lowBlink = pm.blendShape(lowTarget,
midTarget,
lowBlink,
n="blendShapeLowBlink")
bs_mid = pm.blendShape(lowTarget,
upTarget,
midTarget,
n="blendShapeLowBlink")
# setting blendshape reverse connections
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_upBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_upBlink[0].attr(upTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_lowBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX",
bs_lowBlink[0].attr(lowTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_mid[0].attr(upTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_mid[0].attr(lowTarget.name()))
# setting default values
bs_mid[0].attr(upTarget.name()).set(blinkH)
# joints root
jnt_root = primitive.addTransformFromPos(
eye_root, setName("joints"), pos=bboxCenter)
# head joint
if headJnt:
try:
headJnt = pm.PyNode(headJnt)
jnt_base = headJnt
except pm.MayaNodeError:
pm.displayWarning(
"Aborted can not find %s " % headJnt)
return
else:
# Eye root
jnt_base = jnt_root
eyeTargets_root = primitive.addTransform(eye_root,
setName("targets"))
eyeCenter_jnt = rigbits.addJnt(arrow_ctl,
jnt_base,
grp=defset,
jntName=setName("center_jnt"))
# Upper Eyelid joints ##################################################
cvs = upCrv.getCVs(space="world")
upCrv_info = node.createCurveInfoNode(upCrv)
# aim constrain targets and joints
upperEyelid_aimTargets = []
upperEyelid_jnt = []
upperEyelid_jntRoot = []
for i, cv in enumerate(cvs):
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("upEyelid_aimTarget", i),
pos=cv)
upperEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(upCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("upEyelid_jnt_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
upperEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("upEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("upEyelid_jnt", i))
upperEyelid_jnt.append(jnt)
# Lower Eyelid joints ##################################################
cvs = lowCrv.getCVs(space="world")
lowCrv_info = node.createCurveInfoNode(lowCrv)
# aim constrain targets and joints
lowerEyelid_aimTargets = []
lowerEyelid_jnt = []
lowerEyelid_jntRoot = []
for i, cv in enumerate(cvs):
if i in [0, len(cvs) - 1]:
continue
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("lowEyelid_aimTarget", i),
pos=cv)
lowerEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(lowCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("lowEyelid_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
lowerEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("lowEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("lowEyelid_jnt", i))
lowerEyelid_jnt.append(jnt)
# Channels
# Adding and connecting attributes for the blink
up_ctl = upControls[2]
blink_att = attribute.addAttribute(
over_ctl, "blink", "float", 0, minValue=0, maxValue=1)
blinkMult_att = attribute.addAttribute(
over_ctl, "blinkMult", "float", 1, minValue=1, maxValue=2)
midBlinkH_att = attribute.addAttribute(
over_ctl, "blinkHeight", "float", blinkH, minValue=0, maxValue=1)
mult_node = node.createMulNode(blink_att, blinkMult_att)
pm.connectAttr(mult_node + ".outputX",
bs_upBlink[0].attr(midTarget.name()))
pm.connectAttr(mult_node + ".outputX",
bs_lowBlink[0].attr(midTarget.name()))
pm.connectAttr(midBlinkH_att, bs_mid[0].attr(upTarget.name()))
low_ctl = lowControls[2]
# Adding channels for eye tracking
upVTracking_att = attribute.addAttribute(up_ctl,
"vTracking",
"float",
.02,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
upHTracking_att = attribute.addAttribute(up_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowVTracking_att = attribute.addAttribute(low_ctl,
"vTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowHTracking_att = attribute.addAttribute(low_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
mult_node = node.createMulNode(upVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("ty"))
mult_node = node.createMulNode(upHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("tx"))
mult_node = node.createMulNode(lowVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("ty"))
mult_node = node.createMulNode(lowHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("tx"))
# Tension on blink
node.createReverseNode(blink_att, w1.scale[0])
node.createReverseNode(blink_att, w3.scale[0])
node.createReverseNode(blink_att, w2.scale[0])
node.createReverseNode(blink_att, w4.scale[0])
###########################################
# Reparenting
###########################################
if parent:
try:
if isinstance(parent, basestring):
parent = pm.PyNode(parent)
parent.addChild(eye_root)
except pm.MayaNodeError:
pm.displayWarning("The eye rig can not be parent to: %s. Maybe "
"this object doesn't exist." % parent)
###########################################
# Auto Skinning
###########################################
if doSkin:
# eyelid vertex rows
totalLoops = rigidLoops + falloffLoops
vertexLoopList = meshNavigation.getConcentricVertexLoop(vertexList,
totalLoops)
vertexRowList = meshNavigation.getVertexRowsFromLoops(vertexLoopList)
# we set the first value 100% for the first initial loop
skinPercList = [1.0]
# we expect to have a regular grid topology
for r in range(rigidLoops):
for rr in range(2):
skinPercList.append(1.0)
increment = 1.0 / float(falloffLoops)
# we invert to smooth out from 100 to 0
inv = 1.0 - increment
for r in range(falloffLoops):
for rr in range(2):
if inv < 0.0:
inv = 0.0
skinPercList.append(inv)
inv -= increment
# this loop add an extra 0.0 indices to avoid errors
for r in range(10):
for rr in range(2):
skinPercList.append(0.0)
# base skin
geo = pm.listRelatives(edgeLoopList[0], parent=True)[0]
# Check if the object has a skinCluster
objName = pm.listRelatives(geo, parent=True)[0]
skinCluster = skin.getSkinCluster(objName)
if not skinCluster:
skinCluster = pm.skinCluster(headJnt,
geo,
tsb=True,
nw=2,
n='skinClsEyelid')
eyelidJoints = upperEyelid_jnt + lowerEyelid_jnt
pm.progressWindow(title='Auto skinning process',
progress=0,
max=len(eyelidJoints))
firstBoundary = False
for jnt in eyelidJoints:
pm.progressWindow(e=True, step=1, status='\nSkinning %s' % jnt)
skinCluster.addInfluence(jnt, weight=0)
v = meshNavigation.getClosestVertexFromTransform(geo, jnt)
for row in vertexRowList:
if v in row:
it = 0 # iterator
inc = 1 # increment
for i, rv in enumerate(row):
try:
perc = skinPercList[it]
t_val = [(jnt, perc), (headJnt, 1.0 - perc)]
pm.skinPercent(skinCluster,
rv,
transformValue=t_val)
if rv.isOnBoundary():
# we need to compare with the first boundary
# to check if the row have inverted direction
# and offset the value
if not firstBoundary:
firstBoundary = True
firstBoundaryValue = it
else:
if it < firstBoundaryValue:
it -= 1
elif it > firstBoundaryValue:
it += 1
inc = 2
except IndexError:
continue
it = it + inc
pm.progressWindow(e=True, endProgress=True)
# Eye Mesh skinning
skinCluster = skin.getSkinCluster(eyeMesh)
if not skinCluster:
skinCluster = pm.skinCluster(eyeCenter_jnt,
eyeMesh,
tsb=True,
nw=1,
n='skinClsEye')
##########################################################
# Eye Rig UI
##########################################################
class eyeRigUI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
    """Dockable dialog collecting every option needed to build an eye rig.

    The dialog gathers geometry, joint, skinning and naming settings, then
    either builds the rig in-scene (``buildRig``) or serializes the current
    configuration to a ``.eyes`` JSON file (``exportDict``) that
    ``eyesFromfile`` can replay later.
    """

    valueChanged = QtCore.Signal(int)

    def __init__(self, parent=None):
        super(eyeRigUI, self).__init__(parent)
        self.create()

    def create(self):
        """Configure the window and assemble the full UI."""
        self.setWindowTitle("Rigbits: Eye Rigger")
        self.setWindowFlags(QtCore.Qt.Window)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, 1)

        self.create_controls()
        self.create_layout()
        self.create_connections()

    def create_controls(self):
        """Instantiate every widget (layout and signal wiring happen later)."""
        # Geometry input controls
        self.geometryInput_group = QtWidgets.QGroupBox("Geometry Input")
        self.eyeball_label = QtWidgets.QLabel("Eyeball:")
        self.eyeball_lineEdit = QtWidgets.QLineEdit()
        self.eyeball_button = QtWidgets.QPushButton("<<")
        self.edgeloop_label = QtWidgets.QLabel("Edge Loop:")
        self.edgeloop_lineEdit = QtWidgets.QLineEdit()
        self.edgeloop_button = QtWidgets.QPushButton("<<")

        # Manual corners
        self.manualCorners_group = QtWidgets.QGroupBox("Custom Eye Corners")
        self.manualCorners_check = QtWidgets.QCheckBox(
            "Set Manual Vertex Corners")
        self.manualCorners_check.setChecked(False)
        self.intCorner_label = QtWidgets.QLabel("Internal Corner")
        self.intCorner_lineEdit = QtWidgets.QLineEdit()
        self.intCorner_button = QtWidgets.QPushButton("<<")
        self.extCorner_label = QtWidgets.QLabel("External Corner")
        self.extCorner_lineEdit = QtWidgets.QLineEdit()
        self.extCorner_button = QtWidgets.QPushButton("<<")

        # Blink height slider (spinbox and slider are kept in sync)
        self.blinkHeigh_group = QtWidgets.QGroupBox("Blink High")
        self.blinkHeight_value = QtWidgets.QSpinBox()
        self.blinkHeight_value.setRange(0, 100)
        self.blinkHeight_value.setSingleStep(10)
        self.blinkHeight_value.setValue(20)
        self.blinkHeight_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
        self.blinkHeight_slider.setRange(0, 100)
        # Qt expects an int step; the bare float division was rejected by
        # strict Python 3 Qt bindings.
        self.blinkHeight_slider.setSingleStep(
            int(self.blinkHeight_slider.maximum() / 10.0))
        self.blinkHeight_slider.setValue(20)

        # Name prefix
        self.prefix_group = QtWidgets.QGroupBox("Name Prefix")
        self.prefix_lineEdit = QtWidgets.QLineEdit()
        self.prefix_lineEdit.setText("eye")
        self.control_group = QtWidgets.QGroupBox("Control Name Extension")
        self.control_lineEdit = QtWidgets.QLineEdit()
        self.control_lineEdit.setText("ctl")

        # joints
        self.joints_group = QtWidgets.QGroupBox("Joints")
        self.headJnt_label = QtWidgets.QLabel("Head or Eye area Joint:")
        self.headJnt_lineEdit = QtWidgets.QLineEdit()
        self.headJnt_button = QtWidgets.QPushButton("<<")

        # Topological Autoskin
        self.topoSkin_group = QtWidgets.QGroupBox("Skin")
        self.rigidLoops_label = QtWidgets.QLabel("Rigid Loops:")
        self.rigidLoops_value = QtWidgets.QSpinBox()
        self.rigidLoops_value.setRange(0, 30)
        self.rigidLoops_value.setSingleStep(1)
        self.rigidLoops_value.setValue(2)
        self.falloffLoops_label = QtWidgets.QLabel("Falloff Loops:")
        self.falloffLoops_value = QtWidgets.QSpinBox()
        self.falloffLoops_value.setRange(0, 30)
        self.falloffLoops_value.setSingleStep(1)
        self.falloffLoops_value.setValue(4)
        self.topSkin_check = QtWidgets.QCheckBox(
            'Compute Topological Autoskin')
        self.topSkin_check.setChecked(True)

        # Options
        self.options_group = QtWidgets.QGroupBox("Options")
        self.parent_label = QtWidgets.QLabel("Rig Parent:")
        self.parent_lineEdit = QtWidgets.QLineEdit()
        self.parent_button = QtWidgets.QPushButton("<<")
        self.ctlShapeOffset_label = QtWidgets.QLabel("Controls Offset:")
        self.ctlShapeOffset_value = QtWidgets.QDoubleSpinBox()
        self.ctlShapeOffset_value.setRange(0, 10)
        self.ctlShapeOffset_value.setSingleStep(.05)
        self.ctlShapeOffset_value.setValue(.05)
        self.sideRange_check = QtWidgets.QCheckBox(
            "Use Z axis for wide calculation (i.e: Horse and fish side eyes)")
        self.sideRange_check.setChecked(False)
        self.ctlGrp_label = QtWidgets.QLabel("Controls Group:")
        self.ctlGrp_lineEdit = QtWidgets.QLineEdit()
        self.ctlGrp_button = QtWidgets.QPushButton("<<")
        self.deformersGrp_label = QtWidgets.QLabel("Deformers Group:")
        self.deformersGrp_lineEdit = QtWidgets.QLineEdit()
        self.deformersGrp_button = QtWidgets.QPushButton("<<")

        # Build button
        self.build_button = QtWidgets.QPushButton("Build Eye Rig")
        self.export_button = QtWidgets.QPushButton("Export Config to json")

    def create_layout(self):
        """Arrange the widgets created by ``create_controls``."""
        # Eyeball Layout
        eyeball_layout = QtWidgets.QHBoxLayout()
        eyeball_layout.setContentsMargins(1, 1, 1, 1)
        eyeball_layout.addWidget(self.eyeball_label)
        eyeball_layout.addWidget(self.eyeball_lineEdit)
        eyeball_layout.addWidget(self.eyeball_button)

        # Edge Loop Layout
        edgeloop_layout = QtWidgets.QHBoxLayout()
        edgeloop_layout.setContentsMargins(1, 1, 1, 1)
        edgeloop_layout.addWidget(self.edgeloop_label)
        edgeloop_layout.addWidget(self.edgeloop_lineEdit)
        edgeloop_layout.addWidget(self.edgeloop_button)

        # Geometry Input Layout
        geometryInput_layout = QtWidgets.QVBoxLayout()
        geometryInput_layout.setContentsMargins(6, 1, 6, 2)
        geometryInput_layout.addLayout(eyeball_layout)
        geometryInput_layout.addLayout(edgeloop_layout)
        self.geometryInput_group.setLayout(geometryInput_layout)

        # Blink High Layout
        blinkHeight_layout = QtWidgets.QHBoxLayout()
        blinkHeight_layout.setContentsMargins(1, 1, 1, 1)
        blinkHeight_layout.addWidget(self.blinkHeight_value)
        blinkHeight_layout.addWidget(self.blinkHeight_slider)
        self.blinkHeigh_group.setLayout(blinkHeight_layout)

        # joints Layout
        headJnt_layout = QtWidgets.QHBoxLayout()
        headJnt_layout.addWidget(self.headJnt_label)
        headJnt_layout.addWidget(self.headJnt_lineEdit)
        headJnt_layout.addWidget(self.headJnt_button)
        joints_layout = QtWidgets.QVBoxLayout()
        joints_layout.setContentsMargins(6, 4, 6, 4)
        joints_layout.addLayout(headJnt_layout)
        self.joints_group.setLayout(joints_layout)

        # topological autoskin Layout
        skinLoops_layout = QtWidgets.QGridLayout()
        skinLoops_layout.addWidget(self.rigidLoops_label, 0, 0)
        skinLoops_layout.addWidget(self.falloffLoops_label, 0, 1)
        skinLoops_layout.addWidget(self.rigidLoops_value, 1, 0)
        skinLoops_layout.addWidget(self.falloffLoops_value, 1, 1)
        topoSkin_layout = QtWidgets.QVBoxLayout()
        topoSkin_layout.setContentsMargins(6, 4, 6, 4)
        topoSkin_layout.addWidget(self.topSkin_check,
                                  alignment=QtCore.Qt.Alignment())
        topoSkin_layout.addLayout(skinLoops_layout)
        self.topoSkin_group.setLayout(topoSkin_layout)

        # Manual Corners Layout
        intCorner_layout = QtWidgets.QHBoxLayout()
        intCorner_layout.addWidget(self.intCorner_label)
        intCorner_layout.addWidget(self.intCorner_lineEdit)
        intCorner_layout.addWidget(self.intCorner_button)
        extCorner_layout = QtWidgets.QHBoxLayout()
        extCorner_layout.addWidget(self.extCorner_label)
        extCorner_layout.addWidget(self.extCorner_lineEdit)
        extCorner_layout.addWidget(self.extCorner_button)
        manualCorners_layout = QtWidgets.QVBoxLayout()
        manualCorners_layout.setContentsMargins(6, 4, 6, 4)
        manualCorners_layout.addWidget(self.manualCorners_check,
                                       alignment=QtCore.Qt.Alignment())
        manualCorners_layout.addLayout(intCorner_layout)
        manualCorners_layout.addLayout(extCorner_layout)
        self.manualCorners_group.setLayout(manualCorners_layout)

        # Options Layout
        parent_layout = QtWidgets.QHBoxLayout()
        parent_layout.addWidget(self.parent_label)
        parent_layout.addWidget(self.parent_lineEdit)
        parent_layout.addWidget(self.parent_button)
        offset_layout = QtWidgets.QHBoxLayout()
        offset_layout.addWidget(self.ctlShapeOffset_label)
        offset_layout.addWidget(self.ctlShapeOffset_value)
        ctlGrp_layout = QtWidgets.QHBoxLayout()
        ctlGrp_layout.addWidget(self.ctlGrp_label)
        ctlGrp_layout.addWidget(self.ctlGrp_lineEdit)
        ctlGrp_layout.addWidget(self.ctlGrp_button)
        deformersGrp_layout = QtWidgets.QHBoxLayout()
        deformersGrp_layout.addWidget(self.deformersGrp_label)
        deformersGrp_layout.addWidget(self.deformersGrp_lineEdit)
        deformersGrp_layout.addWidget(self.deformersGrp_button)
        options_layout = QtWidgets.QVBoxLayout()
        options_layout.setContentsMargins(6, 1, 6, 2)
        options_layout.addLayout(parent_layout)
        options_layout.addLayout(offset_layout)
        options_layout.addWidget(self.blinkHeigh_group)
        options_layout.addWidget(self.sideRange_check)
        options_layout.addLayout(ctlGrp_layout)
        options_layout.addLayout(deformersGrp_layout)
        self.options_group.setLayout(options_layout)

        # Name prefix
        namePrefix_layout = QtWidgets.QVBoxLayout()
        namePrefix_layout.setContentsMargins(1, 1, 1, 1)
        namePrefix_layout.addWidget(self.prefix_lineEdit)
        self.prefix_group.setLayout(namePrefix_layout)

        # Control name extension
        controlExtension_layout = QtWidgets.QVBoxLayout()
        controlExtension_layout.setContentsMargins(1, 1, 1, 1)
        controlExtension_layout.addWidget(self.control_lineEdit)
        self.control_group.setLayout(controlExtension_layout)

        # Main Layout
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.setContentsMargins(6, 6, 6, 6)
        main_layout.addWidget(self.prefix_group)
        main_layout.addWidget(self.control_group)
        main_layout.addWidget(self.geometryInput_group)
        main_layout.addWidget(self.manualCorners_group)
        main_layout.addWidget(self.options_group)
        main_layout.addWidget(self.joints_group)
        main_layout.addWidget(self.topoSkin_group)
        main_layout.addWidget(self.build_button)
        main_layout.addWidget(self.export_button)

        self.setLayout(main_layout)

    def create_connections(self):
        """Wire up widget signals to the slot methods below."""
        # Spinbox and slider mirror each other's value.
        self.blinkHeight_value.valueChanged[int].connect(
            self.blinkHeight_slider.setValue)
        self.blinkHeight_slider.valueChanged[int].connect(
            self.blinkHeight_value.setValue)

        self.eyeball_button.clicked.connect(partial(self.populate_object,
                                                    self.eyeball_lineEdit))
        self.parent_button.clicked.connect(partial(self.populate_object,
                                                   self.parent_lineEdit))
        self.headJnt_button.clicked.connect(partial(self.populate_object,
                                                    self.headJnt_lineEdit,
                                                    1))
        self.edgeloop_button.clicked.connect(self.populate_edgeloop)
        self.build_button.clicked.connect(self.buildRig)
        self.export_button.clicked.connect(self.exportDict)
        self.intCorner_button.clicked.connect(partial(self.populate_element,
                                                      self.intCorner_lineEdit,
                                                      "vertex"))
        self.extCorner_button.clicked.connect(partial(self.populate_element,
                                                      self.extCorner_lineEdit,
                                                      "vertex"))
        self.ctlGrp_button.clicked.connect(partial(self.populate_element,
                                                   self.ctlGrp_lineEdit,
                                                   "objectSet"))
        self.deformersGrp_button.clicked.connect(partial(
            self.populate_element, self.deformersGrp_lineEdit, "objectSet"))

    # SLOTS ##########################################################
    def populate_element(self, lEdit, oType="transform"):
        """Fill *lEdit* with the current selection if it matches *oType*."""
        if oType == "joint":
            oTypeInst = pm.nodetypes.Joint
        elif oType == "vertex":
            oTypeInst = pm.MeshVertex
        elif oType == "objectSet":
            oTypeInst = pm.nodetypes.ObjectSet
        else:
            oTypeInst = pm.nodetypes.Transform

        oSel = pm.selected()
        if oSel:
            if isinstance(oSel[0], oTypeInst):
                lEdit.setText(oSel[0].name())
            else:
                pm.displayWarning(
                    "The selected element is not a valid %s" % oType)
        else:
            pm.displayWarning("Please select first one %s." % oType)

    def populate_object(self, lEdit, oType=None):
        """Fill *lEdit* with the selected transform (or joint when oType==1)."""
        if oType == 1:
            oType = pm.nodetypes.Joint
        else:
            oType = pm.nodetypes.Transform

        oSel = pm.selected()
        if oSel:
            if isinstance(oSel[0], oType):
                lEdit.setText(oSel[0].name())
            else:
                pm.displayWarning("The selected element is not a valid object")
        else:
            pm.displayWarning("Please select first the object.")

    def populate_edgeloop(self):
        """Store the selected mesh edges as a comma-separated list."""
        oSel = pm.selected(fl=1)
        if oSel:
            edgeList = ""
            separator = ""
            for e in oSel:
                if isinstance(e, pm.MeshEdge):
                    if edgeList:
                        separator = ","
                    edgeList = edgeList + separator + str(e)
            if not edgeList:
                pm.displayWarning("Please select first the eyelid edge loop.")
            elif len(edgeList.split(",")) < 4:
                pm.displayWarning("The minimun edge count is 4")
            else:
                self.edgeloop_lineEdit.setText(edgeList)
        else:
            pm.displayWarning("Please select first the eyelid edge loop.")

    def populateDict(self):
        """Collect the UI state into self.buildDict.

        The order of the "eye" list must match the positional signature of
        ``eyeRig`` exactly, since it is splatted into that call.
        """
        self.buildDict = {}
        blinkH = float(self.blinkHeight_value.value()) / 100.0
        self.buildDict["eye"] = [self.eyeball_lineEdit.text(),
                                 self.edgeloop_lineEdit.text(),
                                 blinkH,
                                 self.prefix_lineEdit.text(),
                                 self.ctlShapeOffset_value.value(),
                                 self.rigidLoops_value.value(),
                                 self.falloffLoops_value.value(),
                                 self.headJnt_lineEdit.text(),
                                 self.topSkin_check.isChecked(),
                                 self.parent_lineEdit.text(),
                                 self.control_lineEdit.text(),
                                 self.sideRange_check.isChecked(),
                                 self.manualCorners_check.isChecked(),
                                 self.intCorner_lineEdit.text(),
                                 self.extCorner_lineEdit.text(),
                                 self.ctlGrp_lineEdit.text(),
                                 self.deformersGrp_lineEdit.text()]

    def buildRig(self):
        """Build the eye rig in the scene from the current UI settings."""
        self.populateDict()
        eyeRig(*self.buildDict["eye"])

    def exportDict(self):
        """Serialize the current configuration to a ``.eyes`` JSON file."""
        self.populateDict()

        data_string = json.dumps(self.buildDict, indent=4, sort_keys=True)
        filePath = pm.fileDialog2(
            dialogStyle=2,
            fileMode=0,
            fileFilter='Eyes Rigger Configuration .eyes (*%s)' % ".eyes")
        if not filePath:
            return
        # fileDialog2 returns a list of paths; unwrap it. (The previous
        # ``isinstance(filePath, basestring)`` check raises NameError on
        # Python 3, where basestring no longer exists.)
        if isinstance(filePath, (list, tuple)):
            filePath = filePath[0]
        # Context manager guarantees the handle is closed even on error.
        with open(filePath, 'w') as f:
            f.write(data_string)
# Build the eye rig from a previously exported .eyes JSON file.
def eyesFromfile(path):
    """Read a ``.eyes`` configuration file and build the rig it describes.

    The file is expected to contain the dict written by
    ``eyeRigUI.exportDict`` (an "eye" key holding the positional argument
    list for ``eyeRig``).
    """
    # Context manager closes the handle; the original json.load(open(path))
    # leaked the file descriptor.
    with open(path) as config_file:
        buildDict = json.load(config_file)
    eyeRig(*buildDict["eye"])
def showEyeRigUI(*args):
    """Open the eye rigger dialog.

    Positional args are accepted and ignored (presumably passed by a Maya
    menu/shelf callback — TODO confirm).
    """
    gqt.showDialog(eyeRigUI)
if __name__ == "__main__":
    # Launch the UI when this module is executed directly (inside Maya).
    showEyeRigUI()

    # Example of rebuilding rigs from exported config files instead:
    # path = "C:\\Users\\miquel\\Desktop\\eye_L.eyes"
    # eyesFromfile(path)
    # path = "C:\\Users\\miquel\\Desktop\\eye_R.eyes"
    # eyesFromfile(path)
| 1.875 | 2 |
Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | 2 | 12912 | #!/usr/bin/env python3
# Interactive script: mirror a web page with wget (via the `sh` module) and
# copy every file matching a substring into a findings directory.
import os
import shutil
import sys
import pathlib
import logging

# I will NEVER EVER use subproccess again
# At least not for something like Popen
try:
    from sh import wget
except Exception:
    print('[!] Just install sh right now!(pip install --user sh)')
    sys.exit(0)

# Python 2 support: make input() behave like raw_input()
if sys.version_info[0] == 2:
    input = raw_input

# Path where this python script is located when it's run
curr_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))

# The URL to mirror (defaults to ema.perfact.de)
url = input('[$] Url(none for ema.perfact.de): ')
url = url if url else 'ema.perfact.de'
print('[*] Url: {}\n'.format(url))

# Get name of the directory where the whole page should be saved
dir_name = input('[$] Directory name for the page(none for "1337"): ')
dir_name = dir_name if dir_name else '1337'
page_dir = curr_dir / dir_name
if page_dir.is_dir():
    print('[!] {} is already a directory and will be overwritten!'.format(page_dir))
    choice = input('[!] Continue?(y/n):').lower()
    if choice != 'y':
        sys.exit(0)
print('[*] Directory to save the page: {}\n'.format(dir_name))

# Get name of the directory where the files we actually want to keep are saved
save_name = input('[$] Directory name to save findings(none for "saved"): ')
save_name = save_name if save_name else 'saved'
save_dir = curr_dir / save_name
if save_dir.is_dir():
    print('[!] {} is already a directory!'.format(save_dir))
    choice = input('[!] Delete it?(y/n): '.format(save_dir)).lower()
    if choice == 'y':
        shutil.rmtree(save_dir.absolute().as_posix())
    else:
        sys.exit(0)
os.makedirs(save_dir.absolute().as_posix())
print('[*] Directory to save findings: {}\n'.format(save_name))

# The search term (which files we want to copy)
print('[*] Everything with the following substring will be copied')
search_term = input('[$] Files to copy to that directory(none for ".png"): ')
search_term = search_term if search_term else '.png'
print('[*] Searchterm: {}\n'.format(search_term))

input('\n[$] Press any key to continue...')

# We will give these exit_codes to the wget call later
# to disable every exit/error message (would look horrible otherwise);
# wget uses exit codes 0-8.
exit_codes = (i for i in range(0, 9))

# Sets off the `wget -m <url> -P <directory>` command.
# It's written this way so we can stream the output of the program live.
try:
    for line in wget('-m', url, '-P', dir_name, _iter=True, _err_to_out=True,
                     _out_bufsize=1, _ok_code=exit_codes):
        print(line)
except Exception:
    pass

# Copying the files we want to save
try:
    # Get every file matching the search term from the mirrored page folder
    files = list(page_dir.glob("**/*{}".format(search_term)))
    if not files:
        print("[!] No matching files found")
    else:
        print("[*] Copying {} *{} files...".format(len(files), search_term))
        for f in files:
            shutil.copy(f.absolute().as_posix(), save_dir.absolute().as_posix())
except Exception as e:
    print('[!] Something went wrong while copying data')
    print(e)

# Deleting the mirrored webpage, since we don't need it anymore
print('\n[*] Cleaning up...\n')
if page_dir.is_dir():
    shutil.rmtree(page_dir.absolute().as_posix())
print('[*] All done!')
| 2.8125 | 3 |
Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 1 | 12913 | import ase
from ase import Atoms
from ase.atom import Atom
import sys
from ase.visualize import view
import pickle
f = open(sys.argv[1],'r') #The .amc file
p = pickle.load(f)
positions = p['atomspositions']
atms = Atoms()
for p0 in positions:
a = Atom('Au',position=p0)
atms.append(a)
atms.center(vacuum=2)
view(atms)
| 2.8125 | 3 |
bot_settings_example.py | nikmedoed/BalanceBot | 0 | 12914 | # это dev среда
# Bot credentials and the chats it is allowed to act on (fill in real values).
TELEGRAM_TOKEN = "..."
RELATIVE_CHAT_IDS = [ "...", '...']

# User-facing bot messages (Russian); %s placeholders are filled at runtime.
TEXT = {
    "bot_info": ('Привет, я бот, который отвечает за равномерное распределение участников по комнатам.\n\n'
                 'Нажми кнопку, если готов сменить комнату'),
    "get_link": "Получить рекомендацию",
    "new_room": "Ваша новая комната\n%s",
    "nothing_to_change": "На данный момент ничего менять не требуется"
}
def logger(*message):
    """Minimal logging stub: dump all positional args to stdout as a tuple."""
    record = message
    print(record)
test/xslt/borrowed/sm_20000304.py | zepheira/amara | 6 | 12915 | ########################################################################
# test/xslt/sm20000304.py
# Example from <NAME> <<EMAIL>>
# to <NAME> <<EMAIL>>
# on 4 March 2000
"""
From: "<NAME>" <<EMAIL>>
To: <<EMAIL>>
Subject: Re: SVG charts and graphs from XML input
Date: Sat, 4 Mar 2000 18:02:53 -0800 (19:02 MST)
This is by no means a bullet-proof, one-size-fits
all charting stylesheet, but it *was* my first foray
into SVG from XSLT.
Given XML results of an Oracle XSQL Page like:
<xsql:query xmlns:xsql="urn:oracle-xsql" connection="demo">
select ename, sal from dept
</xsql:query>
Which under the covers produces a dynamic XML doc like:
[SNIP source]
The following "salchart.xsl" XSLT stylesheet
renders a dynamic bar chart with "cool colors"
for the employees in the department.
You may have to modify the namespace of the
Java extension functions to get it to work in
XT or Saxon or other XSLT engines.
[SNIP stylesheet]
"""
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
### dalke - added to make the imports work
def NumberValue(x):
    """Identity pass-through standing in for an XPath number-conversion
    helper; the extension functions below call it on their arguments."""
    return x
# Extension-function namespaces, following Oracle's XSLT Java-binding
# convention: <base-namespace>/<fully.qualified.JavaClass>.
ORACLE_JAVA_NS = 'http://www.oracle.com/XSL/Transform/java'
JAVA_COLOR_NS = ORACLE_JAVA_NS + '/java.awt.Color'
JAVA_INTEGER_NS = ORACLE_JAVA_NS + '/java.lang.Integer'
def Java_Color_GetHSBColor(context, hue, saturation, brightness):
    """Port of java.awt.Color.getHSBColor: HSB components -> packed ARGB int.

    `context` is the XPath extension-function context and is unused.
    Returns the color as an integer of the form 0xAARRGGBB with a fully
    opaque alpha channel.
    """
    hue = NumberValue(hue)
    saturation = NumberValue(saturation)
    brightness = NumberValue(brightness)
    if saturation == 0:
        # Achromatic: every channel carries the brightness.
        r = g = b = int(brightness * 255)
    else:
        r = g = b = 0
        # Scale hue into one of six sectors of the color wheel.
        h = (hue - int(hue)) * 6.0
        f = h - int(h)
        p = brightness * (1.0 - saturation)
        q = brightness * (1.0 - saturation * f)
        t = brightness * (1.0 - (saturation * (1.0 - f)))
        h = int(h)
        if h == 0:
            r = int(brightness * 255)
            g = int(t * 255)
            b = int(p * 255)
        elif h == 1:
            r = int(q * 255)
            g = int(brightness * 255)
            b = int(p * 255)
        elif h == 2:
            r = int(p * 255)
            g = int(brightness * 255)
            b = int(t * 255)
        elif h == 3:
            r = int(p * 255)
            g = int(q * 255)
            b = int(brightness * 255)
        elif h == 4:
            r = int(t * 255)
            g = int(p * 255)
            b = int(brightness * 255)
        elif h == 5:
            r = int(brightness * 255)
            g = int(p * 255)
            b = int(q * 255)
    # The original `0xff000000L` long literal is a SyntaxError on Python 3;
    # the plain int literal behaves identically on Python 2 as well.
    return 0xff000000 | (r << 16) | (g << 8) | (b << 0)
def Java_Color_GetRed(context, color):
    """Extract the red channel (0-255) from a packed ARGB color int."""
    color = NumberValue(color)
    # int() replaces the Python-2-only long(), which is a NameError on Python 3.
    return (int(color) >> 16) & 0xff
def Java_Color_GetGreen(context, color):
    """Extract the green channel (0-255) from a packed ARGB color int."""
    color = NumberValue(color)
    # int() replaces the Python-2-only long(), which is a NameError on Python 3.
    return (int(color) >> 8) & 0xff
def Java_Color_GetBlue(context, color):
    """Extract the blue channel (0-255) from a packed ARGB color int."""
    color = NumberValue(color)
    # int() replaces the Python-2-only long(), which is a NameError on Python 3.
    return int(color) & 0xff
def Java_Integer_ToHexString(context, number):
    """Port of java.lang.Integer.toHexString: uppercase hex, no 0x prefix."""
    return '%X' % NumberValue(number)
# Maps (namespace-uri, local-name) -> Python implementation so the XSLT
# engine can resolve the Java-namespace extension functions the stylesheet
# calls (Color:getHSBColor, Integer:toHexString, ...).
ExtFunctions = {
    (JAVA_COLOR_NS, 'getHSBColor') : Java_Color_GetHSBColor,
    (JAVA_COLOR_NS, 'getRed') : Java_Color_GetRed,
    (JAVA_COLOR_NS, 'getGreen') : Java_Color_GetGreen,
    (JAVA_COLOR_NS, 'getBlue') : Java_Color_GetBlue,
    (JAVA_INTEGER_NS, 'toHexString') : Java_Integer_ToHexString,
    }
class test_xslt_call_template_ed_20010101(xslt_test):
    """Regression case: render an SVG bar chart from XML via XSLT.

    ``source``, ``transform`` and ``expected`` are fixture data consumed by
    the xslt_test harness. The stylesheet exercises the Java-namespace
    extension functions defined above (Color:getHSBColor etc.).
    """
    source = stringsource("""<?xml version = '1.0'?>
<ROWSET>
<ROW num="1">
<ENAME>CLARK</ENAME>
<SAL>2450</SAL>
</ROW>
<ROW num="2">
<ENAME>KING</ENAME>
<SAL>3900</SAL>
</ROW>
<ROW num="3">
<ENAME>MILLER</ENAME>
<SAL>1300</SAL>
</ROW>
</ROWSET>
""")
    transform = stringsource('''<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:Color="http://www.oracle.com/XSL/Transform/java/java.awt.Color"
xmlns:Integer="http://www.oracle.com/XSL/Transform/java/java.lang.Integer"
exclude-result-prefixes="Color Integer">
<xsl:output media-type="image/svg"/>
<xsl:template match="/">
<svg xml:space="preserve" width="1000" height="1000">
<desc>Salary Chart</desc>
<g style="stroke:#000000;stroke-width:1;font-family:Arial;font-size:16">
<xsl:for-each select="ROWSET/ROW">
<xsl:call-template name="drawBar">
<xsl:with-param name="rowIndex" select="position()"/>
<xsl:with-param name="ename" select="ENAME"/>
<xsl:with-param name="sal" select="number(SAL)"/>
</xsl:call-template>
</xsl:for-each>
</g>
</svg>
</xsl:template>
<xsl:template name="drawBar">
<xsl:param name="rowIndex" select="number(0)"/>
<xsl:param name="ename"/>
<xsl:param name="sal" select="number(0)"/>
<xsl:variable name="xOffset" select="number(100)"/>
<xsl:variable name="yOffset" select="number(20)"/>
<xsl:variable name="barHeight" select="number(25)"/>
<xsl:variable name="gap" select="number(10)"/>
<xsl:variable name="x" select="$xOffset"/>
<xsl:variable name="y" select="$yOffset + $rowIndex * ($barHeight + $gap)"/>
<xsl:variable name="barWidth" select="$sal div number(10)"/>
<rect x="{$x}" y="{$y}" height="{$barHeight}" width="{$barWidth}">
<xsl:attribute name="style">
<xsl:text>fill:#</xsl:text>
<xsl:call-template name="getCoolColorStr" xml:space="default">
<xsl:with-param name="colorIndex" select="$rowIndex"/>
<xsl:with-param name="totalColors" select="number(14)"/>
</xsl:call-template>
<xsl:text> </xsl:text>
</xsl:attribute>
</rect>
<xsl:variable name="fontHeight" select="number(18)"/>
<text x="20" y="{$y + $fontHeight}">
<xsl:value-of select="$ename"/>
</text>
<xsl:variable name="x2" select="$xOffset + $barWidth + 10"/>
<text x="{$x2}" y="{$y + $fontHeight}">
<xsl:value-of select="$sal"/>
</text>
</xsl:template>
<xsl:template name="getCoolColorStr">
<xsl:param name="colorIndex"/>
<xsl:param name="totalColors"/>
<xsl:variable name="SATURATION" select="number(0.6)"/>
<xsl:variable name="BRIGHTNESS" select="number(0.9)"/>
<xsl:variable name="hue" select="$colorIndex div $totalColors"/>
<xsl:variable name="c" select="Color:getHSBColor($hue, $SATURATION, $BRIGHTNESS)"/>
<xsl:variable name="r" select="Color:getRed($c)"/>
<xsl:variable name="g" select="Color:getGreen($c)"/>
<xsl:variable name="b" select="Color:getBlue($c)"/>
<xsl:variable name="rs" select="Integer:toHexString($r)"/>
<xsl:variable name="gs" select="Integer:toHexString($g)"/>
<xsl:variable name="bs" select="Integer:toHexString($b)"/>
<xsl:if test="$r &lt; 16">0</xsl:if><xsl:value-of select="$rs"/>
<xsl:if test="$g &lt; 16">0</xsl:if><xsl:value-of select="$gs"/>
<xsl:if test="$b &lt; 16">0</xsl:if><xsl:value-of select="$bs"/>
</xsl:template>
</xsl:stylesheet>
''')
    parameters = {}
    expected = """<?xml version='1.0' encoding='UTF-8'?>
<svg height='1000' xml:space='preserve' width='1000'>
<desc>Salary Chart</desc>
<g style='stroke:#000000;stroke-width:1;font-family:Arial;font-size:16'>
<rect height='25' x='100' style='fill:#E5965B ' width='245' y='55'/><text x='20' y='73'>CLARK</text><text x='355' y='73'>2450</text>
<rect height='25' x='100' style='fill:#E5D15B ' width='390' y='90'/><text x='20' y='108'>KING</text><text x='500' y='108'>3900</text>
<rect height='25' x='100' style='fill:#BEE55B ' width='130' y='125'/><text x='20' y='143'>MILLER</text><text x='240' y='143'>1300</text>
</g>
</svg>"""

    # Original test driver kept for reference; superseded by the harness:
    # def test_transform(self):
    #     import sys
    #     from amara.xslt import transform
    #
    #     result = transform(self.source, self.transform, output=io)
    #
    #     #FIXME: the numerics break under Python 2.3
    #     test_harness.XsltTest(tester, source, [sheet], expected_1,
    #                           extensionModules=[__name__])
    #
    #     self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
    #
    #     return
# Hide the test framework base class from nose's collector
del xslt_test

if __name__ == '__main__':
    # Run through amara's test entry point when executed directly
    test_main()
| 2.65625 | 3 |
model-server/config.py | campos537/deep-fashion-system | 1 | 12916 | import json
def Config(config_path):
    """Load the JSON configuration file at *config_path* and return its contents."""
    with open(config_path) as fp:
        return json.load(fp)
| 2.171875 | 2 |
pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 4 | 12917 | """编译go语言模块."""
import warnings
from typing import List, Optional
from pathlib import Path
from pmfp.utils.run_command_utils import run
def _build_grpc(includes: str, flag: str, to: str, target: str, cwd: Path) -> None:
    """Invoke protoc to compile *target* proto files into Go + go-grpc code.

    Args:
        includes: pre-joined ``-I <dir>`` include flags.
        flag: extra protoc flags (may be an empty string).
        to: output directory for both go_out and go-grpc_out.
        target: space-separated .proto files to compile.
        cwd: directory in which protoc is executed.
    """
    command = f"protoc {includes} {flag} --go_out={to} --go-grpc_out={to} {target}"
    try:
        run(command, cwd=cwd, visible=True)
    except Exception as e:
        # Most failures come from the protoc-gen-go / protoc-gen-go-grpc
        # plugins not being installed; warn with a hint instead of crashing.
        warnings.warn(f"""根据模板构造grpc项目失败
{str(e)}
编译为go语言依赖如下插件,请检查是否安装:
"google.golang.org/protobuf/cmd/protoc-gen-go"
"google.golang.org/grpc/cmd/protoc-gen-go-grpc"
""")
    else:
        print(f"编译grpc项目 {target} 为go语言模块完成!")
def build_pb_go(serv_file: str, includes: List[str], to: str,
                source_relative: bool, cwd: Path, files: Optional[List[str]] = None, **kwargs: str) -> None:
    """Compile gRPC protobuf definition files into Go modules.

    Args:
        serv_file (str): the target .proto file defining the gRPC service.
        includes (List[str]): directories containing the protobuf files.
        to (str): directory where the generated modules are written.
        source_relative (bool): whether to use source-relative paths as
            package names (Go-only ``paths=source_relative`` option).
        cwd (Path): working directory for the protoc invocation.
        files (Optional[List[str]]): additional .proto files to compile.
        **kwargs: extra ``flag=value`` pairs appended verbatim to protoc.
    """
    includes_str = " ".join([f"-I {include}" for include in includes])
    target_str = serv_file
    if files:
        target_str += " " + " ".join(files)
    flag_str = ""
    if source_relative:
        flag_str += " --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative"
    if kwargs:
        if flag_str:
            flag_str += " "
        flag_str += " ".join([f"{k}={v}" for k, v in kwargs.items()])
    _build_grpc(includes_str, flag_str, to, target_str, cwd)
| 2.375 | 2 |
instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 0 | 12918 | <reponame>imekenye/Instagram-clone
# Generated by Django 2.2.1 on 2019-05-22 00:07
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Image.profile field to Image.user_profile."""

    dependencies = [
        ('instapp', '0002_auto_20190522_0006'),
    ]

    operations = [
        migrations.RenameField(
            model_name='image',
            old_name='profile',
            new_name='user_profile',
        ),
    ]
| 1.492188 | 1 |
app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 1 | 12919 | import json
import logging
from flask import jsonify, make_response, request
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from http import HTTPStatus
from marshmallow import ValidationError, Schema
from werkzeug.security import generate_password_hash
from app.models import db
from app.models.user import User, user_schema
from app.api.utils import get_url
from app.utils.exceptions import ApiException
logger = logging.getLogger(__name__)
class RequestSchema:
    """Marshmallow schemas validating incoming request payloads."""

    class PostUsers(Schema):
        """Body of POST /users: every signup field is required.

        Field types and validators are cloned from the canonical
        ``user_schema`` so request validation stays in sync with the model.
        """
        name = type(user_schema.fields['name'])(
            required=True, validate=user_schema.fields['name'].validate)
        email = type(user_schema.fields['email'])(
            required=True, validate=user_schema.fields['email'].validate)
        password = type(user_schema.fields['password'])(
            required=True, validate=user_schema.fields['password'].validate)
        role_id = type(user_schema.fields['role_id'])(
            required=True, validate=user_schema.fields['role_id'].validate)
class ResponseSchema:
    """Marshmallow schemas for serializing API responses."""

    class GetUser(Schema):
        """Public representation of a user (password and role are omitted).

        Field types and validators are cloned from the canonical
        ``user_schema`` so responses stay consistent with the model.
        """
        # Bug fix: the id field previously reused the *name* field's
        # validator (copy-paste error); it now uses its own.
        id = type(user_schema.fields['id'])(
            required=True, validate=user_schema.fields['id'].validate)
        name = type(user_schema.fields['name'])(
            required=True, validate=user_schema.fields['name'].validate)
        email = type(user_schema.fields['email'])(
            required=True, validate=user_schema.fields['email'].validate)
class UserListApi(Resource):
    """
    GET: Return all users.
    POST: Create new user account.
    PUT: N/A
    DELETE: N/A
    """
    def post(self):
        """Sign up: validate the JSON payload and create a new User row.

        Returns 201 with a self link on success; 400 on validation errors,
        409 when the name or email is taken, 500 otherwise.
        """
        status = HTTPStatus.CREATED
        ret = {}
        error_msg = {}
        try:
            data = request.get_json()
            if data is None:
                raise ApiException('Request is empty.', status=HTTPStatus.BAD_REQUEST)

            errors = RequestSchema.PostUsers().validate(data)
            if errors:
                raise ValidationError(errors)
            data = RequestSchema.PostUsers().dump(data)

            # Username and email must both be unique.
            if User.query.filter_by(name=data['name']).count() > 0:
                raise ApiException(
                    f"Username:{data['name']} is already used.", status=HTTPStatus.CONFLICT)
            if User.query.filter_by(email=data['email']).count() > 0:
                raise ApiException(
                    f"Email:{data['email']} is already used.", status=HTTPStatus.CONFLICT)

            # Never persist the plain-text password.
            data['password'] = generate_password_hash(data['password'])
            user = User(**data)
            db.session.add(user)
            db.session.commit()
            ret['link'] = {'self': get_url(tail_url=user.id)}
        except ValidationError as e:
            status = HTTPStatus.BAD_REQUEST
            error_msg = e.normalized_messages()
        except ApiException as e:
            status = e.status
            error_msg = str(e)
        except Exception as e:
            error_msg = f'{type(e)} : {str(e)} '
            if status == HTTPStatus.CREATED:
                status = HTTPStatus.INTERNAL_SERVER_ERROR
                error_msg = f'Signup failed due to internal server error. ' + error_msg
        finally:
            # Any non-201 outcome rolls the transaction back and reports
            # the error payload instead of the success link.
            if status != HTTPStatus.CREATED:
                db.session.rollback()
                ret = { 'error': { 'message': error_msg } }
                logger.error(ret)
            return make_response(jsonify(ret), status)
class UserApi(Resource):
    """
    GET: Return user.
    POST: N/A
    PUT: Update user data.
    DELETE: Delete user account.
    """
    @jwt_required
    def get(self, id):
        """Return the user with the given id.

        Responds 200 with the serialized user, 404 when the id is unknown,
        500 on unexpected failures.
        """
        status = HTTPStatus.OK
        ret = {}
        error_msg = ''
        try:
            query = User.query.filter_by(id=id)
            user = query.first()
            if not user:
                raise ApiException(
                    f'User ID:{id} was not found.', status=HTTPStatus.NOT_FOUND)
            ret = ResponseSchema.GetUser().dump(user)
            ret['link'] = {'self': get_url(tail_url='')}
        except ApiException as e:
            status = e.status
            error_msg = str(e)
        except Exception as e:
            # Bug fix: this used to read `status.e = ...`, which tried to set
            # an attribute on the HTTPStatus enum member (raising
            # AttributeError) instead of recording the 500 status.
            status = HTTPStatus.INTERNAL_SERVER_ERROR
            error_msg = str(e)
        finally:
            if error_msg != '':
                ret = { 'error': { 'message': error_msg } }
                logger.error(ret)
            return make_response(jsonify(ret), status)
| 2.359375 | 2 |
src/count_targets.py | kahvel/MAProject | 0 | 12920 | from main import readData, getTrueLabels, binariseLabels, removePacketsAfterChange
# Python 2 script: count positive samples per target in each recording.
# Load the raw target-label recordings for the three test sessions.
label_data = list()
label_data.append(readData("..\\data\\test5_targets_1.csv"))
label_data.append(readData("..\\data\\test5_targets_2.csv"))
label_data.append(readData("..\\data\\test5_targets_3.csv"))
labels = [getTrueLabels(label) for label in label_data]

# One-vs-rest binarisation for each of the three targets.
binarised_labels = dict()
binarised_labels[1] = [binariseLabels(label, 1) for label in labels]
binarised_labels[2] = [binariseLabels(label, 2) for label in labels]
binarised_labels[3] = [binariseLabels(label, 3) for label in labels]

# Discard 256 packets after each target change (presumably one second at the
# recording's sampling rate — TODO confirm) to drop transition artefacts.
for target in [1,2,3]:
    for dataset in [0,1,2]:
        _, binarised_labels[target][dataset] =\
            removePacketsAfterChange(binarised_labels[target][dataset], binarised_labels[target][dataset], label_data[dataset], 256)

# Report how many positive samples remain per target/dataset pair.
for target in [1,2,3]:
    for dataset in [0,1,2]:
        print "Dataset:", str(dataset+1), "Target:", str(target), "Count:", str(sum(binarised_labels[target][dataset]))
| 2.546875 | 3 |
src/imagine/goal_sampler.py | jordyantunes/Imagine | 20 | 12921 | import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
    def __init__(self,
                 policy_language_model,
                 reward_language_model,
                 goal_dim,
                 one_hot_encoder,
                 params):
        """Track discovered goal descriptions and sample training targets.

        Args:
            policy_language_model: encoder turning goal strings into policy inputs.
            reward_language_model: encoder used by the reward function
                (may be None, in which case reward encodings are not stored).
            goal_dim: dimensionality of the goal encoding.
            one_hot_encoder: word-level one-hot encoder for goal sentences.
            params: experiment configuration dict.
        """
        self.policy_language_model = policy_language_model
        self.reward_language_model = reward_language_model
        self.goal_dim = goal_dim
        self.params = params

        # Counters over all feedbacks received so far.
        self.nb_feedbacks = 0
        self.nb_positive_feedbacks = 0
        self.nb_negative_feedbacks = 0

        # Bidirectional mappings between goal strings, internal memory ids,
        # oracle ids (index in train_descriptions) and one-hot encodings.
        self.feedback2id = dict()
        self.id2feedback = dict()
        self.id2oracleid = dict()
        self.feedback2one_hot = dict()
        self.id2one_hot = dict()

        # Per-goal bookkeeping, stored as parallel lists indexed by memory_id.
        self.feedback_memory = dict(memory_id=[],
                                    string=[],
                                    iter_discovery=[],
                                    target_counter=[],
                                    reached_counter=[],
                                    oracle_id=[],
                                    f1_score=[],
                                    policy_encoding=[],
                                    reward_encoding=[],
                                    imagined=[],
                                    )
        # Goals invented by the sentence generator, with their competence
        # and learning-progress estimates.
        self.imagined_goals = dict(string=[],
                                   competence=[],
                                   lp=[])
        self.one_hot_encoder = one_hot_encoder
        # Sentence generator used to imagine new goals from known ones.
        self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
                                                         params['test_descriptions'],
                                                         sentences=None,
                                                         method=params['conditions']['imagination_method'])
        self.nb_discovered_goals = 0
        self.score_target_goals = None
        self.perceived_learning_progress = None
        self.perceived_competence = None
        self.feedback_stats = None

        # MPI / rollout bookkeeping.
        self.rank = MPI.COMM_WORLD.Get_rank()
        self.num_cpus = params['experiment_params']['n_cpus']
        self.rollout_batch_size = params['experiment_params']['rollout_batch_size']

        # Memory ids split by whether the goal was imagined or discovered.
        self.not_imagined_goal_ids = np.array([])
        self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
# embeddings must be updated when the language model is udpated
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test 50 goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
Sample targets for all cpus and all batch, then scatter to the different cpus
"""
# Decide whether to exploit or not
exploit = True if np.random.random() < 0.1 else False
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
    """Serves evaluation goals drawn from the fixed set of train descriptions,
    either round-robin or uniformly at random."""
    def __init__(self, policy_language_model, one_hot_encoder, params):
        self.descriptions = params['train_descriptions']
        self.nb_descriptions = len(self.descriptions)
        # Round-robin cursor over the descriptions.
        self.count = 0
        self.policy_language_model = policy_language_model
        self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
        self.params = params

    def reset(self):
        """Restart the round-robin cursor at the first description."""
        self.count = 0

    def sample(self, method='robin'):
        """Pick one description and replicate it over the rollout batch.

        method: 'robin' follows the internal cursor, 'random' draws uniformly.
        Returns (exploit, goals_str, goals_encodings, goals_ids).
        """
        if method == 'robin':
            ind = self.count
        elif method == 'random':
            ind = np.random.randint(self.nb_descriptions)
        else:
            raise NotImplementedError
        batch = range(self.rollout_batch_size)
        goal = self.descriptions[ind]
        goals_str = [goal for _ in batch]
        # Encode once per batch entry, mirroring a fresh encoding per rollout.
        goals_encodings = [self.policy_language_model.encode(goal).flatten() for _ in batch]
        goals_ids = [ind for _ in batch]
        self.count += 1
        return True, goals_str, goals_encodings, goals_ids
| 2.203125 | 2 |
src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 0 | 12922 | #!/usr/bin/env python
#This is different from AIY Kit's actions
#Copying and Pasting AIY Kit's actions commands will not work
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from gmusicapi import Mobileclient
from googletrans import Translator
from gtts import gTTS
import requests
import os
import os.path
import RPi.GPIO as GPIO
import time
import re
import subprocess
import json
import urllib.request
import pafy
# API key for YouTube and the KS custom search engine.
# NOTE: placeholder value -- must be replaced with a real Google Cloud API key
# before the YouTube features work.
google_cloud_api_key='ENTER-YOUR-GOOGLE-CLOUD-API-KEY-HERE'
# YouTube Data API constants (service name / version used by googleapiclient.build).
DEVELOPER_KEY = google_cloud_api_key
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
# Handle to the currently running playback subprocess; None until playback starts.
playshell = None
| 1.75 | 2 |
openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 0 | 12923 | from Qt import QtWidgets, QtGui
from .categories import (
CategoryState,
SystemWidget,
ProjectWidget
)
from .widgets import ShadowWidget
from .. import style
class MainWidget(QtWidgets.QWidget):
    """Top-level OpenPype Settings window holding the System and Project tabs.

    While any tab reports a Working state, a translucent "Working..." shadow
    overlay is shown on top of the whole window.
    """
    widget_width = 1000
    widget_height = 600

    def __init__(self, user_role, parent=None):
        super(MainWidget, self).__init__(parent)

        self.setObjectName("MainWidget")
        self.setWindowTitle("OpenPype Settings")
        self.resize(self.widget_width, self.widget_height)
        self.setStyleSheet(style.load_stylesheet())
        self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))

        tabs = QtWidgets.QTabWidget(parent=self)
        system_widget = SystemWidget(user_role, tabs)
        project_widget = ProjectWidget(user_role, tabs)
        tabs.addTab(system_widget, "System")
        tabs.addTab(project_widget, "Project")

        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(5, 5, 5, 5)
        main_layout.setSpacing(0)
        main_layout.addWidget(tabs)
        self.setLayout(main_layout)

        self._shadow_widget = ShadowWidget("Working...", self)

        category_widgets = [system_widget, project_widget]
        for widget in category_widgets:
            widget.saved.connect(self._on_tab_save)
            widget.state_changed.connect(self._on_state_change)
        self.tab_widgets = category_widgets

    def _on_tab_save(self, source_widget):
        # Let every tab react to a save triggered from any one of them.
        for widget in self.tab_widgets:
            widget.on_saved(source_widget)

    def _on_state_change(self):
        working = any(
            widget.state is CategoryState.Working
            for widget in self.tab_widgets
        )
        # Nothing to do when visibility already matches the working state.
        if self._shadow_widget.isVisible() == working:
            return

        self._shadow_widget.setVisible(working)

        # Process events so the visibility change is painted immediately.
        app = QtWidgets.QApplication.instance()
        if app:
            app.processEvents()

    def reset(self):
        for widget in self.tab_widgets:
            widget.reset()
| 2.09375 | 2 |
azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | 0 | 12924 | <gh_stars>0
import lambda_handler
from unittest import TestCase
from mock import call, patch, Mock
from datetime import datetime
import boto3
import json
from botocore.stub import Stubber
import urllib3
mock_s3_client = boto3.client('s3')
s3_stubber = Stubber(mock_s3_client)
list_objects_response = {
'IsTruncated': False,
'Contents': [
{
'Key': 'return1.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'do_not_return.txt',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'return2.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
],
'Name': 'string',
'EncodingType': 'url',
'KeyCount': 123,
'ContinuationToken': 'string'
}
s3_stubber.add_response('list_objects_v2', list_objects_response)
s3_stubber.activate()
mock_sm_client = boto3.client('secretsmanager')
sm_stubber = Stubber(mock_sm_client)
mock_secret_value_response = {
'ARN': 'arn:aws:secretsmanager:eu-west-7:123456789012:secret:tutorials/MyFirstSecret-jiObOV',
'Name': 'string',
'VersionId': 'EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE',
'SecretBinary': b'{"azkaban_username": "test_user", "azkaban_password": "<PASSWORD>"}',
'CreatedDate': datetime(2015, 1, 1)
}
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.activate()
data_non_fail = json.dumps({
"status" : "error",
"message" : "Project already exists.",
}).encode('utf-8')
http_non_fail_error= Mock()
http_non_fail_error.data = data_non_fail
data_fail = json.dumps({
"error" : "error",
"message" : "Other message.",
}).encode('utf-8')
http_raise_error = Mock()
http_raise_error.data = data_fail
http_status_error = Mock()
http_status_error.data = "non JSON error response".encode('utf-8')
http_status_error.status = 418
session_data = json.dumps({
"status" : "success",
"session.id" : "test-session-id-12345432"
}).encode('utf-8')
http_session = Mock()
http_session.data = session_data
http_session.status = 200
class LambdaHandlerTests(TestCase):
    """Unit tests for lambda_handler.

    S3/SecretsManager clients are botocore-stubbed at module level and HTTP
    exchanges are simulated with Mock objects exposing .data / .status.
    NOTE: stacked @patch decorators inject mocks bottom-up -- the decorator
    closest to the function supplies the first mock argument after self.
    """
    def test_get_files_from_s3(self):
        # Only the .zip keys from the stubbed listing are expected back.
        result = lambda_handler.get_files_from_s3("bucket_id", "s3_dir", mock_s3_client)
        assert result == ['return1.zip', 'return2.zip']
    @patch('lambda_handler.create_project')
    @patch('urllib3.PoolManager')
    def test_upload_to_azkaban_api_error_in_response(self, mock_http, mock_create_project):
        # Body contains an "error" key -> the upload must raise ResponseError.
        mock_http.request.return_value = http_raise_error
        with self.assertRaises(urllib3.exceptions.ResponseError) as context:
            lambda_handler.upload_to_azkaban_api('zip_file', 'zip_file_name', 'session_id', mock_http, 'azkaban_url')
        mock_http.request.assert_called_once()
        self.assertTrue(str(context.exception) == "Failure uploading zip_file_name to Azkaban API - Error in API response body.")
    @patch('lambda_handler.create_project')
    @patch('urllib3.PoolManager')
    def test_upload_to_azkaban_api_non_200_status(self, mock_http, mock_create_project):
        # Non-JSON body with HTTP 418 -> status-code failure path.
        mock_http.request.return_value = http_status_error
        with self.assertRaises(urllib3.exceptions.ResponseError) as context:
            lambda_handler.upload_to_azkaban_api('zip_file', 'zip_file_name', 'session_id', mock_http, 'azkaban_url')
        mock_http.request.assert_called_once()
        self.assertTrue(str(context.exception) == "Failure uploading zip_file_name to Azkaban API - non 200 status returned.")
    @patch('urllib3.PoolManager')
    def test_create_project_error_handling_error_path(self, mock_http):
        # Unrecognized error message -> re-raised with the message text.
        mock_http.request.return_value = http_raise_error
        with self.assertRaises(urllib3.exceptions.ResponseError) as context:
            lambda_handler.create_project('azkaban_url', mock_http, 'session_id', 'test_project')
        mock_http.request.assert_called_once()
        self.assertTrue(str(context.exception) == 'Other message.')
    @patch('urllib3.PoolManager')
    def test_create_project_error_handling_happy_path(self, mock_http):
        # "Project already exists." is tolerated and must not raise.
        mock_http.request.return_value = http_non_fail_error
        lambda_handler.create_project('azkaban_url', mock_http, 'session_id', 'test_project')
        mock_http.request.assert_called_once()
    @patch('lambda_handler.os.getenv')
    @patch('urllib3.PoolManager')
    @patch('lambda_handler.boto3')
    def test_establish_azkaban_session_raise_error(self, mock_boto3, mock_http, mock_getenv):
        mock_boto3.client.return_value = mock_sm_client
        mock_http.request.return_value = http_non_fail_error
        mock_getenv.side_effect = ["www.test_url.com", "test_secret"]
        with self.assertRaises(urllib3.exceptions.ResponseError) as context:
            lambda_handler.establish_azkaban_session(mock_http)
        mock_http.request.assert_called_once()
        # "establising" (sic) matches the message text in lambda_handler.
        self.assertTrue(str(context.exception) == 'Failure establising Azkaban API session.')
    @patch('lambda_handler.os.getenv')
    @patch('urllib3.PoolManager')
    @patch('lambda_handler.boto3')
    def test_establish_azkaban_session(self, mock_boto3, mock_http, mock_getenv):
        mock_boto3.client.return_value = mock_sm_client
        mock_http.request.return_value = http_session
        mock_getenv.side_effect = ["www.test_url.com", "test_secret"]
        result = lambda_handler.establish_azkaban_session(mock_http)
        assert result == "test-session-id-12345432"
| 2.078125 | 2 |
cripts/usernames/username.py | lakiw/cripts | 2 | 12925 | import uuid
from mongoengine import Document, StringField, ListField, UUIDField
from django.conf import settings
from cripts.core.cripts_mongoengine import CriptsBaseAttributes, CriptsSourceDocument
from cripts.core.cripts_mongoengine import CriptsActionsDocument
class UserName(CriptsBaseAttributes, CriptsSourceDocument, CriptsActionsDocument,
               Document):
    """
    MongoDB document describing a username observed in one or more datasets.

    The raw username is stored in ``name``; ``username_id`` is a UUID used as
    the stable lookup key so raw (potentially hostile) username strings never
    become keys -- see the schema_doc note about "little bobby tables".
    """
    meta = {
        "collection": settings.COL_USERNAMES,
        "cripts_type": 'UserName',
        "latest_schema_version": 1,
        "schema_doc": {
            'name': 'The actual username',
            'username_id': 'An ID corresponding to the username since using the raw username as the key can run into little bobby tables issues',
            'description': 'Description of the e-mail address',
            'datasets': ('List [] of datasets this username'
                        ' appeared in'),
            'source': ('List [] of sources who provided information about this'
                       ' username'),
        },
        # Options consumed by the jTable listing/detail views.
        "jtable_opts": {
            'details_url': 'cripts.usernames.views.username_detail',
            'details_url_key': 'username_id',
            'default_sort': "name",
            'searchurl': 'cripts.usernames.views.usernames_listing',
            'fields': [ "name", "created",
                        "source", "id", "username_id"],
            'jtopts_fields': [ "name",
                               "created",
                               "source",
                               "favorite",
                               "id", "username_id"],
            'hidden_fields': ["username_id", "id"],
            'linked_fields': ["source", ],
            'details_link': 'name',
            'no_sort': []
        }
    }
    # The actual username string.
    name = StringField(required=True)
    # Free-text description of the username.
    description = StringField(required=True)
    # Stable UUID key for the username (see class docstring).
    username_id = UUIDField(binary=True, required=True, default=uuid.uuid4)
    # Datasets in which this username appeared.
    datasets = ListField(required=False)
| 2.265625 | 2 |
sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 1 | 12926 | import pytest
import operator as op
from sweetpea import fully_cross_block
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window
from sweetpea.encoding_diagram import __generate_encoding_diagram
# Basic two-level stimulus factors shared by all tests below.
color = Factor("color", ["red", "blue"])
text = Factor("text", ["red", "blue"])
# Derived factor: whether color and text carry the same value within a trial.
con_level = DerivedLevel("con", WithinTrial(op.eq, [color, text]))
inc_level = DerivedLevel("inc", WithinTrial(op.ne, [color, text]))
con_factor = Factor("congruent?", [con_level, inc_level])
# Transition factors: whether a factor's level repeats across consecutive trials.
color_repeats_factor = Factor("color repeats?", [
    DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [color])),
    DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [color]))
])
text_repeats_factor = Factor("text repeats?", [
    DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [text])),
    DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [text]))
])
# Shared experiment block: fully crossed color x text design.
design = [color, text, con_factor]
crossing = [color, text]
blk = fully_cross_block(design, crossing, [])
def test_generate_encoding_diagram():
    """Diagram for the plain crossed block: sequential variable numbering."""
    assert __generate_encoding_diagram(blk) == "\
----------------------------------------------\n\
| Trial |  color   |   text   | congruent? |\n\
|   #   | red blue | red blue | con   inc  |\n\
----------------------------------------------\n\
|   1   |  1   2   |  3   4   |  5     6   |\n\
|   2   |  7   8   |  9  10   | 11    12   |\n\
|   3   | 13  14   | 15  16   | 17    18   |\n\
|   4   | 19  20   | 21  22   | 23    24   |\n\
----------------------------------------------\n"
def test_generate_encoding_diagram_with_transition():
    """Transition factors have no variables on trial 1 (no previous trial)."""
    block = fully_cross_block([color, text, color_repeats_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
--------------------------------------------------\n\
| Trial |  color   |   text   | color repeats? |\n\
|   #   | red blue | red blue |   yes      no  |\n\
--------------------------------------------------\n\
|   1   |  1   2   |  3   4   |                |\n\
|   2   |  5   6   |  7   8   |   17       18  |\n\
|   3   |  9  10   | 11  12   |   19       20  |\n\
|   4   | 13  14   | 15  16   |   21       22  |\n\
--------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions():
    """Two transition factors: both leave the first trial's cells empty."""
    block = fully_cross_block([color, text, con_factor, color_repeats_factor, text_repeats_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial |  color   |   text   | congruent? | color repeats? | text repeats? |\n\
|   #   | red blue | red blue | con   inc  |   yes      no  |  yes      no  |\n\
-------------------------------------------------------------------------------\n\
|   1   |  1   2   |  3   4   |  5     6   |                |               |\n\
|   2   |  7   8   |  9  10   | 11    12   |   25       26  |  31       32  |\n\
|   3   | 13  14   | 15  16   | 17    18   |   27       28  |  33       34  |\n\
|   4   | 19  20   | 21  22   | 23    24   |   29       30  |  35       36  |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions_in_different_order():
    """Columns follow design order, but variable numbers keep the same scheme."""
    block = fully_cross_block([text_repeats_factor, color, color_repeats_factor, text, con_factor],
                              [color, text],
                              [])
    assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial | text repeats? |  color   | color repeats? |   text   | congruent? |\n\
|   #   |  yes      no  | red blue |   yes      no  | red blue | con   inc  |\n\
-------------------------------------------------------------------------------\n\
|   1   |               |  1   2   |                |  3   4   |  5     6   |\n\
|   2   |  25       26  |  7   8   |   31       32  |  9  10   | 11    12   |\n\
|   3   |  27       28  | 13  14   |   33       34  | 15  16   | 17    18   |\n\
|   4   |  29       30  | 19  20   |   35       36  | 21  22   | 23    24   |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_windows():
    """A width-3 window leaves the first two trials' cells empty."""
    color3 = Factor("color3", ["red", "blue", "green"])
    yes_fn = lambda colors: colors[0] == colors[1] == colors[2]
    no_fn = lambda colors: not yes_fn(colors)
    color3_repeats_factor = Factor("color3 repeats?", [
        DerivedLevel("yes", Window(yes_fn, [color3], 3, 1)),
        DerivedLevel("no", Window(no_fn, [color3], 3, 1))
    ])
    block = fully_cross_block([color3_repeats_factor, color3, text], [color3, text], [])
    assert __generate_encoding_diagram(block) == "\
---------------------------------------------------------\n\
| Trial | color3 repeats? |     color3      |   text   |\n\
|   #   |   yes       no  | red blue green | red blue |\n\
---------------------------------------------------------\n\
|   1   |                 |  1   2    3    |  4   5   |\n\
|   2   |                 |  6   7    8    |  9  10   |\n\
|   3   |   31        32  | 11  12   13    | 14  15   |\n\
|   4   |   33        34  | 16  17   18    | 19  20   |\n\
|   5   |   35        36  | 21  22   23    | 24  25   |\n\
|   6   |   37        38  | 26  27   28    | 29  30   |\n\
---------------------------------------------------------\n"
def test_generate_encoding_diagram_with_window_with_stride():
    """Stride > 1 allocates window variables only on every stride-th trial."""
    # Stride 3: variables on trials 1 and 4 only.
    congruent_bookend = Factor("congruent bookend?", [
        DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3)),
        DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3))
    ])
    block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
    assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial |  color   |   text   | congruent bookend? |\n\
|   #   | red blue | red blue |   yes        no    |\n\
------------------------------------------------------\n\
|   1   |  1   2   |  3   4   |   17         18    |\n\
|   2   |  5   6   |  7   8   |                    |\n\
|   3   |  9  10   | 11  12   |                    |\n\
|   4   | 13  14   | 15  16   |   19         20    |\n\
------------------------------------------------------\n"
    # Width 2, stride 2: variables on trials 2 and 4.
    congruent_bookend = Factor("congruent bookend?", [
        DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2)),
        DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2))
    ])
    block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
    assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial |  color   |   text   | congruent bookend? |\n\
|   #   | red blue | red blue |   yes        no    |\n\
------------------------------------------------------\n\
|   1   |  1   2   |  3   4   |                    |\n\
|   2   |  5   6   |  7   8   |   17         18    |\n\
|   3   |  9  10   | 11  12   |                    |\n\
|   4   | 13  14   | 15  16   |   19         20    |\n\
------------------------------------------------------\n"
| 2.28125 | 2 |
Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 0 | 12927 | <gh_stars>0
import json
from labinstrument.SS.CMW500.CMW500_WIFI.CMW500_WIFI import *
if __name__ == '__main__':
    # Read the instrument's current parameter set and store it in the shared
    # JSON config file under a new profile name.
    new_config_name = 'emm'
    new_config = CMW_WIFI(17).get_parameters()
    # Use context managers so both file handles are closed deterministically.
    # The original passed bare open() calls to json.load/json.dump, leaving
    # the handles open (and the written data potentially unflushed until
    # interpreter exit on non-refcounting runtimes).
    with open('config.txt') as config_file:
        config = json.load(config_file)
    config[new_config_name] = new_config
    with open('config.txt', 'w') as config_file:
        json.dump(config, config_file)
internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 0 | 12928 | import yaml
import os
import time
import datetime
from pycti.utils.constants import StixCyberObservableTypes
from weasyprint import HTML
from pycti import OpenCTIConnectorHelper, get_config_variable
from jinja2 import Environment, FileSystemLoader
class ExportReportPdf:
    """OpenCTI connector that renders a Report entity (with its related
    entities and observables) into a PDF and uploads it back to the platform.

    Configuration comes from config.yml next to this file and/or environment
    variables read through get_config_variable().
    """
    def __init__(self):
        # Instantiate the connector helper from config
        config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
        config = (
            yaml.load(open(config_file_path), Loader=yaml.FullLoader)
            if os.path.isfile(config_file_path)
            else {}
        )
        self.helper = OpenCTIConnectorHelper(config)
        # ExportReportPdf specific config settings
        self.primary_color = get_config_variable(
            "EXPORT_REPORT_PDF_PRIMARY_COLOR",
            ["export_report_pdf", "primary_color"],
            config,
        )
        self.secondary_color = get_config_variable(
            "EXPORT_REPORT_PDF_SECONDARY_COLOR",
            ["export_report_pdf", "secondary_color"],
            config,
        )
        self.current_dir = os.path.abspath(os.path.dirname(__file__))
        # Bake the configured colors into the CSS used by the HTML template.
        self.set_colors()
        # Company contact details rendered into the PDF header/footer.
        self.company_address_line_1 = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1",
            ["export_report_pdf", "company_address_line_1"],
            config,
        )
        self.company_address_line_2 = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2",
            ["export_report_pdf", "company_address_line_2"],
            config,
        )
        self.company_address_line_3 = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3",
            ["export_report_pdf", "company_address_line_3"],
            config,
        )
        self.company_phone_number = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER",
            ["export_report_pdf", "company_phone_number"],
            config,
        )
        self.company_email = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_EMAIL",
            ["export_report_pdf", "company_email"],
            config,
        )
        self.company_website = get_config_variable(
            "EXPORT_REPORT_PDF_COMPANY_WEBSITE",
            ["export_report_pdf", "company_website"],
            config,
        )
        # When true, only observables that have at least one indicator are included.
        self.indicators_only = get_config_variable(
            "EXPORT_REPORT_PDF_INDICATORS_ONLY",
            ["export_report_pdf", "indicators_only"],
            config,
        )
        # When true, URLs are defanged (http -> hxxp) in the output.
        self.defang_urls = get_config_variable(
            "EXPORT_REPORT_PDF_DEFANG_URLS",
            ["export_report_pdf", "defang_urls"],
            config,
        )
    def _process_message(self, data):
        """Handle one export job: read the Report, gather its objects, render
        the HTML template and upload the resulting PDF.

        Raises ValueError when asked to export anything but a Report.
        """
        file_name = data["file_name"]
        # TODO this can be implemented to filter every entity and observable
        # max_marking = data["max_marking"]
        entity_type = data["entity_type"]
        if entity_type != "Report":
            raise ValueError(
                f'This Connector can only process entities of type "Report" and not of type "{entity_type}".'
            )
        # Get the Report
        report_dict = self.helper.api.report.read(id=data["entity_id"])
        # Extract values for inclusion in output pdf
        report_marking = report_dict.get("objectMarking", None)
        if report_marking:
            # Use the last (presumably most specific) marking definition.
            report_marking = report_marking[-1]["definition"]
        report_name = report_dict["name"]
        report_description = report_dict.get("description", "No description available.")
        report_confidence = report_dict["confidence"]
        report_id = report_dict["id"]
        report_external_refs = [
            external_ref_dict["url"]
            for external_ref_dict in report_dict["externalReferences"]
        ]
        report_objs = report_dict["objects"]
        report_date = datetime.datetime.now().strftime("%b %d %Y")
        # Template context; entities/observables are grouped by entity type.
        context = {
            "report_name": report_name,
            "report_description": report_description,
            "report_marking": report_marking,
            "report_confidence": report_confidence,
            "report_external_refs": report_external_refs,
            "report_date": report_date,
            "company_address_line_1": self.company_address_line_1,
            "company_address_line_2": self.company_address_line_2,
            "company_address_line_3": self.company_address_line_3,
            "company_phone_number": self.company_phone_number,
            "company_email": self.company_email,
            "company_website": self.company_website,
            "entities": {},
            "observables": {},
        }
        # Process each STIX Object
        for report_obj in report_objs:
            obj_entity_type = report_obj["entity_type"]
            obj_id = report_obj["standard_id"]
            # Handle StixCyberObservables entities
            if obj_entity_type == "StixFile" or StixCyberObservableTypes.has_value(
                obj_entity_type
            ):
                observable_dict = self.helper.api.stix_cyber_observable.read(id=obj_id)
                # If only include indicators and
                # the observable doesn't have an indicator, skip it
                if self.indicators_only and not observable_dict["indicators"]:
                    self.helper.log_info(
                        f"Skipping {obj_entity_type} observable with value {observable_dict['observable_value']} as it was not an Indicator."
                    )
                    continue
                if obj_entity_type not in context["observables"]:
                    context["observables"][obj_entity_type] = []
                # Defang urls
                if self.defang_urls and obj_entity_type == "Url":
                    observable_dict["observable_value"] = observable_dict[
                        "observable_value"
                    ].replace("http", "hxxp", 1)
                context["observables"][obj_entity_type].append(observable_dict)
            # Handle all other entities
            else:
                reader_func = self.get_reader(obj_entity_type)
                if reader_func is None:
                    self.helper.log_error(
                        f'Could not find a function to read entity with type "{obj_entity_type}"'
                    )
                    continue
                entity_dict = reader_func(id=obj_id)
                if obj_entity_type not in context["entities"]:
                    context["entities"][obj_entity_type] = []
                context["entities"][obj_entity_type].append(entity_dict)
        # Render html with input variables
        env = Environment(loader=FileSystemLoader(self.current_dir))
        template = env.get_template("resources/report.html")
        html_string = template.render(context)
        # Generate pdf from html string
        pdf_contents = HTML(string=html_string, base_url="resources").write_pdf()
        # Upload the output pdf
        self.helper.log_info(f"Uploading: {file_name}")
        self.helper.api.stix_domain_object.add_file(
            id=report_id,
            file_name=file_name,
            data=pdf_contents,
            mime_type="application/pdf",
        )
        return "Export done"
    def set_colors(self):
        """Generate resources/report.css from report.css.template, substituting
        the configured primary/secondary colors."""
        with open(
            os.path.join(self.current_dir, "resources/report.css.template"), "r"
        ) as f:
            new_css = f.read()
            new_css = new_css.replace("<primary_color>", self.primary_color)
            new_css = new_css.replace("<secondary_color>", self.secondary_color)
        with open(os.path.join(self.current_dir, "resources/report.css"), "w") as f:
            f.write(new_css)
    def get_reader(self, entity_type):
        """
        Returns the function to use for calling the OpenCTI to
        read data for a particular entity type.

        entity_type: a str representing the entity type, i.e. Indicator

        returns: a function or None if entity type is not supported
        """
        # Dispatch table mapping OpenCTI entity types to helper read functions.
        reader = {
            "Stix-Domain-Object": self.helper.api.stix_domain_object.read,
            "Attack-Pattern": self.helper.api.attack_pattern.read,
            "Campaign": self.helper.api.campaign.read,
            "Note": self.helper.api.note.read,
            "Observed-Data": self.helper.api.observed_data.read,
            "Organization": self.helper.api.identity.read,
            "Opinion": self.helper.api.opinion.read,
            "Report": self.helper.api.report.read,
            "Sector": self.helper.api.identity.read,
            "System": self.helper.api.identity.read,
            "Course-Of-Action": self.helper.api.course_of_action.read,
            "Identity": self.helper.api.identity.read,
            "Indicator": self.helper.api.indicator.read,
            "Individual": self.helper.api.identity.read,
            "Infrastructure": self.helper.api.infrastructure.read,
            "Intrusion-Set": self.helper.api.intrusion_set.read,
            "Malware": self.helper.api.malware.read,
            "Threat-Actor": self.helper.api.threat_actor.read,
            "Tool": self.helper.api.tool.read,
            "Vulnerability": self.helper.api.vulnerability.read,
            "Incident": self.helper.api.incident.read,
            "City": self.helper.api.location.read,
            "Country": self.helper.api.location.read,
            "Region": self.helper.api.location.read,
            "Position": self.helper.api.location.read,
            "Location": self.helper.api.location.read,
        }
        return reader.get(entity_type, None)
    # Start the main loop
    def start(self):
        """Block forever, dispatching export jobs to _process_message()."""
        self.helper.listen(self._process_message)
if __name__ == "__main__":
    try:
        connector_export_report_pdf = ExportReportPdf()
        connector_export_report_pdf.start()
    except Exception as e:
        print(e)
        # Give log collectors a moment before the process dies.
        time.sleep(10)
        # NOTE(review): exits 0 even on error — presumably so a container
        # orchestrator restarts the connector cleanly; confirm intent.
        exit(0)
| 2.28125 | 2 |
Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 1 | 12929 | from django.urls import path
from .views import initiate_payment, callback
# Payment app routes: '' -> initiate_payment, 'callback/' -> callback.
# The route names ('pay', 'callback') are part of the public URL-reversal
# interface; keep them stable.
urlpatterns = [
    path('', initiate_payment, name='pay'),
    path('callback/', callback, name='callback'),
]
| 1.539063 | 2 |
ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 46 | 12930 | <reponame>zone-zero/ibmsecurity
def get(isamAppliance, check_mode=False, force=False):
    """
    Retrieve an overview of updates and licensing information.

    :param isamAppliance: appliance object exposing ``invoke_get``.
    :param check_mode: unused; kept for the module-wide uniform signature.
    :param force: unused; kept for the module-wide uniform signature.
    :return: the result of ``isamAppliance.invoke_get``.
    """
    description = "Retrieve an overview of updates and licensing information"
    return isamAppliance.invoke_get(description, "/updates/overview")
def get_licensing_info(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the licensing information.

    :param isamAppliance: appliance object exposing ``invoke_get``.
    :param check_mode: unused; kept for the module-wide uniform signature.
    :param force: unused; kept for the module-wide uniform signature.
    :return: the result of ``isamAppliance.invoke_get``.
    """
    description = "Retrieve the licensing information"
    return isamAppliance.invoke_get(description, "/lum/is_licensed")
| 1.992188 | 2 |
src/sweetrpg_library_api/application/config.py | paulyhedral/sweetrpg-library-api | 0 | 12931 | # -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
"""
config.py
- settings for the flask application object
"""
import os
import redis
from sweetrpg_library_api.application import constants
class BaseConfig(object):
    """Flask settings resolved from environment variables at import time.

    Required variables (KeyError on import if missing): DB_HOST, DB_USER,
    DB_PW, DB_NAME, REDIS_HOST.  The remaining options fall back to the
    defaults below.
    """

    # Previously `bool(os.environ.get(...) or True)`, which could never be
    # False (any non-empty string — including "false" — is truthy, and an
    # unset/empty value fell through to True).  Parse it explicitly; the
    # default stays True when the variable is unset.
    DEBUG = str(os.environ.get(constants.DEBUG, True)).lower() in ("1", "true", "yes")
    PORT = os.environ.get(constants.PORT) or 5000
    # ASSETS_DEBUG = True
    LOG_LEVEL = os.environ.get(constants.LOG_LEVEL) or "INFO"
    DB_HOST = os.environ[constants.DB_HOST]
    # DB_PORT = os.environ.get(constants.DB_PORT) or "27017"
    DB_USERNAME = os.environ[constants.DB_USER]
    DB_PASSWORD = os.environ[constants.DB_PW]
    DB_NAME = os.environ[constants.DB_NAME]
    # Default to "" rather than None so an unset value no longer produces a
    # literal "?None" query string in DB_URL.
    DB_OPTS = os.environ.get(constants.DB_OPTS, "")
    DB_URL = f"mongodb+srv://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}?{DB_OPTS}"
    MONGODB_ALIAS_CONNECTION = "default"
    MONGODB_URI = DB_URL
    MONGODB_SETTINGS = {
        "host": DB_URL,
        "connect": False,  # defer the connection until first use
    }
    # used for encryption and session management
    # SECRET_KEY = os.environ.get('SECRET_KEY') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
    # CSRF_TOKEN = os.environ.get('CSRF_TOKEN') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
    CACHE_REDIS_HOST = os.environ[constants.REDIS_HOST]
    CACHE_REDIS_PORT = int(os.environ.get(constants.REDIS_PORT) or 6379)
    # CACHE_REDIS_DB = int(os.environ.get(constants.REDIS_DB) or 7)
    SESSION_TYPE = "redis"
    SESSION_REDIS = redis.from_url(
        f"redis://{os.environ[constants.REDIS_HOST]}:{int(os.environ.get(constants.REDIS_PORT) or 6379)}")
    # SEGMENT_WRITE_KEY = os.environ.get(constants.SEGMENT_WRITE_KEY)
    SERVER_NAME = os.environ.get(constants.SERVER_NAME)
| 2.125 | 2 |
frames.py | mppc12/special_subject_tea | 0 | 12932 | <filename>frames.py<gh_stars>0
import pandas as pd
from group import Group
class Frames:
    """Facade bundling the cleanup and grouping helpers for the tea data."""

    def __init__(self, frame=None):
        # NOTE(review): `frame` is accepted but never used — presumably it
        # was meant to be stored or passed to the helpers; confirm intent.
        self.cleanups = Cleanup()
        self.groups = Group()
class Cleanup:
    """Chainable cleanup steps for the raw customs trade DataFrame.

    Load a DataFrame by calling the instance, then invoke one of the
    cleanup methods; each method returns the resulting DataFrame.
    """

    def __init__(self, frame=None):
        self.frame = frame

    def __call__(self, frame):
        # Bind the working DataFrame and return self for chaining.
        self.frame = frame
        return self

    def dropcol(self):
        """Drop columns that the analysis does not need."""
        unused = ['貨品號列', '重量(公噸)', '英文貨名', '數量(限11碼貨品)', '數量單位']
        return self.frame.drop(unused, axis=1, inplace=False)

    def droprow(self):
        """Drop rows whose Chinese product name is outside the study scope."""
        excluded = ['普洱茶,每包不超過3公斤',
                    '普洱茶,每包超過3公斤',
                    '茶或馬黛茶之萃取物、精、濃縮物及以茶、馬黛茶之萃取物、精、濃縮物或以茶、馬黛茶為主要成分之調製品']
        return self.frame[~self.frame['中文貨名'].isin(excluded)]

    def modifydate(self):
        """Map ROC-calendar year labels onto Gregorian year strings."""
        roc_to_gregorian = {'92年': '2003', '93年': '2004', '94年': '2005',
                            '95年': '2006', '96年': '2007', '97年': '2008',
                            '98年': '2009', '99年': '2010', '100年': '2011',
                            '101年': '2012', '102年': '2013', '103年': '2014',
                            '104年': '2015', '105年': '2016', '106年': '2017',
                            '107年': '2018', '108年': '2019'}
        return self.frame.replace(roc_to_gregorian, inplace=False)

    def dtypeint(self):
        """Coerce the weight/value columns to numeric dtypes (mutates frame)."""
        for name in ('重量(公斤)', '美元(千元)'):
            self.frame[name] = pd.to_numeric(self.frame[name])
        return self.frame

    def modifyitem(self):
        """Merge the green-tea subcategories into a single label."""
        relabel = {'其他綠茶(未發酵),每包超過3公斤': '綠茶(未發酵),每包超過3公斤',
                   '薰芬綠茶,每包超過3公斤': '綠茶(未發酵),每包超過3公斤'}
        return self.frame.replace(relabel, inplace=False)
| 2.96875 | 3 |
msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 0 | 12933 | <reponame>forslund/mycroft-skills-manager
# Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Install, remove, update and track the skills on a device
MSM can be used on the command line but is also used by Mycroft core daemons.
"""
import time
import logging
import shutil
from functools import wraps
from glob import glob
from multiprocessing.pool import ThreadPool
from os import path
from typing import Dict, List
from xdg import BaseDirectory
from msm import GitException
from msm.exceptions import (
AlreadyInstalled,
AlreadyRemoved,
MsmException,
MultipleSkillMatches,
RemoveException,
SkillNotFound
)
from msm.skill_entry import SkillEntry
from msm.skill_repo import SkillRepo
from msm.skill_state import (
initialize_skill_state,
get_skill_state,
write_device_skill_state,
load_device_skill_state,
device_skill_state_hash
)
from msm.util import cached_property, MsmProcessLock
LOG = logging.getLogger(__name__)
CURRENT_SKILLS_DATA_VERSION = 2
ONE_DAY = 86400
def save_device_skill_state(func):
    """Decorator to overwrite the skills.json file when skill state changes.

    The methods decorated with this function are executed in threads. So,
    this contains some funky logic to keep the threads from stepping on one
    another.
    """
    @wraps(func)
    def func_wrapper(self, *args, **kwargs):
        will_save = False
        # Only the outermost decorated call (per object) claims the save;
        # nested decorated calls see saving_handled already set and skip it.
        if not self.saving_handled:
            will_save = self.saving_handled = True
        try:
            ret = func(self, *args, **kwargs)
        finally:
            # Write even if the wrapped call raised, so partial state
            # changes still reach disk.
            if will_save:
                self.write_device_skill_state()
            # Always restore saving_handled flag
            if will_save:
                self.saving_handled = False
        return ret
    return func_wrapper
class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde',
'respeaker', 'mycroft_mark_2', 'mycroft_mark_2pi'}
def __init__(self, platform='default', old_skills_dir=None,
skills_dir=None, repo=None, versioned=True):
self.platform = platform
# Keep this variable alive for a while, is used to move skills from the
# old config based location to XDG
self.old_skills_dir = path.expanduser(old_skills_dir or '') or None
self.skills_dir = (skills_dir or
BaseDirectory.save_data_path('mycroft/skills'))
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
# Property placeholders
self._all_skills = None
self._default_skills = None
self._local_skills = None
self._device_skill_state = None
self.saving_handled = False
self.device_skill_state_hash = ''
with self.lock:
self._init_skills_data()
def clear_cache(self):
"""Completely clear the skills cache."""
self._device_skill_state = None
self._invalidate_skills_cache()
@cached_property(ttl=ONE_DAY)
def all_skills(self):
"""Getting a list of skills can take a while so cache it.
The list method is called several times in this class and in core.
Skill data on a device just doesn't change that frequently so
getting a fresh list that many times does not make a lot of sense.
The cache will expire every hour to pick up any changes in the
mycroft-skills repo.
Skill installs and updates will invalidate the cache, which will
cause this property to refresh next time is is referenced.
The list method can be called directly if a fresh skill list is needed.
"""
if self._all_skills is None:
self._all_skills = self._get_all_skills()
return self._all_skills
def _get_all_skills(self):
LOG.info('building SkillEntry objects for all skills')
self._refresh_skill_repo()
remote_skills = self._get_remote_skills()
all_skills = self._merge_remote_with_local(remote_skills)
return all_skills
def list(self):
"""Load a list of SkillEntry objects from both local and remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to.
The return value of this function is cached in the all_skills property.
Only call this method if you need a fresh version of the SkillEntry
objects.
"""
all_skills = self._get_all_skills()
self._invalidate_skills_cache(new_value=all_skills)
return all_skills
def _refresh_skill_repo(self):
"""Get the latest mycroft-skills repo code."""
try:
self.repo.update()
except GitException as e:
if not path.isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
def _get_remote_skills(self):
"""Build a dictionary of skills in mycroft-skills repo keyed by id"""
remote_skills = []
for name, _, url, sha in self.repo.get_skill_data():
skill_dir = SkillEntry.create_path(self.skills_dir, url, name)
sha = sha if self.versioned else ''
remote_skills.append(
SkillEntry(name, skill_dir, url, sha, msm=self)
)
return {skill.id: skill for skill in remote_skills}
def _merge_remote_with_local(self, remote_skills):
"""Merge the skills found in the repo with those installed locally."""
all_skills = []
# First move locally installed skills from old to new location
# TODO: get rid of this at some point
if self.old_skills_dir:
for old_skill_dir in glob(path.join(self.old_skills_dir, '*/')):
skill_name = old_skill_dir.rstrip('/').rsplit('/', 1)[1]
new_skill_path = self.skills_dir + "/" + skill_name
if not path.isdir(new_skill_path):
shutil.move(old_skill_dir, self.skills_dir +
"/" + skill_name)
for skill_file in glob(path.join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(path.dirname(skill_file), msm=self,
use_cache=False)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills.extend(remote_skills.values())
return all_skills
@property
def local_skills(self):
"""Property containing a dictionary of local skills keyed by name."""
if self._local_skills is None:
self._local_skills = {
s.name: s for s in self.all_skills if s.is_local
}
return self._local_skills
@property
def default_skills(self):
if self._default_skills is None:
default_skill_groups = self.list_all_defaults()
try:
default_skill_group = default_skill_groups[self.platform]
except KeyError:
LOG.error(
'No default skill list found for platform "{}". '
'Using base list.'.format(self.platform)
)
default_skill_group = default_skill_groups.get('default', [])
self._default_skills = {s.name: s for s in default_skill_group}
return self._default_skills
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Generate dictionary of default skills in all default skill groups"""
all_skills = {skill.name: skill for skill in self.all_skills}
default_skills = {group: [] for group in self.SKILL_GROUPS}
for group_name, skill_names in self.repo.get_default_skill_names():
group_skills = []
for skill_name in skill_names:
try:
group_skills.append(all_skills[skill_name])
except KeyError:
LOG.warning('No such default skill: ' + skill_name)
default_skills[group_name] = group_skills
return default_skills
def _init_skills_data(self):
"""Initial load of the skill state that occurs upon instantiation.
If the skills state was upgraded after it was loaded, write the
updated skills state to disk.
"""
try:
del(self.device_skill_state['upgraded'])
except KeyError:
self.device_skill_state_hash = device_skill_state_hash(
self.device_skill_state
)
else:
self.write_device_skill_state()
@property
def device_skill_state(self):
"""Dictionary representing the state of skills on a device."""
if self._device_skill_state is None:
self._device_skill_state = load_device_skill_state()
skills_data_version = self._device_skill_state.get('version', 0)
if skills_data_version < CURRENT_SKILLS_DATA_VERSION:
self._upgrade_skills_data()
else:
self._sync_device_skill_state()
return self._device_skill_state
def _upgrade_skills_data(self):
"""Upgrade the contents of the device skills state if needed."""
if self._device_skill_state.get('version', 0) == 0:
self._upgrade_to_v1()
if self._device_skill_state['version'] == 1:
self._upgrade_to_v2()
def _upgrade_to_v1(self):
"""Upgrade the device skills state to version one."""
self._device_skill_state.update(blacklist=[], version=1, skills=[])
for skill in self.local_skills.values():
skill_data = self._device_skill_state.get(skill.name, {})
try:
origin = skill_data['origin']
except KeyError:
origin = self._determine_skill_origin(skill)
beta = skill_data.get('beta', False)
skill_state = initialize_skill_state(
skill.name,
origin,
beta,
skill.skill_gid
)
skill_state['installed'] = skill_data.get('installed', 0)
if isinstance(skill_state['installed'], bool):
skill_state['installed'] = 0
skill_state['updated'] = skill_data.get('updated', 0)
self._device_skill_state['skills'].append(skill_state)
self._device_skill_state.update(upgraded=True)
def _upgrade_to_v2(self):
"""Upgrade the device skills state to version 2.
This adds the skill_gid field to skill entries.
"""
self._update_skill_gid()
self._device_skill_state.update(version=2, upgraded=True)
def _sync_device_skill_state(self):
"""Sync device's skill state with with actual skills on disk."""
self._add_skills_to_state()
self._remove_skills_from_state()
self._update_skill_gid()
def _add_skills_to_state(self):
"""Add local skill to state if it is not already there."""
skill_names = [s['name'] for s in self._device_skill_state['skills']]
for skill in self.local_skills.values():
if skill.name not in skill_names:
origin = self._determine_skill_origin(skill)
skill_state = initialize_skill_state(
skill.name,
origin,
False,
skill.skill_gid
)
self._device_skill_state['skills'].append(skill_state)
def _remove_skills_from_state(self):
"""Remove skills from state that no longer exist in the filesystem."""
skills_to_remove = []
for skill in self._device_skill_state['skills']:
is_not_local = skill['name'] not in self.local_skills
is_installed_state = skill['installation'] == 'installed'
if is_not_local and is_installed_state:
skills_to_remove.append(skill)
for skill in skills_to_remove:
self._device_skill_state['skills'].remove(skill)
def _update_skill_gid(self):
for skill in self._device_skill_state['skills']:
try:
local_skill = self.local_skills[skill['name']]
except KeyError:
skill['skill_gid'] = ''
else:
skill['skill_gid'] = local_skill.skill_gid
def _determine_skill_origin(self, skill):
if skill.name in self.default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
return origin
def write_device_skill_state(self, data=None):
"""Write device's skill state to disk if it has been modified."""
data = data or self.device_skill_state
if device_skill_state_hash(data) != self.device_skill_state_hash:
write_device_skill_state(data)
self.device_skill_state_hash = device_skill_state_hash(data)
@save_device_skill_state
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill_state = initialize_skill_state(
skill.name,
origin,
skill.is_beta,
skill.skill_gid
)
try:
skill.install(constraints)
except AlreadyInstalled:
log_msg = 'Skill {} already installed - ignoring install request'
LOG.info(log_msg.format(skill.name))
skill_state = None
raise
except MsmException as e:
skill_state.update(
installation='failed',
status='error',
failure_message=str(e)
)
raise
else:
skill_state.update(
installed=time.time(),
installation='installed',
status='active',
beta=skill.is_beta
)
finally:
# Store the entry in the list
if skill_state is not None:
self.device_skill_state['skills'].append(skill_state)
self._invalidate_skills_cache()
@save_device_skill_state
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
try:
skill.remove()
except AlreadyRemoved:
LOG.info('Skill {} has already been removed'.format(skill.name))
raise
except RemoveException:
LOG.exception('Failed to remove skill ' + skill.name)
raise
else:
remaining_skills = []
for skill_state in self.device_skill_state['skills']:
if skill_state['name'] != skill.name:
remaining_skills.append(skill_state)
self.device_skill_state['skills'] = remaining_skills
self._invalidate_skills_cache()
def update_all(self):
def update_skill(skill):
entry = get_skill_state(skill.name, self.device_skill_state)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
self._invalidate_skills_cache()
self._device_skill_state = None
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, self.local_skills.values())
@save_device_skill_state
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
skill_state = get_skill_state(skill.name, self.device_skill_state)
if skill_state:
skill_state['beta'] = skill.is_beta
if skill.update():
# On successful update update the update value
if skill_state:
skill_state['updated'] = time.time()
self._invalidate_skills_cache()
@save_device_skill_state
def apply(self, func, skills, max_threads=20):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except Exception:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(max_threads) as tp:
return tp.map(run_item, skills)
@save_device_skill_state
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(
install_or_update_skill,
self.default_skills.values()
)
def _invalidate_skills_cache(self, new_value=None):
"""Reset the cached skill lists in case something changed.
The cached_property decorator builds a _cache instance attribute
storing a dictionary of cached values. Deleting from this attribute
invalidates the cache.
"""
LOG.info('invalidating skills cache')
if hasattr(self, '_cache') and 'all_skills' in self._cache:
del self._cache['all_skills']
self._all_skills = None if new_value is None else new_value
self._local_skills = None
self._default_skills = None
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.all_skills:
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
skill_directory = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, skill_directory, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.all_skills
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
| 1.578125 | 2 |
ietf/utils/resources.py | wpjesus/codematch | 1 | 12934 | <reponame>wpjesus/codematch
# Autogenerated by the mkresources management command 2014-11-13 05:39
from tastypie.resources import ModelResource
from tastypie.fields import CharField
from tastypie.constants import ALL
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from ietf import api
from ietf.utils.models import DumpInfo
class UserResource(ModelResource):
    """Tastypie resource exposing django.contrib.auth User objects."""
    # Declared explicitly so the field dehydrates from User.username.
    username = CharField()
    class Meta:
        queryset = User.objects.all()
        serializer = api.Serializer()
class ContentTypeResource(ModelResource):
    """Tastypie resource exposing django ContentType objects."""
    # NOTE(review): this `username` field looks copy-pasted from
    # UserResource — ContentType has no username attribute; confirm
    # before removing (the file header says it was autogenerated).
    username = CharField()
    class Meta:
        queryset = ContentType.objects.all()
        serializer = api.Serializer()
class DumpInfoResource(ModelResource):
    """Tastypie resource exposing DumpInfo records, filterable by date/host."""
    class Meta:
        queryset = DumpInfo.objects.all()
        serializer = api.Serializer()
        #resource_name = 'dumpinfo'
        filtering = {
            "date": ALL,
            "host": ALL,
        }
# Register the resource with the project-level API registry.
api.utils.register(DumpInfoResource())
| 1.90625 | 2 |
maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | 0 | 12935 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
    """
    Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the sizes of the image
    """

    def __init__(self, datasets, uniform_datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.uniform_datasets = uniform_datasets

    def get_idxs(self, idx):
        """Map a global index to a (dataset index, sample index) pair.

        With uniform_datasets=True the given idx is ignored and a dataset
        and sample are drawn at random instead, giving each dataset equal
        sampling weight regardless of its size.
        """
        if self.uniform_datasets:
            which = np.random.randint(len(self.cumulative_sizes))
            start = 0 if which == 0 else self.cumulative_sizes[which - 1]
            offset = np.random.randint(0, self.cumulative_sizes[which] - start)
            return which, offset
        which = bisect.bisect_right(self.cumulative_sizes, idx)
        offset = idx if which == 0 else idx - self.cumulative_sizes[which - 1]
        return which, offset

    def get_img_info(self, idx):
        which, offset = self.get_idxs(idx)
        return self.datasets[which].get_img_info(offset)

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError("absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        which, offset = self.get_idxs(idx)
        return self.datasets[which][offset]
| 2.734375 | 3 |
153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 2 | 12936 | # 153. Find Minimum in Rotated Sorted Array
#
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
class Solution(object):
    """Binary search for the minimum of a rotated sorted array (no dupes)."""

    def findMin(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        The original file defined findMin twice, so the first version was
        silently shadowed; only one implementation is kept.  It also used
        `/`, which yields a float (and so a TypeError on indexing) under
        Python 3 — `//` keeps integer indices on both Python 2 and 3.
        """
        lo, hi = 0, len(nums) - 1
        # Invariant: the minimum always lies within nums[lo..hi].
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] < nums[hi]:
                # nums[mid..hi] is sorted ascending, so the minimum is at
                # mid or to its left.
                hi = mid
            else:
                # The rotation pivot (and minimum) lies right of mid.
                lo = mid + 1
        return nums[lo]
if __name__ == '__main__':
    # Quick self-check; expected output: 0.  The original used the
    # Python 2 print statement, a SyntaxError under Python 3.
    print(Solution().findMin([4, 5, 6, 7, 0, 1, 2]))
| 3.5625 | 4 |
tests/bridge/test_bridge.py | shuklaayush/badger-system | 99 | 12937 | <reponame>shuklaayush/badger-system<gh_stars>10-100
import pytest
from brownie import (
accounts,
interface,
MockVault,
BadgerBridgeAdapter,
CurveSwapStrategy,
CurveTokenWrapper,
)
from helpers.constants import AddressZero
from helpers.registry import registry
from config.badger_config import badger_config
from scripts.systems.badger_system import connect_badger
from scripts.systems.bridge_system import connect_bridge
from scripts.systems.swap_system import connect_swap
# Curve lp tokens
RENBTC = "0x49849C98ae39Fff122806C06791Fa73784FB3675"
TBTC = "0x64eda51d3Ad40D56b9dFc5554E06F94e1Dd786Fd"
SBTC = "<KEY>"
# Bridge mock vaults for testing.
# Schema is (in token addr, vault name, vault symbol, vault token addr)
BRIDGE_VAULTS = [
# TODO: When bridge adapter addr is approved, can test
# directly against badger sett contracts.
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.renCrv",
"symbol": "bcrvrenBTC",
"token": RENBTC,
"address": "0x6dEf55d2e18486B9dDfaA075bc4e4EE0B28c1545",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.tbtcCrv",
"symbol": "bcrvtBTC",
"token": TBTC,
"address": "0xb9D076fDe463dbc9f915E5392F807315Bf940334",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.sbtcCrv",
"symbol": "bcrvsBTC",
"token": SBTC,
"address": "0xd04c48A53c111300aD41190D63681ed3dAd998eC",
"upgrade": True,
},
{
"inToken": registry.tokens.wbtc,
"outToken": registry.tokens.wbtc,
"id": "yearn.wbtc",
"symbol": "byvwBTC",
"token": registry.tokens.wbtc,
"address": "0x4b92d19c11435614cd49af1b589001b7c08cd4d5",
"upgrade": False,
},
]
# Tests mint/burn to/from crv sett.
# We create a mock vault for each pool token.
@pytest.mark.parametrize(
"vault",
BRIDGE_VAULTS,
)
def test_bridge_vault(vault):
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_deploy_bridge_mocks(badger, bridge)
slippage = 0.03
amount = 1 * 10 ** 8
v = vault["address"]
# TODO: Can interleave these mints/burns.
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(v).balanceOf(account)
bridge.adapter.mint(
vault["inToken"],
slippage * 10 ** 4,
account.address,
v,
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
balance = interface.IERC20(v).balanceOf(account)
assert balance > balanceBefore
interface.IERC20(v).approve(
bridge.adapter.address,
balance,
{"from": account},
)
# Approve mock gateway for transfer of underlying token for "mock" burns.
# NB: In the real world, burns don't require approvals as it's just
# an internal update the the user's token balance.
interface.IERC20(registry.tokens.renbtc).approve(
bridge.mocks.BTC.gateway, balance, {"from": bridge.adapter}
)
bridge.adapter.burn(
vault["outToken"],
v,
slippage * 10 ** 4,
account.address,
balance,
{"from": account},
)
assert interface.IERC20(v).balanceOf(account) == 0
# Tests swap router failures and wbtc mint/burn.
def test_bridge_basic_swap_fail():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_upgrade_bridge(badger, bridge)
_deploy_bridge_mocks(badger, bridge)
# NB: If true, fails during router opimizeSwap() call, otherwise the underlying strategy fails.
for router_fail in [True, False]:
_deploy_swap_mocks(badger, bridge, swap, router_fail=router_fail)
# .1% slippage
slippage = 0.001
amount = 1 * 10 ** 8
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(renbtc).balanceOf(account)
# Test mints
bridge.adapter.mint(
wbtc,
slippage * 10 ** 4,
account.address,
AddressZero, # No vault.
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
assert interface.IERC20(renbtc).balanceOf(account) > balanceBefore
# NB: User should not receive any wbtc but rather renbtc as part
# of the fallback mechanism.
assert interface.IERC20(wbtc).balanceOf(account) == 0
# Tests swap router and wbtc mint/burn.
def test_bridge_basic():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_deploy_bridge_mocks(badger, bridge)
router = swap.router
# 3% slippage
slippage = 0.03
amount = 1 * 10 ** 8
# Test estimating slippage from a random account for wbtc <-> renbtc swaps.
_assert_swap_slippage(
router,
renbtc,
wbtc,
amount,
slippage,
)
_assert_swap_slippage(
router,
wbtc,
renbtc,
amount,
slippage,
)
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(wbtc).balanceOf(account)
# Test mints
bridge.adapter.mint(
wbtc,
slippage * 10 ** 4,
account.address,
AddressZero, # No vault.
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
assert interface.IERC20(wbtc).balanceOf(account) > balanceBefore
# Test burns
balance = interface.IERC20(wbtc).balanceOf(account)
interface.IERC20(wbtc).approve(bridge.adapter, balance, {"from": account})
# Approve mock gateway for transfer of underlying token for "mock" burns.
# NB: In the real world, burns don't require approvals as it's
# just an internal update the the user's token balance.
interface.IERC20(renbtc).approve(
bridge.mocks.BTC.gateway,
balance,
{"from": bridge.adapter},
)
bridge.adapter.burn(
wbtc,
AddressZero, # No vault.
slippage * 10 ** 4,
account.address,
balance,
{"from": account},
)
assert interface.IERC20(wbtc).balanceOf(account) == 0
def test_bridge_sweep():
    """Tokens stranded on the adapter are sweepable to governance.

    Funds the adapter with renbtc and wbtc from whale accounts, then
    checks that sweep() increases the devMultisig balance of each token.
    """
    renbtc = registry.tokens.renbtc
    wbtc = registry.tokens.wbtc
    badger = connect_badger(badger_config.prod_json)
    bridge = connect_bridge(badger, badger_config.prod_json)
    # Send both renbtc and wbtc to bridge adapter and test sweep.
    for (whale, token) in [
        (registry.whales.renbtc.whale, interface.IERC20(renbtc)),
        (registry.whales.wbtc.whale, interface.IERC20(wbtc)),
    ]:
        token.transfer(
            bridge.adapter,
            token.balanceOf(whale),
            {"from": whale},
        )
        # Can be called from any account, should always send to governance.
        beforeBalance = token.balanceOf(badger.devMultisig)
        bridge.adapter.sweep({"from": badger.devMultisig})
        assert token.balanceOf(badger.devMultisig) > beforeBalance
def _assert_swap_slippage(router, fromToken, toToken, amountIn, slippage):
    """Assert both the router's and its chosen strategy's quotes for
    swapping ``amountIn`` of ``fromToken`` into ``toToken`` stay within
    the given fractional ``slippage``."""
    # Should be accessible from a random account.
    account = accounts[8]
    (strategyAddr, amountOut) = router.optimizeSwap.call(
        fromToken,
        toToken,
        amountIn,
        {"from": account},
    )
    assert (1 - (amountOut / amountIn)) < slippage
    strategy = interface.ISwapStrategy(strategyAddr)
    # Redundant slippage check, but just to be sure.
    amountOut = strategy.estimateSwapAmount.call(
        fromToken,
        toToken,
        amountIn,
        {"from": account},
    )
    assert (1 - (amountOut / amountIn)) < slippage
def _deploy_bridge_mocks(badger, bridge):
    """Deploy the mock RenVM gateway/registry and point the adapter at it,
    so mint/burn flows can run without real darknode signatures."""
    # NB: Deploy/use mock gateway
    bridge.deploy_mocks()
    bridge.adapter.setRegistry(
        bridge.mocks.registry,
        {"from": badger.devMultisig},
    )
def _deploy_swap_mocks(badger, bridge, swap, router_fail=False):
    """Deploy mock swap contracts and wire the adapter to the mock router.

    router_fail=True makes the failure happen inside the router's
    optimizeSwap() call; False makes the underlying strategy fail instead
    (see the caller's comment in test_bridge_basic_swap_fail).
    """
    swap.deploy_mocks(router_fail=router_fail)
    bridge.adapter.setRouter(swap.mocks.router, {"from": badger.devMultisig})
def _upgrade_swap(badger, swap):
    """Deploy fresh CurveSwapStrategy logic and upgrade the curve
    strategy proxy to it via the dev proxy admin."""
    badger.deploy_logic("CurveSwapStrategy", CurveSwapStrategy)
    logic = badger.logic["CurveSwapStrategy"]
    badger.devProxyAdmin.upgrade(
        swap.strategies.curve,
        logic,
        {"from": badger.governanceTimelock},
    )
def _upgrade_bridge(badger, bridge):
    """Upgrade the bridge adapter proxy to fresh BadgerBridgeAdapter logic
    and install a newly deployed CurveTokenWrapper on it."""
    badger.deploy_logic("BadgerBridgeAdapter", BadgerBridgeAdapter)
    logic = badger.logic["BadgerBridgeAdapter"]
    badger.devProxyAdmin.upgrade(
        bridge.adapter,
        logic,
        {"from": badger.governanceTimelock},
    )
    badger.deploy_logic("CurveTokenWrapper", CurveTokenWrapper)
    logic = badger.logic["CurveTokenWrapper"]
    bridge.adapter.setCurveTokenWrapper(logic, {"from": badger.devMultisig})
| 2.171875 | 2 |
babylon_server/babylon/config.py | ajponte/babylon | 0 | 12938 | import os
class Config:
    """Flask / SQLAlchemy configuration for the babylon server."""

    # Statement for enabling the development environment
    DEBUG = True

    # Define the application directory
    BASE_DIR = os.path.abspath(os.path.dirname(__file__))

    # Logging config.
    LOG_DIR = "logs"
    # Fixed: this was mistakenly the list ["LOG_TYPE", "watched"] — it looks
    # like a mistyped environment lookup. The log type is a plain string.
    LOG_TYPE = "watched"
    LOG_LEVEL = "DEBUG"
    APP_LOG_NAME = "babylon_server.log"
    # WWW_LOG_NAME is for log rotation, which is currently not set up.
    # Log files sit in the `logs` directory.
    WWW_LOG_NAME = "babylon_server.log"
    LOG_MAX_BYTES = 100_000_000  # 100MB in bytes
    LOG_COPIES = 5

    # All the MySql options are under the assumption that the only database
    # at this time is the `activity` database.
    MYSQL_DATABASE_HOST = "localhost"
    MYSQL_DATABASE_NAME = "activity"
    MYSQL_DATABASE_PORT = "3308"
    MYSQL_DATABASE_USER = "application"
    MYSQL_DATABASE_PWD = "<PASSWORD>"
    MYSQL_UNIX_SOCKET = "/var/run/mysqld/mysqld.sock"
    # Fixed: the socket path must be passed as the `unix_socket` query
    # parameter. Previously the bare path was appended directly after `?`,
    # which is not a valid key=value query string for the MySQL driver.
    SQLALCHEMY_DATABASE_URI = (
        f'mysql+pymysql://{MYSQL_DATABASE_USER}:{MYSQL_DATABASE_PWD}'
        f'@{MYSQL_DATABASE_HOST}:{MYSQL_DATABASE_PORT}/{MYSQL_DATABASE_NAME}'
        f'?unix_socket={MYSQL_UNIX_SOCKET}'
    )  # noqa

    # Pool recycle is recommended for MySQL.
    # See https://docs.sqlalchemy.org/en/14/core/pooling.html#setting-pool-recycle
    SQLALCHEMY_POOL_RECYCLE = 3600

    SQLALCHEMY_BINDS = {
        'db2': 'mysql://user:pass@localhost/activity',
        'db3': 'mysql://user:pass@localhost/user'
    }
| 2.265625 | 2 |
etherscan_py/__init__.py | saltduck/etherscan_py | 6 | 12939 | """Top-level package for etherscan-py."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 1 | 1 |
realtime/realtime.py | mikerah13/python_samples | 0 | 12940 | from subprocess import Popen, PIPE
def run_command(command):
    """Run *command*, echoing its stdout line by line as it is produced.

    Returns the process's exit code.
    """
    # Local import: the module only pulls Popen/PIPE from subprocess at the top.
    import shlex

    # Fixed: the original referenced `subprocess.Popen`/`subprocess.PIPE` and
    # `shlex` without importing them, and used the Python 2 print statement.
    process = Popen(shlex.split(command), stdout=PIPE)
    while True:
        line = process.stdout.readline()
        # An empty read combined with a set returncode means the stream is done.
        if not line and process.poll() is not None:
            break
        if line:
            print(line.decode(errors="replace").strip())
    return process.poll()
if __name__ == "__main__":
run_command("ping google.com")
| 2.96875 | 3 |
resources/model/agenda.py | diegohideky/climatempoworkshop | 0 | 12941 | from db_connection import db
class Agenda(db.Model):
    """One user's schedule for a single day: working hours plus a rest break."""

    __tablename__ = "agendas"

    id = db.Column(db.Integer, primary_key=True)
    date = db.Column(db.Date)        # day this agenda applies to
    work_start = db.Column(db.Time)  # start of the working period
    work_end = db.Column(db.Time)    # end of the working period
    rest_start = db.Column(db.Time)  # start of the rest break
    rest_end = db.Column(db.Time)    # end of the rest break
    # NOTE(review): the FK targets table 'usuarios' — presumably the User
    # model's __tablename__; confirm the two stay in sync.
    user_id = db.Column(db.Integer, db.ForeignKey('usuarios.id'))
    user = db.relationship('User')

    def __init__(self, date, work_start, work_end, rest_start, rest_end, user_id):
        """Create an agenda row; the `user` relationship resolves via user_id."""
        self.date = date
        self.work_start = work_start
        self.work_end = work_end
        self.rest_start = rest_start
        self.rest_end = rest_end
        self.user_id = user_id

    def update(self, date, work_start, work_end, rest_start, rest_end):
        """Overwrite the date/times in place; caller is responsible for commit."""
        self.date = date
        self.work_start = work_start
        self.work_end = work_end
        self.rest_start = rest_start
        self.rest_end = rest_end
data/models.py | sarfarazstark/To-Do-Bot | 4 | 12942 | <filename>data/models.py
"""Database models"""
from sqlalchemy import orm
import sqlalchemy
from .db_session import SqlAlchemyBase
# Task database model
class Task(SqlAlchemyBase):
    """A to-do item belonging to one Telegram user."""

    __tablename__ = 'tasks'

    id = sqlalchemy.Column(
        sqlalchemy.Integer, primary_key=True, autoincrement=True
    )
    # Telegram id of the owning user (see User.telegram_id).
    user_id = sqlalchemy.Column(sqlalchemy.Integer)
    title = sqlalchemy.Column(sqlalchemy.String)
    # NOTE(review): presumably a serialized list of weekdays on which the
    # task repeats — confirm the exact string format against the bot code.
    days_of_the_week = sqlalchemy.Column(sqlalchemy.String)
# User database model
class User(SqlAlchemyBase):
    """A bot user, keyed by their Telegram id."""

    __tablename__ = 'users'

    telegram_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    # Index of the user's preferred interface language.
    language_id = sqlalchemy.Column(sqlalchemy.Integer)
| 2.609375 | 3 |
graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | 0 | 12943 | <gh_stars>0
# Generated by Django 3.0 on 2019-12-18 21:09
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: make user_internship_post.content a CKEditor rich-text field."""

    dependencies = [
        ('graduated_site', '0028_auto_20191218_2028'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user_internship_post',
            name='content',
            field=ckeditor.fields.RichTextField(max_length=2000, null=True, verbose_name='İçerik'),
        ),
    ]
| 1.585938 | 2 |
tests/test_obj.py | runapp/M2Crypto | 58 | 12944 | <gh_stars>10-100
#!/usr/bin/env python
"""Unit tests for M2Crypto.m2 obj_* functions.
"""
from M2Crypto import ASN1, BIO, Rand, X509, m2, six
from tests import unittest
"""
These functions must be cleaned up and moved to some python module
Taken from CA managment code
"""
def x509_name2list(name):
    """Yield every entry of an X509 name as an X509_Name_Entry wrapper."""
    total = name.entry_count()
    for idx in range(0, total):
        entry_ptr = m2.x509_name_get_entry(name._ptr(), idx)
        yield X509.X509_Name_Entry(entry_ptr, _pyfree=0)
def x509_name_entry2tuple(entry):
    """Render one name entry as a (field-name, printable-value) text tuple."""
    sink = BIO.MemoryBuffer()
    raw = entry._ptr()
    m2.asn1_string_print(sink._ptr(), m2.x509_name_entry_get_data(raw))
    field = m2.obj_obj2txt(m2.x509_name_entry_get_object(raw), 0)
    return (six.ensure_text(field), six.ensure_text(sink.getvalue()))
def tuple2x509_name_entry(tup):
    """Build an X509_Name_Entry from a (field, value) tuple.

    Raises ValueError when the field name is not a recognized object
    identifier.
    """
    obj, data = tup
    # TODO This is evil, isn't it? Shouldn't we use only official API?
    # Something like X509.X509_Name.add_entry_by_txt()
    _x509_ne = m2.x509_name_entry_create_by_txt(None, six.ensure_str(obj),
                                                ASN1.MBSTRING_ASC,
                                                six.ensure_str(data), len(data))
    if not _x509_ne:
        # Fixed typo in the error message ("indentifier" -> "identifier").
        raise ValueError("Invalid object identifier: %s" % obj)
    return X509.X509_Name_Entry(_x509_ne, _pyfree=1)  # Prevent memory leaks
class ObjectsTestCase(unittest.TestCase):
    """Tests for the m2.obj_* object-identifier helpers and name round-trips."""

    def callback(self, *args):
        # No-op callback, kept for API parity with other test suites.
        pass

    def test_obj2txt(self):
        """OID <-> text conversion: numeric form (flag 1) and name form (flag 0)."""
        self.assertEqual(m2.obj_obj2txt(m2.obj_txt2obj("commonName", 0), 1),
                         b"2.5.4.3", b"2.5.4.3")
        self.assertEqual(m2.obj_obj2txt(m2.obj_txt2obj("commonName", 0), 0),
                         b"commonName", b"commonName")

    def test_nid(self):
        """ln2nid / sn2nid / txt2nid / obj2nid must all agree on commonName."""
        self.assertEqual(m2.obj_ln2nid("commonName"),
                         m2.obj_txt2nid("2.5.4.3"),
                         "ln2nid and txt2nid mismatch")
        # "CN" is a short name, so the long-name lookup must fail (0).
        self.assertEqual(m2.obj_ln2nid("CN"),
                         0, "ln2nid on sn")
        self.assertEqual(m2.obj_sn2nid("CN"),
                         m2.obj_ln2nid("commonName"),
                         "ln2nid and sn2nid mismatch")
        self.assertEqual(m2.obj_sn2nid("CN"),
                         m2.obj_obj2nid(m2.obj_txt2obj("CN", 0)), "obj2nid")
        self.assertEqual(m2.obj_txt2nid("__unknown"),
                         0, "__unknown")

    def test_tuple2tuple(self):
        """Round-trip a (field, value) tuple through an X509_Name_Entry."""
        tup = ("CN", "someCommonName")
        tup1 = x509_name_entry2tuple(tuple2x509_name_entry(tup))
        # tup1[0] is 'commonName', not 'CN'
        self.assertEqual(tup1[1], tup[1], tup1)
        self.assertEqual(x509_name_entry2tuple(tuple2x509_name_entry(tup1)),
                         tup1, tup1)

    def test_unknown(self):
        """Unknown field names must raise ValueError."""
        with self.assertRaises(ValueError):
            tuple2x509_name_entry(("__unknown", "_"))

    def test_x509_name(self):
        """Build an X509 name field by field and round-trip it via tuples."""
        n = X509.X509_Name()
        # It seems this actually needs to be a real 2 letter country code
        n.C = b'US'
        n.SP = b'State or Province'
        n.L = b'locality name'
        n.O = b'orhanization name'
        n.OU = b'org unit'
        n.CN = b'common name'
        n.Email = b'<EMAIL>'
        n.serialNumber = b'1234'
        n.SN = b'surname'
        n.GN = b'given name'
        n.givenName = b'name given'
        self.assertEqual(len(n), 11, len(n))
        # Thierry: this call to list seems extraneous...
        tl = [x509_name_entry2tuple(x) for x in x509_name2list(n)]
        self.assertEqual(len(tl), len(n), len(tl))
        x509_n = m2.x509_name_new()
        for o in [tuple2x509_name_entry(x) for x in tl]:
            m2.x509_name_add_entry(x509_n, o._ptr(), -1, 0)
            o._pyfree = 0  # Take care of underlying object
        n1 = X509.X509_Name(x509_n)
        self.assertEqual(n.as_text(), n1.as_text(), n1.as_text())

    # Detailed OpenSSL error message is visible in Python error message:
    def test_detailed_error_message(self):
        from M2Crypto import SMIME, X509
        s = SMIME.SMIME()
        x509 = X509.load_cert('tests/recipient.pem')
        sk = X509.X509_Stack()
        sk.push(x509)
        s.set_x509_stack(sk)

        st = X509.X509_Store()
        st.load_info('tests/recipient.pem')
        s.set_x509_store(st)

        p7, data = SMIME.smime_load_pkcs7('tests/sample-p7.pem')
        self.assertIsInstance(p7, SMIME.PKCS7, p7)
        try:
            s.verify(p7, data)
        except SMIME.PKCS7_Error as e:
            six.assertRegex(self, str(e),
                            "unable to get local issuer certificate",
                            "Not received expected error message")
def suite():
    """Build the unittest suite for this module."""
    loader = unittest.TestLoader()
    all_tests = unittest.TestSuite()
    all_tests.addTest(loader.loadTestsFromTestCase(ObjectsTestCase))
    return all_tests
if __name__ == '__main__':
    # Seed the PRNG from the pool file, run the suite, then persist the pool.
    Rand.load_file('randpool.dat', -1)
    unittest.TextTestRunner().run(suite())
    Rand.save_file('randpool.dat')
| 2.5 | 2 |
new_scraper.py | Baw25/HomeSavvy | 0 | 12945 | <reponame>Baw25/HomeSavvy
#!/bin/python
# -*- coding: utf-8 -*-
# Droplet Name: ubuntu-512mb-sfo2-01
# IP Address: 192.168.3.11
# Username: root
# Password: <PASSWORD>
# New Password: <PASSWORD>
# https://medium.com/@hoppy/how-to-test-or-scrape-javascript-rendered-websites-with-python-selenium-a-beginner-step-by-c137892216aa
from time import sleep
from random import randint
from selenium import webdriver
from pyvirtualdisplay import Display
class RealTassaSpider():
    """Selenium-based scraper for app.realtaasa.com listings.

    Runs a headless Chrome inside a virtual display, signs in, then collects
    one record per listing anchor on the homes page.

    Fixed relative to the original draft:
      * a block of free-form selector notes sat un-commented inside the class
        body (a syntax error) — it is preserved below as a comment;
      * ``process_elements`` tested/returned ``prd_image``/``prd_title`` which
        were never assigned (guaranteed NameError path);
      * ``get_login_then_homes`` referenced the undefined bare name
        ``url_login`` and duplicated the login-form code;
      * an XPath string had an unbalanced quote;
      * Python 2 print statements.
    """

    def __init__(self):
        self.url_to_crawl = "https://app.realtaasa.com/homes"
        self.url_login = "http://app.realtaasa.com/signin"
        self.all_items = []

    # Open headless chromedriver
    def start_driver(self):
        """Start a virtual display and a Chrome webdriver inside it."""
        print('starting driver...')
        self.display = Display(visible=0, size=(800, 600))
        self.display.start()
        self.driver = webdriver.Chrome()
        sleep(4)

    # Close chromedriver
    def close_driver(self):
        """Quit the browser and stop the virtual display."""
        print('closing driver...')
        self.display.stop()
        self.driver.quit()
        print('closed!')

    # Tell the browser to get a page
    def get_page(self, url):
        """Navigate to *url* with a short randomized pause afterwards."""
        print('getting page...')
        self.driver.get(url)
        sleep(randint(2, 3))

    def login(self):
        """Fill and submit the sign-in form; failures are silently ignored."""
        print('getting pass the gate page...')
        try:
            form = self.driver.find_element_by_xpath('//*[@id="signInForm"]')
            form.find_element_by_xpath('.//*[@id="email"]').send_keys('<EMAIL>')
            form.find_element_by_xpath('.//*[@id="password"]').send_keys('<PASSWORD>')
            form.find_element_by_xpath('.//*[@class="input.mbs.button--primary"]').click()
            sleep(randint(3, 5))
        except Exception:
            pass

    def get_login_then_homes(self, url):
        """Open the sign-in page at *url*, log in, then click the homes tab."""
        print('logging in...')
        # Fixed: original used the undefined bare name `url_login`.
        self.driver.get(url)
        # Fixed: the form-filling steps were duplicated inline; reuse login().
        self.login()
        home_button = self.driver.find_element_by_xpath('//*[@id="nav-homes"]')
        home_button.click()

    # Selector notes for a listing page (kept from the original draft):
    #   fields wanted: url, address 1, address 2, neighborhood, building type,
    #   bed/bath, price, co-owners, monthly cost, tax savings, down payment,
    #   description.
    #   div#content                                 -> main content area
    #   div.grid__item > span.grid__item > a#more-photos -> listing URL
    #   h1.alpha                                    -> address (line 1)
    #   div.beta                                    -> address (line 2) / neighborhood
    #   div.delta.mbn.tc-cove.fw-500, div.delta.mbs -> price & cost figures
    #   div.grid__item.one-whole.pln                -> description

    def grab_a_tags(self):
        """Collect data from every anchor inside the listings container."""
        print('grabbing list of items...')
        for a in self.driver.find_elements_by_xpath(
                '//*[@class="desk--ten-twelfths push--desk--one-twelfth"]//a'):
            data = self.process_elements(a)
            if data:
                self.all_items.append(data)

    def process_elements(self, a):
        """Extract one listing's fields from anchor element *a*.

        Returns a dict of the scraped fields, or False when any required
        field could not be found.
        """
        url = ''
        address_1 = ''
        address_2 = ''
        prd_price = ''
        try:
            url = a.find_element_by_xpath('.//*[@id="more-photos"]').get_attribute('href')
            address_1 = a.find_element_by_xpath('.//*[@class="alpha mbn fw-500"]').text
            # Fixed: the class selector was missing its closing quote.
            address_2 = a.find_element_by_xpath('.//*[@class="beta fw-300"]').text
            prd_price = a.find_element_by_xpath('.//*[@class="price ng-scope ng-binding"]').text
        except Exception:
            pass
        # Fixed: the original checked/returned prd_image and prd_title,
        # which were never assigned anywhere.
        if url and address_1 and prd_price:
            return {
                'url': url,
                'address_1': address_1,
                'address_2': address_2,
                'price': prd_price,
            }
        return False

    def parse(self):
        """Run a full scrape; return the item list, or False when empty."""
        self.start_driver()
        self.get_page(self.url_login)
        self.login()
        self.grab_a_tags()
        self.close_driver()
        if self.all_items:
            return self.all_items
        return False
# Run spider
RealTassa = RealTassaSpider()
items_list = RealTassa.parse()

# Print whatever was scraped.
# Fixed: parse() returns False when nothing was found, which is not
# iterable; also replaced the Python 2 print statement.
if items_list:
    for item in items_list:
        print(item)
| 3.125 | 3 |
gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | 42 | 12946 | <reponame>gdsfactory/gdsfactory
from gdsfactory.simulation.gmeep.add_monitors import add_monitors
from gdsfactory.simulation.gmeep.get_simulation import get_simulation
from gdsfactory.simulation.gmeep.get_transmission_2ports import (
get_transmission_2ports,
plot2D,
plot3D,
)
from gdsfactory.simulation.gmeep.plot_xsection import plot_xsection
# Public API of the gmeep simulation subpackage.
__all__ = [
    "add_monitors",
    "get_simulation",
    # NOTE(review): "get_sparameters1x2" and "plot_eigenmode" are listed here
    # but never imported above, so `from ... import *` would fail on them —
    # confirm whether they should be imported or dropped from __all__.
    "get_sparameters1x2",
    "get_transmission_2ports",
    "plot2D",
    "plot3D",
    "plot_xsection",
    "plot_eigenmode",
]
__version__ = "0.0.2"
| 1.234375 | 1 |
coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 0 | 12947 | <filename>coreos-ostree-importer/coreos_ostree_importer.py<gh_stars>0
#!/usr/bin/python3
import boto3
import botocore
import fedora_messaging
import fedora_messaging.api
import hashlib
import json
import logging
import os
import subprocess
import sys
import tarfile
import tempfile
import traceback
# Set local logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
FEDORA_MESSAGING_TOPIC_LISTEN = (
"org.fedoraproject.prod.coreos.build.request.ostree-import"
)
FEDORA_MESSAGING_TOPIC_RESPOND = FEDORA_MESSAGING_TOPIC_LISTEN + ".finished"
# We are processing the org.fedoraproject.prod.coreos.build.request.ostree-import topic
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import&delta=100000
# The schema was originally designed in:
# https://github.com/coreos/fedora-coreos-tracker/issues/198#issuecomment-513944390
EXAMPLE_MESSAGE_BODY = json.loads("""
{
"build_id": "30.20190905.0",
"stream": "testing",
"basearch": "x86_64",
"commit": "s3://fcos-builds/prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar",
"checksum": "sha256:d01db6939e7387afa2492ac8e2591c53697fc21cf16785585f7f1ac0de692863",
"ostree_ref": "fedora/x86_64/coreos/testing",
"ostree_checksum": "b4beca154dab3696fd04f32ddab818102caa9247ec3192403adb9aaecc991bd9",
"target_repo": "prod"
}
"""
)
KNOWN_OSTREE_REPOS = {
"prod": "/mnt/koji/ostree/repo",
"compose": "/mnt/koji/compose/ostree/repo",
}
# Consumer for ostree-import request messages: verifies the requested commit
# and imports it (from S3 or the compose repo) into the target OSTree repo.
class Consumer(object):
    """Fedora-messaging consumer that imports OSTree commits on request."""

    def __init__(self):
        # Check the possible repos to make sure they exist
        for path in KNOWN_OSTREE_REPOS.values():
            if not ostree_repo_exists(path):
                raise Exception(f"OSTree repo does not exist at {path}")
        logger.info(
            "Processing messages with topic: %s" % FEDORA_MESSAGING_TOPIC_LISTEN
        )

    def __call__(self, message: fedora_messaging.api.Message):
        """Handle one message, always replying with SUCCESS or FAILURE."""
        # Catch any exceptions and don't raise them further because
        # it will cause /usr/bin/fedora-messaging to crash and we'll
        # lose the traceback logs from the container
        try:
            self.process(message)
            logger.info("Sending SUCCESS message")
            send_message(msg=message.body, status="SUCCESS")
        except Exception as e:
            logger.error("Caught Exception!")
            logger.error("###################################")
            traceback.print_exc()
            logger.error("###################################")
            logger.error("Replying with a FAILURE message...")
            send_message(msg=message.body, status="FAILURE")
            logger.error("\t continuing...")
            pass

    def process(self, message: fedora_messaging.api.Message):
        """Validate the request and import its commit into the target repo."""
        logger.debug(message.topic)
        logger.debug(message.body)

        # Grab the raw message body and parse out pieces
        msg = message.body
        # basearch and stream are currently unused but kept for schema parity.
        basearch = msg["basearch"]
        build_id = msg["build_id"]
        checksum = msg["checksum"]
        commit_url = msg["commit"]
        ostree_checksum = msg["ostree_checksum"]
        ostree_ref = msg["ostree_ref"]
        stream = msg["stream"]
        target_repo = msg["target_repo"]

        # Qualify arguments
        if not checksum.startswith("sha256:"):
            raise Exception("checksum value must start with sha256:")
        if target_repo not in KNOWN_OSTREE_REPOS.keys():
            raise Exception(f"Provided target repo is unknown: {target_repo}")
        sha256sum = checksum[7:]  # strip the "sha256:" prefix
        target_repo_path = KNOWN_OSTREE_REPOS[target_repo]
        source_repo_path = None

        # Detect if the commit already exists in the target repo
        # NOTE: We assume here that an import won't be requested twice for
        #       the same commit (i.e. someone adds detached metadata and
        #       then does a second import request).
        if ostree_commit_exists(target_repo_path, ostree_checksum):
            logger.info(
                f"Commit {ostree_checksum} already exists in the target repo. "
                "Skipping import"
            )
            return

        # Import the OSTree commit to the specified repo. We'll use
        # a temporary directory to untar the repo into.
        with tempfile.TemporaryDirectory() as tmpdir:
            # If the target repo is the prod repo the commit could
            # already have been imported into the compose repo. If it
            # is already in the compose repo then let's just pull-local
            # from there to save downloading all from the net again.
            if target_repo == "prod" and ostree_commit_exists(
                repo=KNOWN_OSTREE_REPOS["compose"], commit=ostree_checksum
            ):
                logger.info("Commit exists in compose repo. Importing from there")
                source_repo_path = KNOWN_OSTREE_REPOS["compose"]
            else:
                # Grab the file from s3 and then pull local
                untar_file_from_s3(url=commit_url, tmpdir=tmpdir, sha256sum=sha256sum)
                source_repo_path = tmpdir

            # one more sanity check: make sure buildid == version
            assert_commit_has_version(
                repo=source_repo_path, commit=ostree_checksum, version=build_id
            )

            # Import the commit into the target repo
            ostree_pull_local(
                commit=ostree_checksum,
                dstrepo=target_repo_path,
                srcrepo=source_repo_path,
                branch=ostree_ref,
            )
def runcmd(cmd: list, **kwargs: int) -> subprocess.CompletedProcess:
    """Run *cmd* via subprocess.run, logging its output when it fails.

    Defaults to `check=True, capture_output=True`; both can be overridden
    through **kwargs. Re-raises CalledProcessError after logging.
    """
    run_kwargs = {"check": True, "capture_output": True}
    logger.debug(f"Running command: {cmd}")
    run_kwargs.update(kwargs)
    try:
        completed = subprocess.run(cmd, **run_kwargs)
    except subprocess.CalledProcessError as err:
        logger.error("Command returned bad exitcode")
        logger.error(f"COMMAND: {cmd}")
        logger.error(f" STDOUT: {err.stdout.decode()}")
        logger.error(f" STDERR: {err.stderr.decode()}")
        raise
    return completed  # subprocess.CompletedProcess
def send_message(msg: dict, status: str):
    """Publish the response message.

    Sends back the original message body in full, plus an additional
    `status` field set to either `SUCCESS` or `FAILURE`.
    """
    fedora_messaging.api.publish(
        fedora_messaging.message.Message(
            topic=FEDORA_MESSAGING_TOPIC_RESPOND, body={"status": status, **msg}
        )
    )
# https://stackoverflow.com/a/55542529
def get_sha256sum(filepath: str) -> str:
    """Return the hex SHA-256 digest of the file at *filepath*.

    Reads in hash-block-sized chunks so large files never sit fully in memory.
    """
    digest = hashlib.sha256()
    with open(filepath, "rb") as fp:
        for chunk in iter(lambda: fp.read(digest.block_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
def parse_s3_url(url: str) -> tuple:
    """Split an "s3://bucket/key" URL into its (bucket, key) pair."""
    scheme = "s3://"
    if not url.startswith(scheme):
        raise Exception(f"Unable to parse the s3 url: {url}")
    # Drop the scheme, then split on the first "/" only: keys may contain "/".
    bucket, key = url[len(scheme):].split("/", 1)
    return (bucket, key)
def untar_file_from_s3(url: str, tmpdir: str, sha256sum: str):
    """Download the tarball at s3 *url*, verify its digest, untar into *tmpdir*."""
    filename = "ostree.tar"
    filepath = os.path.join(tmpdir, filename)

    # Grab file from s3
    logger.info(f"Downloading object from s3: {url}")
    s3 = boto3.client("s3")
    bucket, key = parse_s3_url(url)
    s3.download_file(bucket, key, filepath)

    # Verify file has correct checksum
    calcuatedsum = get_sha256sum(filepath)
    if sha256sum != calcuatedsum:
        raise Exception("Checksums do not match: " f"{sha256sum} != {calcuatedsum}")

    # Untar the file into the temporary directory
    # NOTE(review): extractall() can write outside tmpdir if the archive
    # contains "../" members; this is safe only because the tarball's digest
    # is pinned by the request message — confirm that source is trusted.
    with tarfile.open(filepath) as tar:
        tar.extractall(path=tmpdir)
def ostree_pull_local(srcrepo: str, dstrepo: str, branch: str, commit: str):
    """Import *commit* from *srcrepo* into *dstrepo* and update *branch*.

    Fast-forward safety: when the branch already exists in the destination,
    its current head must be the parent of the incoming commit.
    """
    # verify the parent commit of the new commit is in the destination repo
    # and also that the current branch in the repo points to it
    branch_exists = ostree_branch_exists(repo=dstrepo, branch=branch)
    # NOTE(review): this resolves `commit^` unconditionally, so importing a
    # parentless (initial) commit would raise inside runcmd — confirm that
    # case never occurs in practice.
    parent = ostree_get_parent_commit(repo=srcrepo, commit=commit)
    if branch_exists:
        assert_branch_points_to_commit(repo=dstrepo, branch=branch, commit=parent)

    # pull content
    logger.info("Running ostree pull-local to perform import")
    cmd = ["ostree", f"--repo={dstrepo}", "pull-local", srcrepo, commit]
    runcmd(cmd)

    # update branch: reset an existing ref, or create a new one
    if branch_exists:
        cmd = ["ostree", f"--repo={dstrepo}", "reset", branch, commit]
    else:
        cmd = ["ostree", f"--repo={dstrepo}", "refs", f"--create={branch}", commit]
    logger.info(f"Updating branch {branch} -> {commit} in {dstrepo}")
    runcmd(cmd)

    # update summary file so clients can discover the new ref/commit
    logger.info("Updating summary file")
    cmd = ["ostree", f"--repo={dstrepo}", "summary", "-u"]
    runcmd(cmd)
def ostree_repo_exists(repo: str) -> bool:
    """Return True when *repo* is an initialized OSTree repository."""
    if not os.path.exists(repo):
        return False
    # `ostree refs` only succeeds against a real repository.
    probe = ["ostree", f"--repo={repo}", "refs"]
    if runcmd(probe, check=False).returncode != 0:
        logger.debug(f"OSTree repo does not exist at {repo}")
        return False
    return True
def ostree_commit_exists(repo: str, commit: str) -> bool:
    """Return True when *commit* is present in *repo* (`ostree show` succeeds)."""
    cmd = ["ostree", f"--repo={repo}", "show", commit]
    return runcmd(cmd, check=False).returncode == 0
def ostree_branch_exists(repo: str, branch: str) -> bool:
    """Return True when *branch* resolves in *repo* (`ostree rev-parse` succeeds)."""
    cmd = ["ostree", f"--repo={repo}", "rev-parse", branch]
    return runcmd(cmd, check=False).returncode == 0
def ostree_get_parent_commit(repo: str, commit: str) -> str:
    """Return the parent checksum of *commit*; raises if it has no parent."""
    cmd = ["ostree", f"--repo={repo}", "rev-parse", f"{commit}^"]
    return runcmd(cmd, check=True).stdout.strip().decode()
def assert_branch_points_to_commit(repo: str, branch: str, commit: str):
    """Raise unless *branch* in *repo* currently resolves to *commit*."""
    resolve = ["ostree", f"--repo={repo}", "rev-parse", branch]
    resolved = runcmd(resolve, check=True).stdout.strip().decode()
    logger.debug(f"{branch} points to {resolved}")
    if commit != resolved:
        raise Exception(f"{branch} points to {resolved}. Expected {commit}")
def assert_commit_has_version(repo: str, commit: str, version: str):
    """Raise unless the commit's embedded `version` metadata equals *version*."""
    cmd = ["ostree", f"--repo={repo}", "show", commit, "--print-metadata-key=version"]
    cp = runcmd(cmd, check=True)
    # Output is a quoted GVariant string like 'x.y.z'; strip quotes/whitespace.
    embeddedversion = cp.stdout.replace(b"'", b"").strip().decode()
    if version != embeddedversion:
        raise Exception(
            "Embedded commit version does not match buildid "
            f"{version} != {embeddedversion}"
        )
# The code in this file is expected to be run through fedora messaging
# However, you can run the script directly for testing purposes. The
# below code allows us to do that and also fake feeding data to the
# call by updating the json text below.
if __name__ == "__main__":
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(name)s - %(message)s")
)
logger.addHandler(sh)
m = fedora_messaging.api.Message(
topic="org.fedoraproject.prod.coreos.build.request.ostree-import",
body=EXAMPLE_MESSAGE_BODY,
)
c = Consumer()
c.__call__(m)
| 1.609375 | 2 |
deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | 0 | 12948 | <gh_stars>0
from torch.utils.data.dataset import Dataset
import numpy as np
import pandas as pd
import os
import nibabel as nib
from nilearn.image import resample_img
import torch
class UKBBDataset(Dataset):
    """UK Biobank brain-MRI dataset.

    Reads per-subject metrics from a CSV and, per item, loads that subject's
    rigid-to-MNI T1 volume, downsamples it, center-crops it and min-max
    normalizes it to [0, 1].

    :param csv_path: CSV with one row per subject; must contain an `eid` column.
    :param base_path: directory holding one sub-directory per eid.
    :param crop_type: accepted but currently unused.
    :param crop_size: voxel size of the center crop.
    :param downsample: isotropic target voxel spacing for resampling.
    """

    def __init__(self, csv_path, base_path='/vol/biobank/12579/brain/rigid_to_mni/images', crop_type=None, crop_size=(64, 64, 64), downsample: float = 2.5):
        super().__init__()
        self.csv_path = csv_path
        df = pd.read_csv(csv_path)
        self.num_items = len(df)
        # One float tensor per CSV column, indexed per item in __getitem__.
        self.metrics = {col: torch.as_tensor(df[col]).float() for col in df.columns}
        self.base_path = base_path
        self.filename = 'T1_unbiased_brain_rigid_to_mni.nii.gz'
        self.crop_size = np.array(crop_size)
        self.downsample = downsample

    def __len__(self):
        return self.num_items

    def __getitem__(self, index):
        item = {col: values[index] for col, values in self.metrics.items()}
        mri_path = os.path.join(self.base_path, str(int(item['eid'])), self.filename)
        try:
            img = nib.load(mri_path)
        except Exception:
            # Fixed: was a bare `except:`. When this subject's scan is
            # missing/unreadable, fall back to the next subject.
            # NOTE(review): this still fails for the last index; consider
            # filtering bad eids at __init__ time instead.
            index += 1
            item = {col: values[index] for col, values in self.metrics.items()}
            mri_path = os.path.join(self.base_path, str(int(item['eid'])), self.filename)
            img = nib.load(mri_path)
        downsampled_nii = resample_img(img, target_affine=np.eye(3) * self.downsample, interpolation='linear')
        img = downsampled_nii.dataobj
        # Center crop of crop_size voxels.
        init_pos = np.round(np.array(img.shape) / 2 - self.crop_size / 2).astype(int)
        end_pos = init_pos + self.crop_size
        # Min-max normalize the whole volume to [0, 1] before cropping.
        min_ = np.min(img)
        max_ = np.max(img)
        img = (img - min_) / (max_ - min_)
        # Add a leading channel axis: (1, x, y, z).
        item['image'] = np.expand_dims(img[init_pos[0]:end_pos[0], init_pos[1]:end_pos[1], init_pos[2]:end_pos[2]], axis=0)
        return item
| 2.265625 | 2 |
external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 44 | 12949 | # Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import copy
import numpy as np
from mmdet.datasets.builder import PIPELINES
from ..datasets import get_annotation_mmdet_format
@PIPELINES.register_module()
class LoadImageFromOTEDataset:
    """
    Pipeline element that loads an image from a OTE Dataset on the fly. Can do conversion to float 32 if needed.
    Expected entries in the 'results' dict that should be passed to this pipeline element are:
        results['dataset_item']: dataset_item from which to load the image
        results['dataset_id']: id of the dataset to which the item belongs
        results['index']: index of the item in the dataset

    :param to_float32: optional bool, True to convert images to fp32. defaults to False
    """

    def __init__(self, to_float32: bool = False):
        self.to_float32 = to_float32

    def __call__(self, results):
        """Load the image and populate the standard mmdet image meta fields."""
        dataset_item = results['dataset_item']
        img = dataset_item.numpy
        shape = img.shape

        # Sanity check: the decoded image must match the recorded size.
        assert img.shape[0] == results['height'], f"{img.shape[0]} != {results['height']}"
        assert img.shape[1] == results['width'], f"{img.shape[1]} != {results['width']}"

        # Dataset items carry no real path; synthesize a stable pseudo-filename.
        filename = f"Dataset item index {results['index']}"
        results['filename'] = filename
        results['ori_filename'] = filename
        results['img'] = img
        results['img_shape'] = shape
        results['ori_shape'] = shape
        # Set initial values for default meta_keys
        results['pad_shape'] = shape
        num_channels = 1 if len(shape) < 3 else shape[2]
        # Identity normalization (mean 0, std 1, no channel reordering).
        results['img_norm_cfg'] = dict(
            mean=np.zeros(num_channels, dtype=np.float32),
            std=np.ones(num_channels, dtype=np.float32),
            to_rgb=False)
        results['img_fields'] = ['img']

        if self.to_float32:
            results['img'] = results['img'].astype(np.float32)

        return results
@PIPELINES.register_module()
class LoadAnnotationFromOTEDataset:
    """
    Pipeline element that loads an annotation from a OTE Dataset on the fly.

    Expected entries in the 'results' dict that should be passed to this pipeline element are:
        results['dataset_item']: dataset_item from which to load the annotation
        results['ann_info']['label_list']: list of all labels in the project
    """

    def __init__(self, min_size : int, with_bbox: bool = True, with_label: bool = True, with_mask: bool = False, with_seg: bool = False,
                 poly2mask: bool = True, with_text: bool = False, domain=None):
        # Flags mirror mmdet's LoadAnnotations; min_size filters tiny boxes
        # inside get_annotation_mmdet_format.
        self.with_bbox = with_bbox
        self.with_label = with_label
        self.with_mask = with_mask
        self.with_seg = with_seg
        self.poly2mask = poly2mask
        self.with_text = with_text
        self.domain = domain
        self.min_size = min_size

    @staticmethod
    def _load_bboxes(results, ann_info):
        """Copy ground-truth boxes into results and register the bbox field."""
        results['bbox_fields'].append('gt_bboxes')
        results['gt_bboxes'] = copy.deepcopy(ann_info['bboxes'])
        return results

    @staticmethod
    def _load_labels(results, ann_info):
        """Copy ground-truth labels into results."""
        results['gt_labels'] = copy.deepcopy(ann_info['labels'])
        return results

    @staticmethod
    def _load_masks(results, ann_info):
        """Copy ground-truth masks into results and register the mask field."""
        results['mask_fields'].append('gt_masks')
        results['gt_masks'] = copy.deepcopy(ann_info['masks'])
        return results

    def __call__(self, results):
        """Convert the item's OTE annotation into mmdet format.

        Returns None (sample is dropped) when boxes were requested but the
        item has none.
        """
        dataset_item = results['dataset_item']
        label_list = results['ann_info']['label_list']
        ann_info = get_annotation_mmdet_format(dataset_item, label_list, self.domain, self.min_size)
        if self.with_bbox:
            results = self._load_bboxes(results, ann_info)
            if results is None or len(results['gt_bboxes']) == 0:
                return None
        if self.with_label:
            results = self._load_labels(results, ann_info)
        if self.with_mask:
            results = self._load_masks(results, ann_info)
        return results
| 2.453125 | 2 |
Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 1 | 12950 | <gh_stars>1-10
from typing import List
class Solution:
    def luckyNumbers(self, matrix: List[List[int]]) -> List[int]:
        """Return every "lucky" element: the minimum in its row that is also
        the maximum in its column.

        Improved from O(m*n*m) — the original rescanned a full column for
        every row — to O(m*n) by precomputing each column's maximum once.
        Results are returned in row order, matching the original.
        """
        # Maximum of each column, computed in one pass over the transpose.
        col_max = [max(col) for col in zip(*matrix)]
        lucky = []
        for row in matrix:
            smallest = min(row)
            # As in the original, use the first occurrence of the row minimum.
            if smallest == col_max[row.index(smallest)]:
                lucky.append(smallest)
        return lucky
# Quick smoke checks; expected output: [15], [12], [7].
print(Solution().luckyNumbers([[3,7,8],[9,11,13],[15,16,17]]))
print(Solution().luckyNumbers([[1,10,4,2],[9,3,8,7],[15,16,17,12]]))
print(Solution().luckyNumbers([[7,8],[1,2]]))
GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | 0 | 12951 | <reponame>gabrielganzer/EZGas
# UC10 - Evaluate price
#
# User U exists and has valid account
# We create two Users, User1_UC10, User2_UC10 and one new gasStation GasStationUC10
#
# Registered on a 1920x1080p, Google Chrome 100% zoom
### SETUP
# Sikuli flat script: image files are screenshots recorded on the reference
# machine; `click`/`type`/`wait`/`wheel`/`Pattern` are Sikuli builtins.
#User1
click("1590678880209.png")
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User1_UC10" + Key.TAB + "<EMAIL>" + Key.TAB + "user1")
click("1590679157604.png")
click("1590788841790.png")
wait(2)
# User2
click("1590678880209.png")
wait(2)
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User2_UC10" + Key.TAB + "<EMAIL>" + Key.TAB + "user2")
click("1590679157604.png")
click("1590788841790.png")
# Admin creates a new GasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "<EMAIL>" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 6)
wait(2)
type("1590830169812.png", "GasStation_UC10" + Key.TAB + "Torino, corso duca")
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
type("1590830389386.png", Key.DOWN + Key.DOWN + Key.ENTER)
click("1590830256446.png")
click("1590830265272.png")
wait(2)
click("1590785166092.png")
wait(3)
type(Key.HOME)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
### MAIN SCENARIO
# User1 searches the gasStation
click("1590678880209.png")
wait(3)
type("1590829943940.png", "<EMAIL>" + Key.TAB + "user1" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 6)
type("1590931278631.png" , "<NAME>" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590922172004.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
click(Pattern("1590922374562.png").targetOffset(543,-4))
wheel(WHEEL_DOWN, 4)
wait(2)
# User1 reports fuel prices (gasoline 1.5, diesel 1.4).
click(Pattern("1590930530512.png").targetOffset(73,1))
type("1.5")
click(Pattern("1590930568512.png").targetOffset(73,0))
type("1.4")
click("1590834482526.png")
wait(3)
type(Key.HOME)
wait(3)
click("1590788458524.png")
# User2 login and evaluate prices
wait(2)
click("1590678880209.png")
wait(3)
type("1590829943940.png", "<EMAIL>" + Key.TAB + "user2" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
type("1590918242822-1.png" , "Torino, cor<NAME>" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590918499196.png")
wheel(WHEEL_DOWN, 3)
click(Pattern("1591638408351.png").targetOffset(1068,-3))
# User2 clicks on the green button if the price is correct, otherwise clicks on the red button
# If User clicks the green button, the User1 trustlevel increases +1, otherwise it decreases -1
#
wait(3)
type(Key.HOME)
click("1590788458524.png")
wait(2)
### TEARDOWN
# Admin deletes users and gasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "<EMAIL>@<EMAIL>" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 10)
wait(2)
click(Pattern("1590931822851.png").targetOffset(905,-27))
wait(2)
wheel(WHEEL_UP, 15)
wait(2)
click(Pattern("1590931876805.png").targetOffset(560,-4))
wait(2)
click(Pattern("1590931914901.png").targetOffset(556,-10))
wait(2)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
wait(2)
| 1.71875 | 2 |
basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 0 | 12952 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (Django 1.11.3).

    Adds the ``entry_function`` CharField to the ``EntryPoint`` model.
    Do not hand-edit the operations: the migration graph depends on them.
    """

    # Must be applied after migration 0002 of the ``basic`` app.
    dependencies = [
        ('basic', '0002_auto_20170727_1741'),
    ]

    operations = [
        migrations.AddField(
            model_name='entrypoint',
            name='entry_function',
            field=models.CharField(default='', help_text='Django function, with syntax: "app_name.function_name"', max_length=100),
        ),
    ]
| 1.734375 | 2 |
test_net_with_srgan.py | jasonlai777/Faster-R-CNN | 0 | 12953 | <reponame>jasonlai777/Faster-R-CNN<gh_stars>0
# --------------------------------------------------------
# Pytorch Multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
from PIL import Image
from torchvision.utils import save_image
import cv2
from torch.utils.data import DataLoader
from srgan_datasets import *
from srgan import *
import torch.nn.functional as F
from datasets.voc_eval import parse_rec
# Python 2/3 compatibility shim: map xrange to range on Python 3.
try:
    xrange # Python 2
except NameError:
    xrange = range # Python 3

# Detection class names: background plus, for each nematode species, a head
# "(H)", tail "(T)" and full-length variant (13 species x 3 = 39 classes).
classes = ('__background__', # always index 0
           'A.bes(H)','A.bes(T)','A.bes','A.bic(H)','A.bic(T)','A.bic',
           'A.fuj(H)','A.fuj(T)','A.fuj','B.xyl(H)','B.xyl(T)','B.xyl',
           'C.ele(H)','C.ele(T)','C.ele','M.ent(H)','M.ent(T)','M.ent',
           'M.gra(H)','M.gra(T)','M.gra','M.inc(H)','M.inc(T)','M.inc',
           'P.cof(H)','P.cof(T)','P.cof','P.vul(H)','P.vul(T)','P.vul',
           'P.spe(H)','P.spe(T)','P.spe','H.sp(H)','H.sp(T)','H.sp',
           'M.ams(H)' ,'M.ams(T)','M.ams'
           )###################
def parse_args(argv=None):
  """
  Parse input arguments.

  Args:
    argv: optional list of argument strings. When None (the default) the
      arguments are taken from ``sys.argv``, preserving the original
      command-line behaviour; passing a list makes the parser testable.

  Returns:
    argparse.Namespace with all test options.
  """
  parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
  parser.add_argument('--dataset', dest='dataset',
                      help='training dataset',
                      default='pascal_voc', type=str)
  parser.add_argument('--cfg', dest='cfg_file',
                      help='optional config file',
                      default='cfgs/res101.yml', type=str)
  parser.add_argument('--net', dest='net',
                      help='vgg16, res50, res101, res152',
                      default='res101', type=str)
  parser.add_argument('--set', dest='set_cfgs',
                      help='set config keys', default=None,
                      nargs=argparse.REMAINDER)
  parser.add_argument('--load_dir', dest='load_dir',
                      help='directory to load models', default="models",
                      type=str)
  parser.add_argument('--cuda', dest='cuda',
                      help='whether use CUDA',
                      action='store_true')
  parser.add_argument('--ls', dest='large_scale',
                      help='whether use large imag scale',
                      action='store_true')
  parser.add_argument('--mGPUs', dest='mGPUs',
                      help='whether use multiple GPUs',
                      action='store_true')
  parser.add_argument('--cag', dest='class_agnostic',
                      help='whether perform class_agnostic bbox regression',
                      action='store_true')
  parser.add_argument('--parallel_type', dest='parallel_type',
                      help='which part of model to parallel, 0: all, 1: model before roi pooling',
                      default=0, type=int)
  parser.add_argument('--checksession', dest='checksession',
                      help='checksession to load model',
                      default=1, type=int)
  parser.add_argument('--checkepoch', dest='checkepoch',
                      help='checkepoch to load network',
                      default=1, type=int)
  parser.add_argument('--checkpoint', dest='checkpoint',
                      help='checkpoint to load network',
                      default=10021, type=int)
  parser.add_argument('--vis', dest='vis',
                      help='visualization mode',
                      action='store_true')
  # argv=None -> argparse falls back to sys.argv[1:] (original behaviour).
  args = parser.parse_args(argv)
  return args
def parse_args_for_srgan():
  """Build the default-only option namespace for the SRGAN stage.

  Also ensures the SRGAN working directories exist.  The parser is invoked
  with an empty argv, so every option always keeps its declared default.
  """
  for subdir in ("srgan/images", "srgan/saved_models"):
    os.makedirs(subdir, exist_ok=True)
  parser = argparse.ArgumentParser()
  # (flag, type, default, help) - declared once, registered in a loop.
  option_specs = (
    ("--epoch", int, 500, "epoch to start training from"),
    ("--n_epochs", int, 501, "number of epochs of training"),
    ("--dataset_name", str, "img_align_celeba", "name of the dataset"),
    ("--batch_size", int, 1, "size of the batches"),
    ("--lr", float, 0.00001, "adam: learning rate"),
    ("--b1", float, 0.5, "adam: decay of first order momentum of gradient"),
    ("--b2", float, 0.999, "adam: decay of second order momentum of gradient"),
    ("--decay_epoch", int, 100, "epoch from which to start lr decay"),
    ("--hr_height", int, 1024, "high res. image height"),
    ("--hr_width", int, 1024, "high res. image width"),
    ("--channels", int, 3, "number of image channels"),
    ("--sample_interval", int, 50, "interval between saving image samples"),
    ("--checkpoint_interval", int, 100, "interval between model checkpoints"),
  )
  for flag, value_type, default, help_text in option_specs:
    parser.add_argument(flag, type=value_type, default=default, help=help_text)
  return parser.parse_args([])
# Training-schedule constants copied from the global config.  They are not
# read anywhere in this test script; presumably kept for parity with the
# training script - TODO confirm before removing.
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def load_gt_box(annopath,
                imagesetfile,
                classname,
                cachedir):
  """Load the ground-truth boxes of one class for every test image.

  Parses the VOC-style XML annotations (via parse_rec) for all images listed
  in ``imagesetfile``, caching the parsed records as a pickle in ``cachedir``
  so later calls skip the XML parsing.  Returns a dict keyed by image id with
  'bbox' (Nx4 array), 'difficult' (bool array) and 'det' (per-box seen flags).
  """
  if not os.path.isdir(cachedir):
    os.mkdir(cachedir)
  cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
  # read list of images
  with open(imagesetfile, 'r') as f:
    lines = f.readlines()
  imagenames = [x.strip() for x in lines]
  if not os.path.isfile(cachefile):
    # load annotations
    recs = {}
    for i, imagename in enumerate(imagenames):
      recs[imagename] = parse_rec(annopath.format(imagename))
      if i % 100 == 0:
        print('Reading annotation for {:d}/{:d}'.format(
          i + 1, len(imagenames)))
    # save
    print('Saving cached annotations to {:s}'.format(cachefile))
    with open(cachefile, 'wb') as f:
      pickle.dump(recs, f)
  else:
    # load
    with open(cachefile, 'rb') as f:
      try:
        recs = pickle.load(f)
      except:
        # Fall back for caches pickled under Python 2 (bytes-keyed).
        recs = pickle.load(f, encoding='bytes')
  #print(recs)
  # extract gt objects for this class
  class_recs = {}
  npos = 0
  for imagename in imagenames:
    R = [obj for obj in recs[imagename] if obj['name'] == classname]
    bbox = np.array([x['bbox'] for x in R])
    difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
    det = [False] * len(R)
    # npos counts the non-difficult ground truths (standard VOC bookkeeping).
    npos = npos + sum(~difficult)
    class_recs[imagename] = {'bbox': bbox,
                             'difficult': difficult,
                             'det': det}
  #print(class_recs)
  #print(len(class_recs))
  return class_recs
def iou(bb1, bb2):
  """Compute the intersection-over-union of two axis-aligned boxes.

  Boxes are (x1, y1, x2, y2) with x1 < x2 and y1 < y2.

  Bug fix: the intersection width/height are now clamped at zero.  The
  original code multiplied possibly-negative width and height, so two fully
  disjoint boxes (negative iw AND negative ih) produced a spurious positive
  "overlap".  Non-overlapping boxes now correctly yield 0.0.
  """
  # determine the coordinates of the intersection rectangle
  x_left = max(bb1[0], bb2[0])
  y_top = max(bb1[1], bb2[1])
  x_right = min(bb1[2], bb2[2])
  y_bottom = min(bb1[3], bb2[3])
  iw = max(0.0, x_right - x_left)
  ih = max(0.0, y_bottom - y_top)
  inters = iw * ih
  if inters == 0:
    # No overlap; also avoids 0/0 for touching or degenerate boxes.
    return 0.0
  # union = area(bb1) + area(bb2) - intersection
  uni = ((bb1[2]-bb1[0])*(bb1[3]-bb1[1]) + (bb2[2]-bb2[0])*(bb2[3]-bb2[1]) - inters)
  overlaps = inters / uni
  return overlaps
def Area(vertex):
  """Return the area of an axis-aligned box.

  The first four entries of ``vertex`` are read as (x1, y1, x2, y2).
  """
  return (vertex[2] - vertex[0]) * (vertex[3] - vertex[1])
if __name__ == '__main__':
  # ---- parse options for both the detector test and the SRGAN stage ----
  args = parse_args()
  args_sr = parse_args_for_srgan()
  print('Called with args:')
  print(args)
  if torch.cuda.is_available() and not args.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
  np.random.seed(cfg.RNG_SEED)
  # ---- per-dataset imdb names and anchor configuration ----
  if args.dataset == "pascal_voc":
    args.imdb_name = "voc_2007_trainval"
    args.imdbval_name = "voc_2007_test"
    args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
  elif args.dataset == "pascal_voc_0712":
    args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
    args.imdbval_name = "voc_2007_test"
    args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
  elif args.dataset == "coco":
    args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
    args.imdbval_name = "coco_2014_minival"
    args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
  elif args.dataset == "imagenet":
    args.imdb_name = "imagenet_train"
    args.imdbval_name = "imagenet_val"
    args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
  elif args.dataset == "vg":
    args.imdb_name = "vg_150-50-50_minitrain"
    args.imdbval_name = "vg_150-50-50_minival"
    args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
  args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
  if args.cfg_file is not None:
    cfg_from_file(args.cfg_file)
  if args.set_cfgs is not None:
    cfg_from_list(args.set_cfgs)
  print('Using config:')
  pprint.pprint(cfg)
  # Test-time: never use horizontally flipped images.
  cfg.TRAIN.USE_FLIPPED = False
  imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
  imdb.competition_mode(on=True)
  print('{:d} roidb entries'.format(len(roidb)))
  # ---- locate the trained checkpoint on disk ----
  input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
  if not os.path.exists(input_dir):
    raise Exception('There is no input directory for loading network from ' + input_dir)
  load_name = os.path.join(input_dir,
    'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
  # initilize the network here.
  if args.net == 'vgg16':
    fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
  elif args.net == 'res101':
    fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
  elif args.net == 'res50':
    fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
  elif args.net == 'res152':
    fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
  else:
    print("network is not defined")
    pdb.set_trace()
  fasterRCNN.create_architecture()
  print("load checkpoint %s" % (load_name))
  checkpoint = torch.load(load_name)
  fasterRCNN.load_state_dict(checkpoint['model'])
  if 'pooling_mode' in checkpoint.keys():
    cfg.POOLING_MODE = checkpoint['pooling_mode']
  print('load model successfully!')
  # initilize the tensor holder here.
  # These four tensors are resized and refilled in-place for every batch.
  im_data = torch.FloatTensor(1)
  im_info = torch.FloatTensor(1)
  num_boxes = torch.LongTensor(1)
  gt_boxes = torch.FloatTensor(1)
  # ship to cuda
  if args.cuda:
    im_data = im_data.cuda()
    im_info = im_info.cuda()
    num_boxes = num_boxes.cuda()
    gt_boxes = gt_boxes.cuda()
  # make variable
  im_data = Variable(im_data)
  im_info = Variable(im_info)
  num_boxes = Variable(num_boxes)
  gt_boxes = Variable(gt_boxes)
  if args.cuda:
    cfg.CUDA = True
  if args.cuda:
    fasterRCNN.cuda()
  start = time.time()
  max_per_image = 100
  vis = args.vis
  # NOTE(review): both branches set the same threshold; presumably a leftover
  # from a higher non-vis threshold - confirm before simplifying.
  if vis:
    thresh = 0.0
  else:
    thresh = 0.0
  save_name = 'faster_rcnn_10'
  num_images = len(imdb.image_index)
  # all_boxes[class][image] = Nx5 array of detections (x1,y1,x2,y2,score).
  all_boxes = [[[] for _ in range(num_images)]
               for _ in range(imdb.num_classes)]
  output_dir = get_output_dir(imdb, save_name)
  dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
                        imdb.num_classes, training=False, normalize = False)
  dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                            shuffle=False, num_workers=0,
                            pin_memory=True)
  data_iter = iter(dataloader)
  _t = {'im_detect': time.time(), 'misc': time.time()}
  det_file = os.path.join(output_dir, 'detections.pkl')
  fasterRCNN.eval()
  empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
  # ---- pass 1: run the detector over every test image ----
  for i in range(num_images):
      data = next(data_iter)
      with torch.no_grad():
              im_data.resize_(data[0].size()).copy_(data[0])
              im_info.resize_(data[1].size()).copy_(data[1])
              gt_boxes.resize_(data[2].size()).copy_(data[2])
              num_boxes.resize_(data[3].size()).copy_(data[3])
      #print(im_data.shape)
      #print(im_info.shape)
      #print(gt_boxes)
      #print(num_boxes)
      det_tic = time.time()
      rois, cls_prob, bbox_pred, \
      rpn_loss_cls, rpn_loss_box, \
      RCNN_loss_cls, RCNN_loss_bbox, \
      rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
      scores = cls_prob.data
      boxes = rois.data[:, :, 1:5]
      if cfg.TEST.BBOX_REG:
          # Apply bounding-box regression deltas
          box_deltas = bbox_pred.data
          if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
          # Optionally normalize targets by a precomputed mean and stdev
            if args.class_agnostic:
                box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                           + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                box_deltas = box_deltas.view(1, -1, 4)
            else:
                box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                           + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
          pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
          pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
      else:
          # Simply repeat the boxes, once for each class
          pred_boxes = np.tile(boxes, (1, scores.shape[1]))
      # Undo the test-time image rescale (data[1][0][2] is the scale factor).
      pred_boxes /= data[1][0][2].item()
      scores = scores.squeeze()
      pred_boxes = pred_boxes.squeeze()
      det_toc = time.time()
      detect_time = det_toc - det_tic
      misc_tic = time.time()
      #print(scores[:,1:3].shape)
      #print(pred_boxes[:,4:12].shape)
      ############################## decline head-tail overlapping
      # For each of the 13 species, run NMS jointly over its head (3k+1) and
      # tail (3k+2) classes so that a head box and a tail box cannot overlap;
      # the full-length class (3k+3) is carried over unchanged.
      new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
      new_scores = torch.cuda.FloatTensor(300,40).zero_()
      for k in range(13):
        b = torch.cat((pred_boxes[:,12*k+4:12*k+8],pred_boxes[:,12*k+8:12*k+12]),0)
        s = torch.cat((scores[:,3*k+1],scores[:,3*k+2]),0)
        keep = nms(b, s, 0.2)
        #new head class
        idx = [g for g in range(len(keep)) if keep[g] <300]
        new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
        new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
        #new tail class
        idx = [g for g in range(len(keep)) if keep[g] >=300]
        new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
        new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
        #new full length class = original
        new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
        new_scores[:,3*k+3] = scores[:,3*k+3]
      if vis:
          im = cv2.imread(imdb.image_path_at(i))
          im2show = np.copy(im)
      # Standard per-class NMS + collection into all_boxes.
      for j in range(1, imdb.num_classes):
          inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
          # if there is det
          if inds.numel() > 0:
            cls_scores = new_scores[:,j][inds]
            _, order = torch.sort(cls_scores, 0, True)
            if args.class_agnostic:
              cls_boxes = new_pred_boxes[inds, :]
            else:
              cls_boxes = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
            #print(cls_boxes.shape)
            #print(cls_scores.unsqueeze(1).shape)
            cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
            # cls_dets = torch.cat((cls_boxes, cls_scores), 1)
            cls_dets = cls_dets[order]
            keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
            cls_dets = cls_dets[keep.view(-1).long()]
            if vis:
              im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
            all_boxes[j][i] = cls_dets.cpu().numpy()
          else:
            all_boxes[j][i] = empty_array
      #print(exist_classes)
      #for k, j in enumerate(exist_classes):
      #  all_boxes[j][i] = exist_dets[k]
      #print(all_boxes)
      # Limit to max_per_image detections *over all classes*
      if max_per_image > 0:
          #print(all_boxes[3][i][:,-1])
          image_scores = np.hstack([all_boxes[j][i][:,-1]
                                    for j in range(1, imdb.num_classes)])
          if len(image_scores) > max_per_image:
              image_thresh = np.sort(image_scores)[-max_per_image]
              for j in xrange(1, imdb.num_classes):
                  keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                  all_boxes[j][i] = all_boxes[j][i][keep, :]
      misc_toc = time.time()
      nms_time = misc_toc - misc_tic
      sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
          .format(i + 1, num_images, detect_time, nms_time))
      sys.stdout.flush()
      if vis:
          cv2.imwrite('result.png', im2show)
          pdb.set_trace()
          #cv2.imshow('test', im2show)
          #cv2.waitKey(0)
  #print(all_boxes[1][0][0])
  print(torch.cuda.current_device())
  with torch.cuda.device(torch.cuda.current_device()):
    torch.cuda.empty_cache()
  #################################### filter imgs need to do SRGAN-preprocessing
  # Detections with a mid-range score (0.5..0.8) that still hit a ground-truth
  # head/tail box are cropped (with 1/4-image padding) and written out as
  # SRGAN inputs for super-resolution re-testing.
  annopath = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/Annotations/{:s}.xml'
  imagesetfile = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt'
  cachedir = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/annotations_cache'
  image_file = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/JPEGImages'
  f = open(imagesetfile)
  new_indexes = []
  img_ids = []
  new_gt_boxes = []
  for line in f:
    img_ids.append(line.splitlines())
  img_ids = np.squeeze(img_ids)
  for i in range(num_images):
    for j in range(1, imdb.num_classes):
      gt_boxes_1 = load_gt_box(annopath,imagesetfile,classes[j],cachedir)
      if not np.any(all_boxes[j][i]):
        continue
      if len(gt_boxes_1[img_ids[i]]['bbox']) == 0:
        continue
      else:# 1 GT box in single image for a single class
        gt_b = gt_boxes_1[img_ids[i]]['bbox']
        #print(gt_b)
        z = 0
        for m in range(len(all_boxes[j][i])):
          for n in range(len(gt_b)):
            det_b = [int(l) for l in all_boxes[j][i][m][:4]]
            #print(all_boxes[j][i][m][4], iou(det_b, gt_b[n]), imdb.image_index[j])
            # Only head/tail classes (names ending in ")") are re-tested.
            if all_boxes[j][i][m][4] > 0.5 and all_boxes[j][i][m][4] < 0.8 \
               and iou(det_b, gt_b[n]) > 0.5 and classes[j][-1]==")":
              print("srgan beginning......")
              new_indexes.append(img_ids[i]+"_"+classes[j]+"_"+str(z))
              print(len(new_indexes))#, all_boxes[j][i][m][4], iou(det_b, gt_b[n]))
              img_path = os.path.join(image_file, img_ids[i]+".JPG")
              img = Image.open(img_path)
              img = np.asarray(img)
              # Padding margin: a quarter of the image in each direction.
              # NOTE(review): quaterx is derived from shape[1] (width) but is
              # compared against shape[0] below - verify axis order.
              quaterx = int(img.shape[1]*1/4)
              quatery = int(img.shape[0]*1/4)
              x1_padding = 0
              y1_padding = 0
              x2_padding = 0
              y2_padding = 0
              print(img.shape)
              # Grow whichever box (detection or ground truth) is larger,
              # clamping at the image border and recording the actual padding.
              if Area(det_b) >= Area(gt_b[n]):
                x1, y1, x2, y2 = det_b
                print("det_b: " + str(det_b))
                if x1 > quaterx:
                  x1-=quaterx
                  x1_padding = quaterx
                else:
                  x1 = 0
                  x1_padding = x1
                if x2 < img.shape[0]-quaterx:
                  x2+= quaterx
                  x2_padding = quaterx
                else:
                  x2 = img.shape[0]-1
                  x2_padding = img.shape[0] - x2-1
                if y1 > quatery:
                  y1 -=quatery
                  y1_padding = quatery
                else:
                  y1 = 0
                  y1_padding = y1
                if y2 < img.shape[1]-quatery:
                  y2+=quatery
                  y2_padding = quatery
                else:
                  y2= img.shape[1]-1
                  y2_padding = img.shape[1] - y2-1
              else:
                x1, y1, x2, y2 = gt_b[n]
                print("gt_b: "+str(gt_b))
                if x1 > quaterx:
                  x1-=quaterx
                  x1_padding = quaterx
                else:
                  x1 = 0
                  x1_padding = x1
                if x2 < img.shape[0]-quaterx:
                  x2+= quaterx
                  x2_padding = quaterx
                else:
                  x2 = img.shape[0]-1
                  x2_padding = img.shape[0] - x2-1
                if y1 > quatery:
                  y1 -=quatery
                  y1_padding = quatery
                else:
                  y1 = 0
                  y1_padding = y1
                if y2 < img.shape[1]-quatery:
                  y2+=quatery
                  y2_padding = quatery
                else:
                  y2= img.shape[1]-1
                  y2_padding = img.shape[1] - y2-1
              x1, y1, x2, y2= int(x1),int(y1),int(x2), int(y2)
              new_gt_boxes.append([x1_padding, y1_padding, x2-x1-x1_padding-x2_padding, \
                                   y2-y1-y1_padding-y2_padding])# whole photo
              srgan_in = img[y1:y2 ,x1:x2 ,:]
              srgan_in = srgan_in[...,::-1]#rgb->bgr
              print(x1,y1,x2,y2,srgan_in.shape)
              cv2.imwrite(os.path.join("srgan/srgan_input", img_ids[i]+"_"+classes[j]+"_"+str(z)+".JPG"), srgan_in)
              print("save input: %s" %(img_ids[i]+"_"+classes[j]+"_"+str(z)))
              z+=1
              all_boxes[j][i][m] = np.append(gt_b[n], 1.0)# turn original pred box to gt box
  with torch.cuda.device(torch.cuda.current_device()):
    torch.cuda.empty_cache()
  # Run the SRGAN over all cropped inputs; outputs land in srgan/srgan_output.
  dataloader = DataLoader(
      ImageDataset("srgan/srgan_input", hr_shape=(1024,1024)),
      batch_size=1,
      shuffle=True,
      num_workers=0,
  )
  #gan_output = srgan(args_sr, dataloader)
  srgan(args_sr, dataloader)
  #print("length of data: %d"%len(gan_output))
  print("srgan finish......")
  with torch.cuda.device(torch.cuda.current_device()):
    torch.cuda.empty_cache()
  # re-test srgan output
  # ---- pass 2: run the detector again on the super-resolved crops ----
  dataloader1 = DataLoader(
      ImageDataset("srgan/srgan_output", hr_shape=(1024,1024)),
      batch_size=1,
      shuffle=True,
      num_workers=0,
  )
  all_boxes_1 = [[[] for _ in range(len(dataloader1))]
               for _ in range(imdb.num_classes)]
  for i, gan_img in enumerate(dataloader1):
  #for i in range(len(dataloader1)):
    #gan_img = gan_output[i]
    #print(gan_img)
    # Restore the crop to its original size, then rescale to 600px for the
    # detector (the standard Faster R-CNN test scale in this config).
    arr = np.append(gan_img["origin_size"][0][0].numpy(), gan_img["origin_size"][1][0].numpy())
    gan_img_os = F.interpolate(gan_img['hr'], size=(arr[0],arr[1]), mode='bilinear')
    r = 600 / gan_img_os.shape[2]
    gan_info = np.array([[gan_img_os.shape[2], gan_img_os.shape[3], r]])
    with torch.no_grad():
      gan_img_600 = F.interpolate(gan_img_os, scale_factor=r, mode="bilinear").cuda()
      gan_info = torch.from_numpy(gan_info).cuda()
      # NOTE(review): the next two lines are bare expression statements and
      # have no effect; gt_boxes/num_boxes keep their values from pass 1.
      gt_boxes
      num_boxes
    #print(gan_img.shape)
    #print(gan_info.shape)
    #print(gt_boxes)
    #print(num_boxes)
    det_tic = time.time()
    rois_1, cls_prob_1, bbox_pred_1, \
    rpn_loss_cls_1, rpn_loss_box_1, \
    RCNN_loss_cls_1, RCNN_loss_bbox_1, \
    rois_label_1 = fasterRCNN(gan_img_600, gan_info, gt_boxes, num_boxes)
    scores_1 = cls_prob_1.data
    boxes_1 = rois_1.data[:, :, 1:5]
    #print(data)
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = bbox_pred_1.data
        if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
        # Optionally normalize targets by a precomputed mean and stdev
          if args.class_agnostic:
            box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                       + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
            box_deltas_1 = box_deltas.view(1, -1, 4)
          else:
            box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                       + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
            box_deltas_1 = box_deltas.view(1, -1, 4 * len(imdb.classes))
        # NOTE(review): this transforms `boxes` (from pass 1), not `boxes_1`
        # from the current forward pass - looks like a copy/paste slip; confirm.
        pred_boxes_1 = bbox_transform_inv(boxes, box_deltas_1, 1)
        pred_boxes_1 = clip_boxes(pred_boxes_1, gan_info.data, 1)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes_1 = np.tile(boxes_1, (1, scores.shape[1]))
    # NOTE(review): divides by the scale of the *last* pass-1 batch (`data`),
    # not by `r` for this crop - verify intended rescaling.
    pred_boxes_1 /= data[1][0][2].item()
    scores_1 = scores_1.squeeze()
    pred_boxes_1 = pred_boxes_1.squeeze()
    det_toc = time.time()
    detect_time = det_toc - det_tic
    misc_tic = time.time()
    ############################## decline head-tail overlapping
    new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
    new_scores = torch.cuda.FloatTensor(300,40).zero_()
    for k in range(13):
      b = torch.cat((pred_boxes_1[:,12*k+4:12*k+8],pred_boxes_1[:,12*k+8:12*k+12]),0)
      s = torch.cat((scores_1[:,3*k+1],scores_1[:,3*k+2]),0)
      keep = nms(b, s, 0.2)
      #new head class
      idx = [g for g in range(len(keep)) if keep[g] <300]
      new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
      new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
      #new tail class
      idx = [g for g in range(len(keep)) if keep[g] >=300]
      new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
      new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
      #new full length class = original
      new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
      new_scores[:,3*k+3] = scores[:,3*k+3]
    if vis:
        im = cv2.imread(imdb.image_path_at(i))
        im2show = np.copy(im)
    for j in range(1, imdb.num_classes):
        inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
        # if there is det
        if inds.numel() > 0:
          cls_scores_1 = new_scores[:,j][inds]
          _, order = torch.sort(cls_scores_1, 0, True)
          if args.class_agnostic:
            cls_boxes_1 = new_pred_boxes[inds, :]
          else:
            cls_boxes_1 = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
          cls_dets_1 = torch.cat((cls_boxes_1, cls_scores_1.unsqueeze(1)), 1)
          cls_dets_1 = cls_dets_1[order]
          keep = nms(cls_boxes_1[order, :], cls_scores_1[order], cfg.TEST.NMS)
          cls_dets_1 = cls_dets_1[keep.view(-1).long()]
          if vis:
            im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
          # NOTE(review): stores `cls_dets` (pass-1 leftover), not `cls_dets_1`
          # computed just above - confirm which is intended.
          all_boxes_1[j][i] = cls_dets.cpu().numpy()
        else:
          all_boxes_1[j][i] = empty_array
    # Limit to max_per_image detections *over all classes*
    if max_per_image > 0:
        #print(all_boxes[3][i][:,-1])
        image_scores = np.hstack([all_boxes_1[j][i][:,-1]
                                  for j in range(1, imdb.num_classes)])
        if len(image_scores) > max_per_image:
            image_thresh = np.sort(image_scores)[-max_per_image]
            for j in range(1, imdb.num_classes):
                keep = np.where(all_boxes_1[j][i][:, -1] >= image_thresh)[0]
                all_boxes_1[j][i] = all_boxes_1[j][i][keep, :]
    misc_toc = time.time()
    nms_time = misc_toc - misc_tic
    sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
        .format(i + 1, len(dataloader1), detect_time, nms_time))
    sys.stdout.flush()
  torch.cuda.empty_cache()
  # ---- persist pass-1 detections and evaluate both passes together ----
  with open(det_file, 'wb') as f:
    pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
  print('Evaluating detections')
  end = time.time()
  #print(len(all_boxes))
  #print(len(all_boxes_1[0]))
  for a in range(len(all_boxes)):
    all_boxes[a].extend(all_boxes_1[a])
    print(len(all_boxes[a]))
  print(new_indexes)
  #print(new_gt_boxes)
  imdb.evaluate_detections(all_boxes, output_dir, new_indexes, new_gt_boxes)
  print("test time: %0.4fs" % (end - start))
| 1.59375 | 2 |
app.py | iio1989/oshite | 0 | 12954 | <gh_stars>0
from flask import Flask, render_template, request, redirect, url_for, Markup
import app_helper as apHelp
app = Flask(__name__)  # WSGI application object used by all route handlers below
@app.route('/')
def root():
    """Serve the landing page."""
    page = 'home.html'
    return render_template(page)
# click convetBtn. get HttpParam.
@app.route('/post', methods=['GET', 'POST'])
def post():
    """Convert the submitted kana text to oshite glyphs and render the result."""
    if request.method != 'POST':
        # Direct GET access has no form data: send the visitor back home.
        return redirect(url_for('home'))
    input_kana = request.form['input_kana']
    converted_input_list = apHelp.getConvetedStr_kanaToOshite(input_kana)
    # rendering for home.html.
    return render_template('home.html',
                           input_kana=input_kana,
                           converted_input_list=converted_input_list,
                           fileType=apHelp.FILE_TYPE)
# click homeBtn from header.
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Render the home page (header navigation target)."""
    page = 'home.html'
    return render_template(page)
# click aboutBtn from header.
@app.route('/about', methods=['GET', 'POST'])
def about():
    """Render the about page (header navigation target)."""
    page = 'about.html'
    return render_template(page)
# click historyBtn from header.
@app.route('/history', methods=['GET', 'POST'])
def history():
    """Render the history page (header navigation target)."""
    page = 'history.html'
    return render_template(page)
if __name__ == '__main__':
app.run(debug=True) | 2.453125 | 2 |
build/cls/tp/slices.py | amunoz1/mines | 1 | 12955 | """
Makes subdirectories with slices of seismic time or depth images.
For example, the directory with name "s3_84" contains a constant-i3
slice, where i3 = 84.
"""
from tputils import *
# Select the dataset subset to slice; the depth subset is kept for reference.
#setupForSubset("subz_401_4_600")
setupForSubset("subt_251_4_500")
seismicDir = getSeismicDir()
#############################################################################
def main(args):
  # Produce constant-i3 time slices for the two horizons of interest.
  #makeSlice3Z(96)
  for i3 in (84, 73):
    makeSlice3T(i3)
def makeSlice3T(i3):
  """Write (and display) the constant-i3 slice of the seismic time image."""
  slice_dir = "s3_%s/" % i3
  File(seismicDir + slice_dir).mkdir()
  for image_name in ["tpst"]:
    image = readImage(image_name)
    writeImage(slice_dir + image_name, image[i3])
    display(image[i3])
def makeSlice3Z(i3):
  """Write the constant-i3 slices of the depth image and its horizon fields."""
  slice_dir = "s3_%s/" % i3
  File(seismicDir + slice_dir).mkdir()
  for image_name in ["tpsz", "tpgv", "tpgd", "tpgg", "tpgp"]:
    image = readImage(image_name)
    writeImage(slice_dir + image_name, image[i3])
def display(s,g=None,cmin=0,cmax=0):
  """Plot image s; optionally overlay image g with a translucent jet colormap.

  cmin/cmax set the overlay clip values and are ignored when equal.
  """
  sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
  sp.addColorBar()
  sp.getPlotPanel().setColorBarWidthMinimum(80)
  pv = sp.addPixels(s)
  pv.setInterpolation(PixelsView.Interpolation.NEAREST)
  if g is not None:  # identity test instead of the fragile `g != None`
    pv = sp.addPixels(g)
    pv.setInterpolation(PixelsView.Interpolation.NEAREST)
    pv.setColorModel(ColorMap.getJet(0.3))
  if cmin!=cmax:
    pv.setClips(cmin,cmax)
#############################################################################
run(main)  # hand control to the tputils run() helper
| 2.96875 | 3 |
lldb/examples/summaries/cocoa/NSException.py | bytesnake/Enzyme | 427 | 12956 | <filename>lldb/examples/summaries/cocoa/NSException.py
"""
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for class NSException
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import CFString
import lldb
import lldb.formatters.Logger
# Shared metrics object: the providers below record which detection path
# (failure or success) was taken for each inspected value.
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
class NSKnownException_SummaryProvider:
    """Summary provider for values recognized as genuine NSException objects.

    Reads the name/reason NSString ivars directly by their byte offsets from
    the object base, avoiding expression evaluation in the inferior.
    """

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, params):
        # valobj: the SBValue being summarized; params: runtime layout info.
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        if not (self.sys_params.types_cache.id):
            # Cache the generic ObjC `id` type for the child extractions below.
            self.sys_params.types_cache.id = self.valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeObjCID)
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def offset_name(self):
        # `name` is the first ivar after the isa pointer.
        logger = lldb.formatters.Logger.Logger()
        return self.sys_params.pointer_size

    def offset_reason(self):
        # `reason` follows `name`, i.e. two pointers past the object base.
        logger = lldb.formatters.Logger.Logger()
        return 2 * self.sys_params.pointer_size

    def description(self):
        """Return 'name:<...> reason:<...>' built from the two NSString ivars."""
        logger = lldb.formatters.Logger.Logger()
        name_ptr = self.valobj.CreateChildAtOffset(
            "name", self.offset_name(), self.sys_params.types_cache.id)
        reason_ptr = self.valobj.CreateChildAtOffset(
            "reason", self.offset_reason(), self.sys_params.types_cache.id)
        return 'name:' + CFString.CFString_SummaryProvider(
            name_ptr, None) + ' reason:' + CFString.CFString_SummaryProvider(reason_ptr, None)
class NSUnknownException_SummaryProvider:
    """Fallback provider for NSException subclasses with unknown layout.

    Unlike the known-layout provider, this one evaluates `[obj name]` and
    `[obj reason]` as expressions in the inferior process, which is slower
    but works for any subclass.
    """

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def description(self):
        """Return '<name> <reason>' via expression evaluation, or an error tag."""
        logger = lldb.formatters.Logger.Logger()
        stream = lldb.SBStream()
        self.valobj.GetExpressionPath(stream)
        name_vo = self.valobj.CreateValueFromExpression(
            "name", "(NSString*)[" + stream.GetData() + " name]")
        reason_vo = self.valobj.CreateValueFromExpression(
            "reason", "(NSString*)[" + stream.GetData() + " reason]")
        if name_vo.IsValid() and reason_vo.IsValid():
            return CFString.CFString_SummaryProvider(
                name_vo, None) + ' ' + CFString.CFString_SummaryProvider(reason_vo, None)
        return '<variable is not NSException>'
def GetSummary_Impl(valobj):
    """Pick the right summary provider for ``valobj``.

    Runs the shared ObjC class-detection helper; if detection itself produced
    a wrapper (error/special case) return it as-is, otherwise dispatch on the
    detected class name and record the outcome in the shared metrics.
    """
    logger = lldb.formatters.Logger.Logger()
    global statistics
    class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
        valobj, statistics)
    if wrapper:
        # Detection already decided (invalid pointer/isa etc.).
        return wrapper
    name_string = class_data.class_name()
    logger >> "class name is: " + str(name_string)
    if name_string == 'NSException':
        # Exact class: fast ivar-offset provider.
        wrapper = NSKnownException_SummaryProvider(
            valobj, class_data.sys_params)
        statistics.metric_hit('code_notrun', valobj)
    else:
        # Subclass or unrelated type: slower expression-based provider.
        wrapper = NSUnknownException_SummaryProvider(
            valobj, class_data.sys_params)
        statistics.metric_hit(
            'unknown_class',
            valobj.GetName() +
            " seen as " +
            name_string)
    return wrapper
def NSException_SummaryProvider(valobj, dict):
    """Top-level LLDB summary callback registered for NSException.

    Delegates to GetSummary_Impl to choose a provider, then renders its
    description, falling back to placeholder strings when anything fails.
    """
    logger = lldb.formatters.Logger.Logger()
    provider = GetSummary_Impl(valobj)
    if provider is not None:
        if isinstance(
                provider,
                lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
            return provider.message()
        try:
            summary = provider.description()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any provider failure still degrades
            # gracefully to the placeholder below.
            summary = None
        logger >> "got summary " + str(summary)
        if summary is None:
            summary = '<variable is not NSException>'
        return str(summary)
    return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
    """Called by LLDB on import: register the summary for type NSException."""
    debugger.HandleCommand(
        "type summary add -F NSException.NSException_SummaryProvider NSException")
| 2.15625 | 2 |
pacman-arch/test/pacman/tests/upgrade084.py | Maxython/pacman-for-termux | 23 | 12957 | <reponame>Maxython/pacman-for-termux<filename>pacman-arch/test/pacman/tests/upgrade084.py
# pactest scenario: `self` is the test harness object injected by the runner.
self.description = "Install a package ('any' architecture)"
# Build a dummy package whose architecture field is 'any'.
p = pmpkg("dummy")
p.files = ["bin/dummy",
           "usr/man/man1/dummy.1"]
p.arch = 'any'
self.addpkg(p)
# Let pacman auto-detect the local architecture.
self.option["Architecture"] = ['auto']
self.args = "-U %s" % p.filename()
# Expectations: install succeeds, package is registered, payload files exist.
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
for f in p.files:
    self.addrule("FILE_EXIST=%s" % f)
| 2.171875 | 2 |
Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 0 | 12958 | from simple_network.tcp_app_server import *
import httptools
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = 'ButenkoMS <<EMAIL>>'
# ======================================================================
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
#
# Server identity and listen address for the benchmark gate.
SERVER_KEYWORD = b'http server inline'
SERVER_ADDRESS = ('localhost', 25000)
BSC__USE_READ_WITH_FIXED_BUFFER = True # "Optimized for speed". Good for Named Clients.
# BSC__USE_READ_WITH_FIXED_BUFFER = False # "Optimized for memory". Good for big amount of Unknown Clients (raw,
# http, etc.) if you have small server.
BSC__SOCKET_READ_FIXED_BUFFER_SIZE = 1024 ** 2
BSC__USE_NODELAY_INET = True
BSC__REUSE_GATE_ADDR = True
BSC__REUSE_GATE_PORT = True
LINE_TRACE_ALLOWED = True
class RawClientCheckerAllRaw(CheckIsRawConnection):
    """Checker that classifies every incoming connection as a raw client.

    No inspection of the connection is performed; the server therefore
    treats all unknown clients as raw (e.g. plain HTTP) connections.
    """

    def __call__(self, app_server: ASockIOCore, client_info: Connection):
        # Unconditionally raw — both arguments are intentionally ignored.
        return True
def run_http_server():
    """Run a blocking benchmark HTTP server on SERVER_ADDRESS.

    Builds an ASockIOCore server configured by the module-level BSC__*
    settings, treats every incoming connection as a raw (HTTP) client via
    RawClientCheckerAllRaw, and then loops forever dispatching three event
    lists per io_iteration: newly connected clients, clients with readable
    data, and disconnected clients.
    """
    io_iteration_timeout = 0.5

    # ADD SERVER GATE CONNECTIONS
    set_of_tcp_settings = set()
    tcp_settings = ConnectionSettings(ConnectionDirectionRole.server, SERVER_ADDRESS, SERVER_KEYWORD)
    set_of_tcp_settings.add(tcp_settings)

    # CREATE SERVER
    http_server = ASockIOCore(set_of_tcp_settings)

    # SET SERVER SETTINGS (values come from the module-level BSC__* constants)
    http_server.raw_checker_for_new_incoming_connections = RawClientCheckerAllRaw()
    http_server.unknown_clients_are_allowed = True
    http_server.should_get_client_addr_info_on_connection = False
    http_server.use_speed_optimized_socket_read = BSC__USE_READ_WITH_FIXED_BUFFER
    http_server.socket_read_fixed_buffer_size.result = BSC__SOCKET_READ_FIXED_BUFFER_SIZE
    http_server.use_nodelay_inet = BSC__USE_NODELAY_INET
    http_server.reuse_gate_addr = BSC__REUSE_GATE_ADDR
    http_server.reuse_gate_port = BSC__REUSE_GATE_PORT

    # START SERVER
    with asock_io_core_connect(http_server, True, backlog=1000) as server:
        http_server.need_to_auto_check_incoming_raw_connection = True
        # Maps transport id -> HttpClientData wrapper for each live client.
        clients_per_transport_id = dict()
        # RUN SERVER LOOP
        while True:
            io_iteration_result = server.io_iteration(io_iteration_timeout)

            # CLIENT CONNECTED
            for another_client_id in io_iteration_result.newly_connected_unknown_clients:
                clients_per_transport_id[another_client_id] = HttpClientData(another_client_id, server)

            # CLIENT HAVE DATA TO READ
            for another_client_id in io_iteration_result.clients_have_data_to_read:
                clients_per_transport_id[another_client_id].data_received()

            # CLIENT CLOSED — socket_error() always returns True, so the
            # entry is dropped from the map after cleanup.
            for another_client_id in io_iteration_result.clients_with_disconnected_connection:
                if clients_per_transport_id[another_client_id].socket_error():
                    del clients_per_transport_id[another_client_id]

    print('Server had been Shut Down.')
# ==============================================================================================================
# !!!!! IMPORTANT !!!!!
# NEXT CODE SHOULD BE EQUIVALENT TO ASYNCIO HTTP SERVER'S CODE FROM "https://github.com/MagicStack/vmbench" PROJECT
# (BENCHMARKING TOOL FROM 'UVLOOP' DEVELOPERS) FOR FAIR COMPARISON, SO IT'S SO DIRTY.
# (IT'S ALMOST EQUIVALENT: IT DOES NOT HAVE FEW CRITICAL vmbench's BUGS)
_RESP_CACHE = {}
class HttpRequest:
    """Immutable-shape container for one parsed HTTP request.

    Holds the owning protocol object, the raw URL bytes, the collected
    header tuples, and the HTTP version string. No validation is done.
    """

    __slots__ = ('_protocol', '_url', '_headers', '_version')

    def __init__(self, protocol, url, headers, version):
        # Assign each constructor argument to its slot, in slot order.
        for slot_name, slot_value in zip(self.__slots__,
                                         (protocol, url, headers, version)):
            setattr(self, slot_name, slot_value)


class HttpResponse:
    """Writes a plain-text 200 response for a given request.

    The serialized response bytes are appended to the protocol's
    ``output_list`` rather than written to a socket directly.
    """

    __slots__ = ('_protocol', '_request', '_headers_sent')

    def __init__(self, protocol, request: HttpRequest):
        self._protocol = protocol
        self._request = request
        self._headers_sent = False

    def write(self, data):
        # Assemble status line + fixed headers + body into one bytes blob.
        status_line = 'HTTP/{} 200 OK\r\n'.format(self._request._version)
        content_length = 'Content-Length: {}\r\n'.format(len(data))
        response_bytes = (status_line.encode('latin-1')
                          + b'Content-Type: text/plain\r\n'
                          + content_length.encode('latin-1')
                          + b'\r\n'
                          + data)
        self._protocol.output_list.append(response_bytes)
class HttpClientData:
    """Per-connection state: feeds client bytes through httptools and queues
    the generated responses back onto the server.

    Instances double as the httptools.HttpRequestParser callback object
    (on_url / on_header / on_headers_complete below).
    """

    __slots__ = ('server', 'output_list', 'transport_id',
                 '_current_request', '_current_parser',
                 '_current_url', '_current_headers', '_last_piece_of_data',
                 '_previous_piece_of_data')

    def __init__(self, transport_id, server: ASockIOCore):
        self.server = server
        self.transport_id = transport_id
        # Responses accumulated by HttpResponse.write(); flushed per read.
        self.output_list = list()
        # `self` is the parser's callback sink.
        self._current_parser = httptools.HttpRequestParser(self)
        self._current_headers = list()
        self._current_request = None
        self._current_url = None
        self._last_piece_of_data = None
        self._previous_piece_of_data = None

    def data_received(self):
        """Feed all pending client messages through the parser and send any
        responses that were produced; on any error mark the connection for
        immediate closure instead of raising."""
        try:
            for message in self.server.get_messages_from_client(self.transport_id):
                # print('IN {}: {}'.format(self.transport_id, bytes(message)))
                self._current_parser.feed_data(message)
            self.server.send_messages_to_client(self.transport_id, self.output_list)
        except Exception as err:
            print('EXCEPTION:', err)
            self.server.mark_client_connection_as_should_be_closed_immediately(self.transport_id, False)
            # raise err
        del self.output_list[:]
        # self.output_list.clear()

    def socket_error(self):
        """Drop parser state and deregister the client; always returns True
        so callers can delete their bookkeeping entry."""
        self._current_request = self._current_parser = None
        self.server.remove_client(self.transport_id)
        return True

    # =============================================
    # ==== BEGIN of HttpRequestParser methods: ====

    # def on_message_begin(self):
    #     pass

    def on_url(self, url):
        # URL may arrive in several chunks; concatenate them.
        if self._current_url:
            self._current_url += url
        else:
            self._current_url = url

    # def on_status(self, data):
    #     pass

    def on_header(self, name, value):
        self._current_headers.append((name, value))

    def on_headers_complete(self):
        # Build the request object and dispatch it; on failure log the last
        # two raw data pieces for debugging, then re-raise.
        try:
            self._current_request = HttpRequest(
                self, self._current_url, self._current_headers,
                self._current_parser.get_http_version())
            self.handle(self._current_request, HttpResponse(self, self._current_request))
        except:
            print('ON HEADERS COMPLETE. ID: {}. Last: {}. Previous : {}.'.format(
                self.transport_id, self._last_piece_of_data, self._previous_piece_of_data))
            raise

    # def on_body(self, data):
    #     pass

    # def on_message_complete(self):
    #     pass

    # def on_chunk_header(self):
    #     pass

    # def on_chunk_complete(self):
    #     pass

    # ==== END of HttpRequestParser methods====
    # =========================================

    def handle(self, request, response: HttpResponse):
        """Benchmark handler: the URL path encodes the payload size (default
        1024) and the response is that many 'X' bytes, cached per size."""
        parsed_url = httptools.parse_url(self._current_url)
        payload_size = parsed_url.path.decode('ascii')[1:]
        if not payload_size:
            payload_size = 1024
        else:
            payload_size = int(payload_size)
        resp = _RESP_CACHE.get(payload_size)
        if resp is None:
            resp = b'X' * payload_size
            _RESP_CACHE[payload_size] = resp
        response.write(resp)
        # Reset per-request parser state for the next pipelined request.
        self._current_request = None
        self._current_url = None
        self._current_headers = list()
        # print('KEEP ALIVE:', self._current_parser.should_keep_alive())
        if not self._current_parser.should_keep_alive():
            self.server.mark_client_connection_as_ready_to_be_closed(self.transport_id, False)
if __name__ == '__main__':
run_http_server()
| 2.25 | 2 |
bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 0 | 12959 | <filename>bamboomba_description/launch/robot_state_publisher.launch.py<gh_stars>0
from os import path
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument
from launch.substitutions import Command, LaunchConfiguration
def generate_launch_description():
    """Create the launch description that starts robot_state_publisher.

    The robot description is generated at launch time by running xacro on
    the bamboomba URDF; the 'use_sim_time' launch argument (default False)
    is forwarded to the node's parameters.
    """
    use_sim_time = LaunchConfiguration('use_sim_time')

    # Resolve the xacro file shipped with bamboomba_description.
    xacro_path = path.join(
        get_package_share_directory('bamboomba_description'),
        'urdf', 'bamboomba.urdf.xacro')
    robot_description = {
        'robot_description': Command(['xacro', ' ', xacro_path])}

    sim_time_argument = DeclareLaunchArgument(
        'use_sim_time',
        default_value='False',
        description='Use simulation clock if true')
    state_publisher = Node(
        package='robot_state_publisher',
        name='robot_state_publisher',
        executable='robot_state_publisher',
        output='screen',
        parameters=[{'use_sim_time': use_sim_time},
                    robot_description])

    return LaunchDescription([sim_time_argument, state_publisher])
| 2.25 | 2 |
tests/test__event.py | alpha-health-ai/pyformance | 4 | 12960 | <filename>tests/test__event.py
from pyformance.meters import Event, EventPoint
from tests import TimedTestCase
class EventTestCase(TimedTestCase):
    """Unit tests for the Event meter (add / clear / snapshot semantics)."""

    def setUp(self):
        super(EventTestCase, self).setUp()
        self.event = Event(
            clock=TimedTestCase.clock,
            key="test_event",
            tags={"name", "value"})

    def tearDown(self):
        super(EventTestCase, self).tearDown()

    def test_add_event_and_read_it(self):
        payload = {"value": 1}
        self.event.add(payload)
        expected = [EventPoint(time=self.clock.time(), values=payload)]
        self.assertEqual(self.event.get_events(), expected)

    def test_clear_event_clears_events(self):
        self.event.add({"value": 1})
        self.event.clear()
        self.assertEqual(len(self.event.get_events()), 0)

    def test_get_event_returns_shallow_copy(self):
        payload = {"value": 1}
        self.event.add(payload)
        snapshot = self.event.get_events()
        self.assertEqual(len(snapshot), 1)
        # make sure the returned object is not a reference(important for thread safety)
        self.event.clear()
        self.assertEqual(len(snapshot), 1)
| 2.59375 | 3 |
problem_#43_30032019.py | vivek28111992/DailyCoding | 0 | 12961 | <gh_stars>0
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Amazon.
Implement a stack that has the following methods:
push(val), which pushes an element onto the stack
pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then it should throw an error or return null.
max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should throw an error or return null.
Each method should run in constant time.
https://www.geeksforgeeks.org/design-a-stack-that-supports-getmin-in-o1-time-and-o1-extra-space/
https://www.geeksforgeeks.org/design-and-implement-special-stack-data-structure/
"""
# Class to make a Node
class Node:
    """Singly linked node; the building block for the three stacks below."""

    # Constructor which assign argument to node's value
    def __init__(self, value):
        self.value = value
        self.next = None

    # This method returns the string representation of the object
    def __str__(self):
        return "Node({})".format(self.value)

    # __repr__ is same as __str__
    __repr__ = __str__


class Stack:
    """Stack with O(1) push, pop, getMin and getMax.

    Three parallel linked lists are maintained: ``top`` holds the pushed
    values, while ``minimum`` and ``maximum`` hold, node-for-node, the
    running minimum/maximum of the stack at each depth. Pushing/popping
    updates all three together, so the current extrema are always the head
    nodes of ``minimum``/``maximum``.

    Bug fix vs. the original implementation: every branch of push() ended
    with ``self.maximum.value = value`` and ``self.minimum.value = value``,
    overwriting the tracked extrema with whatever was pushed (e.g. after
    push(3); push(5) getMin() reported 5). push() now derives each new
    head from a proper min()/max() comparison. ``count`` is also kept
    up to date, which the original never did.
    """

    # Stack Constructor initialise top of stack and counter
    def __init__(self):
        self.top = None        # head of the value stack
        self.maximum = None    # head of the running-maximum stack
        self.count = 0         # number of elements currently stored
        self.minimum = None    # head of the running-minimum stack

    # This method returns the string representation of the object (stack).
    def __str__(self):
        temp = self.top
        out = []
        while temp:
            out.append(str(temp.value))
            temp = temp.next
        out = '\n'.join(out)
        return ('Top {} \n\nStack :\n{}'.format(self.top, out))

    # __repr__ is same as __str__
    __repr__ = __str__

    # This method is used to get minimum element of stack
    def getMin(self):
        if self.top is None:
            return "Stack is Empty"
        else:
            print("Minimum element in the stack is: {}".format(self.minimum.value))

    # This method is used to get maximum element of stack
    def getMax(self):
        if self.top is None:
            return "Stack is Empty"
        else:
            print("Maximum element in the stack is: {}".format(self.maximum.value))

    # Method to check if stack is Empty or not
    def isEmpty(self):
        # Empty exactly when there is no top node.
        return self.top is None

    def push(self, value):
        """Push value and update the running minimum/maximum stacks."""
        if self.top is None:
            # First element: it is trivially both the minimum and maximum.
            new_min_value = value
            new_max_value = value
        else:
            new_min_value = min(value, self.minimum.value)
            new_max_value = max(value, self.maximum.value)

        new_node = Node(value)
        new_min = Node(new_min_value)
        new_max = Node(new_max_value)

        # Link all three new heads on top of their respective stacks.
        new_node.next = self.top
        new_min.next = self.minimum
        new_max.next = self.maximum
        self.top = new_node
        self.minimum = new_min
        self.maximum = new_max
        self.count += 1
        print("Number Inserted: {}".format(value))

    # This method is used to pop top of stack
    def pop(self):
        if self.top is None:
            print("Stack is empty")
        else:
            removedNode = self.top.value
            # Popping rolls all three stacks back in lockstep, restoring the
            # extrema that were valid before the popped element was pushed.
            self.top = self.top.next
            self.minimum = self.minimum.next
            self.maximum = self.maximum.next
            self.count -= 1
            print("Top Most Element Removed : {}".format(removedNode))
# Demonstration run: interleave pushes/pops with extrema queries; each call
# prints its result to stdout.
stack = Stack()
stack.push(3)
stack.push(5)
stack.getMin()
stack.getMax()
stack.push(2)
stack.push(1)
stack.getMin()
stack.getMax()
stack.pop()
stack.getMin()
stack.getMax()
stack.pop()
| 3.890625 | 4 |
scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | 1 | 12962 | """
Link extractor based on lxml.html
"""
import lxml.html
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class LxmlParserLinkExtractor(object):
    """Extract links from an HTML response using lxml.html.iterlinks().

    Args:
        tag: tag name to match, or a predicate called with each tag name.
        attr: attribute name to match, or a predicate called with each
            attribute name.
        process: callable applied to each matched link URL before it is
            wrapped in a Link; defaults to identity.
        unique: when True, links with duplicate URLs are collapsed
            (first occurrence wins).
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        # Normalize tag/attr/process into callables so the extraction loop
        # treats fixed names and user-supplied predicates uniformly.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

        # Kept for backward compatibility: holds the (pre-dedup) links found
        # by the most recent extraction only.
        self.links = []

    def _extract_links(self, response_text, response_url):
        html = lxml.html.fromstring(response_text)
        html.make_links_absolute(response_url)

        # Bug fix: collect into a fresh list on every call instead of
        # appending to self.links forever. Previously, repeated calls
        # accumulated links across responses, so later extractions returned
        # links leaked from earlier pages.
        links = []
        for e, a, l, p in html.iterlinks():
            if self.scan_tag(e.tag) and self.scan_attr(a):
                links.append(Link(self.process_attr(l), text=e.text))
        self.links = links

        if self.unique:
            links = unique_list(links, key=lambda link: link.url)
        return links

    def extract_links(self, response):
        return self._extract_links(response.body, response.url)
| 3.03125 | 3 |
src/meetings/admin.py | Yalnyra/office-meeting-reservation | 0 | 12963 | <reponame>Yalnyra/office-meeting-reservation
from django.contrib import admin
from .models import Meeting

# Expose the Meeting model in the Django admin using the default ModelAdmin.
admin.site.register(Meeting)
| 1.242188 | 1 |
app/templates/init.py | arudmin/generator-flask-heroku | 0 | 12964 | from flask import Flask, url_for
import os
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'SECRET_KEY_CH1ng3me'
# Determines the destination of the build. Only usefull if you're using Frozen-Flask
app.config['FREEZER_DESTINATION'] = os.path.dirname(os.path.abspath(__file__))+'/../build'
# Function to easily find your assets
# In your template use <link rel=stylesheet href="{{ static('filename') }}">
<%= appName %>.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename = filename)
)
from <%= appName %> import views
| 1.929688 | 2 |
sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | 4 | 12965 |
import numpy as np
import argparse
from sklearn.svm import LinearSVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
def _str_to_bool(text):
    """Interpret a command-line string as a boolean.

    Fixes the original ``type=bool``: argparse passes the raw string to the
    type callable, and ``bool("False")`` is True (any non-empty string is
    truthy), so ``-v False`` silently enabled verbose mode.
    """
    return str(text).strip().lower() in ('1', 'true', 'yes', 'y')


# CLI: paths to the .npy feature matrix and label vector, an optional
# verbosity flag, and the output path for the learned coefficients.
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=_str_to_bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()

X = np.load(args.datapath, allow_pickle=True)
y = np.load(args.labels, allow_pickle=True)

# http://scikit-learn.sourceforge.net/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC
# NOTE(review): despite the 'l2svm' filename, LinearSVR is a *regressor* and
# the linked page documents LinearSVC — confirm which estimator is intended.
regr = make_pipeline(StandardScaler(),
                     LinearSVR(verbose=args.verbose, tol=1e-5, max_iter=30))
regr.fit(X, y)

# Persist the learned coefficients (intercept excluded) as CSV.
np.savetxt(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=",")
| 2.625 | 3 |
libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 3 | 12966 | <filename>libs/fm_mission_planner/python/fm_mission_planner/target_viz.py
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020 <NAME>, ASL, ETH Zurich, Switzerland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rospy
import rospkg
import pandas as pd
import pymap3d as pm
import os
import numpy as np
from matplotlib import cm
from matplotlib import colors
from sensor_msgs.msg import NavSatFix
from visualization_msgs.msg import Marker, MarkerArray
# Load target list from CSV, receive home point from ROS msgs and publish target points to RVIZ.
class TargetViz():
    """Load targets from CSV, receive the home point via ROS, and publish
    the targets as RViz sphere markers in the local ENU frame.

    NOTE(review): the module uses Python 2 print statements (see
    convertToENU) and therefore requires a Python 2 ROS environment.
    """

    def __init__(self):
        self.df_targets = None
        self.loadRosParameters()
        self.subscribeToTopics()
        self.advertiseTopics()
        self.loadTargetTable()
        self.main()

    def loadRosParameters(self):
        """Read private ROS params: target-table CSV path and marker frame."""
        rospack = rospkg.RosPack()
        default_target_path = os.path.join(rospack.get_path('fm_mission_planner'), 'cfg/target_table.csv')
        self.target_path = rospy.get_param("~target_table", default_target_path)
        self.frame_id = rospy.get_param("~frame_id", 'enu')

    def subscribeToTopics(self):
        self.home_point_sub = rospy.Subscriber('home_point', NavSatFix, self.homePointCallback)

    def advertiseTopics(self):
        # Latched so late-joining subscribers (e.g. RViz) receive the last markers.
        self.target_pub = rospy.Publisher('~targets', MarkerArray, latch=True)

    def homePointCallback(self, msg):
        """Store the geodetic home point; if targets are loaded, rebuild and
        publish the marker array relative to this new origin."""
        self.lat0 = msg.latitude
        self.lon0 = msg.longitude
        self.alt0 = msg.altitude
        rospy.loginfo_throttle(10.0, 'Received home point lat0: ' + str(self.lat0) + ' lon0: ' + str(self.lon0) + ' alt0: ' + str(self.alt0))
        if self.df_targets is not None and len(self.df_targets):
            self.convertToENU()
            self.createColors()
            self.createMarkerArray()
            self.target_pub.publish(self.marker_array)

    def loadTargetTable(self):
        """Read the CSV target table (expects 'lat', 'lon', 'type' columns)."""
        self.df_targets = pd.read_csv(self.target_path, sep=",")
        rospy.loginfo('Loading ' + str(len(self.df_targets)) + ' target points.')

    def convertToENU(self):
        """Convert each target's lat/lon (altitude 0) to ENU coordinates
        relative to the stored home point."""
        lat = self.df_targets['lat'].values
        lon = self.df_targets['lon'].values
        alt = np.squeeze(np.zeros((len(self.df_targets), 1)))
        # Python 2 print statements — debug output of the raw latitudes.
        print lat
        print lat.size
        # Appears to guard the single-target case, where squeeze/.values can
        # yield scalar-like arrays — re-wrap so indexing below works.
        if lat.size == 1 and lon.size == 1 and alt.size == 1:
            lat = np.array([lat])
            lon = np.array([lon])
            alt = np.array([alt])
        self.east = []
        self.north = []
        self.up = []
        for i in range(0, len(self.df_targets)):
            east, north, up = pm.geodetic2enu(lat[i], lon[i], alt[i], self.lat0, self.lon0, self.alt0)
            self.east.append(east)
            self.north.append(north)
            self.up.append(up)

    def createColors(self):
        """Map the integer 'type' column onto RGBA colors via the Set1 colormap."""
        types = self.df_targets['type'].values
        color_map = cm.get_cmap('Set1')
        norm = colors.Normalize(vmin=min(types), vmax=max(types))
        self.colors = color_map(norm(types))

    def createMarkerArray(self):
        """Build one 0.4 m sphere Marker per target in the configured frame."""
        self.marker_array = MarkerArray()
        for i in range(0, len(self.df_targets)):
            marker = Marker()
            marker.type = marker.SPHERE
            marker.action = marker.ADD
            marker.scale.x = 0.4
            marker.scale.y = 0.4
            marker.scale.z = 0.4
            marker.color.r = self.colors[i, 0]
            marker.color.g = self.colors[i, 1]
            marker.color.b = self.colors[i, 2]
            marker.color.a = self.colors[i, 3]
            marker.pose.position.x = self.east[i]
            marker.pose.position.y = self.north[i]
            marker.pose.position.z = self.up[i]
            marker.pose.orientation.w = 1.0
            marker.header.frame_id = self.frame_id
            marker.id = i
            self.marker_array.markers.append(marker)

    def main(self):
        rospy.spin()
| 1.765625 | 2 |
capirca/lib/gcp_hf.py | PhillSimonds/capirca | 0 | 12967 | <reponame>PhillSimonds/capirca
"""Google Cloud Hierarchical Firewall Generator.
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resouce.
"""
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
class ExceededCostError(gcp.Error):
  """Raised when the total cost of a policy is above the maximum.

  The maximum defaults to _DEFAULT_MAXIMUM_COST and may be overridden by an
  integer option in the policy header.
  """


class DifferentPolicyNameError(gcp.Error):
  """Raised when headers in the same policy have a different policy name."""
class ApiVersionSyntaxMap:
  """Defines the syntax changes between different API versions.

  http://cloud/compute/docs/reference/rest/v1/firewallPolicies/addRule
  http://cloud/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
  """
  # Only the display-name key differs between the two versions; the three
  # match-field keys are identical, so the table is built from one template.
  SYNTAX_MAP = {
      version: {
          'display_name': display_name,
          'dest_ip_range': 'destIpRanges',
          'src_ip_range': 'srcIpRanges',
          'layer_4_config': 'layer4Configs',
      }
      for version, display_name in (('beta', 'displayName'),
                                    ('ga', 'shortName'))
  }
class Term(gcp.Term):
  """Used to create an individual term.

  One policy term is rendered into one or more SecurityPolicy.Rule dicts;
  a term splits into several rules when its source/destination address list
  exceeds _TERM_ADDRESS_LIMIT.
  """

  ACTION_MAP = {'accept': 'allow', 'next': 'goto_next'}
  _MAX_TERM_COMMENT_LENGTH = 64
  _TARGET_RESOURCE_FORMAT = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'
  _TERM_ADDRESS_LIMIT = 256
  _TERM_TARGET_RESOURCES_LIMIT = 256
  _TERM_DESTINATION_PORTS_LIMIT = 256

  def __init__(self,
               term,
               address_family='inet',
               policy_inet_version='inet',
               api_version='beta'):
    """Initializes the term renderer.

    Args:
      term: the parsed policy term to render.
      address_family: 'inet' or 'inet6' for this rendering pass.
      policy_inet_version: policy-level inet version ('inet', 'inet6' or
        'mixed'); only used for term naming/priority under 'mixed'.
      api_version: compute API version syntax to emit, 'beta' or 'ga'.
    """
    super().__init__(term)
    self.address_family = address_family
    self.term = term
    self.skip = False
    self._ValidateTerm()
    self.api_version = api_version

    # This is to handle mixed, where the policy_inet_version is mixed,
    # but the term inet version is either inet/inet6.
    # This is only useful for term name and priority.
    self.policy_inet_version = policy_inet_version

  def _ValidateTerm(self):
    """Raises gcp.TermError for unsupported or over-limit term fields."""
    if self.term.destination_tag or self.term.source_tag:
      raise gcp.TermError('Hierarchical Firewall does not support tags')

    if len(self.term.target_resources) > self._TERM_TARGET_RESOURCES_LIMIT:
      raise gcp.TermError(
          'Term: %s target_resources field contains %s resources. It should not contain more than "%s".'
          % (self.term.name, str(len(
              self.term.target_resources)), self._TERM_TARGET_RESOURCES_LIMIT))

    for proj, vpc in self.term.target_resources:
      if not gcp.IsProjectIDValid(proj):
        raise gcp.TermError(
            'Project ID "%s" must be 6 to 30 lowercase letters, digits, or hyphens.'
            ' It must start with a letter. Trailing hyphens are prohibited.' %
            proj)
      if not gcp.IsVPCNameValid(vpc):
        raise gcp.TermError('VPC name "%s" must start with a lowercase letter '
                            'followed by up to 62 lowercase letters, numbers, '
                            'or hyphens, and cannot end with a hyphen.' % vpc)
    if self.term.source_port:
      raise gcp.TermError('Hierarchical firewall does not support source port '
                          'restrictions.')
    if self.term.option:
      raise gcp.TermError('Hierarchical firewall does not support the '
                          'TCP_ESTABLISHED option.')

    if len(self.term.destination_port) > self._TERM_DESTINATION_PORTS_LIMIT:
      raise gcp.TermError(
          'Term: %s destination_port field contains %s ports. It should not contain more than "%s".'
          % (self.term.name, str(len(
              self.term.destination_port)), self._TERM_DESTINATION_PORTS_LIMIT))

    # Since policy_inet_version is used to handle 'mixed'.
    # We should error out if the individual term's inet version (address_family)
    # is anything other than inet/inet6, since this should never happen
    # naturally. Something has gone horribly wrong if you encounter this error.
    if self.address_family == 'mixed':
      raise gcp.TermError(
          'Hierarchical firewall rule has incorrect inet_version for rule: %s' %
          self.term.name)

  def ConvertToDict(self, priority_index):
    """Converts term to dict representation of SecurityPolicy.Rule JSON format.

    Takes all of the attributes associated with a term (match, action, etc) and
    converts them into a dictionary which most closely represents
    the SecurityPolicy.Rule JSON format.

    Args:
      priority_index: An integer priority value assigned to the term.

    Returns:
      A list of rule dicts (a term may split across several rules when its
      address list exceeds _TERM_ADDRESS_LIMIT). NOTE(review): skipped or
      fully filtered terms return an empty dict in some paths and an empty
      list in others — callers appear to rely only on falsiness/iteration.
    """
    if self.skip:
      return {}

    rules = []

    # Identify if this is inet6 processing for a term under a mixed policy.
    mixed_policy_inet6_term = False
    if self.policy_inet_version == 'mixed' and self.address_family == 'inet6':
      mixed_policy_inet6_term = True

    term_dict = {
        'action': self.ACTION_MAP.get(self.term.action[0], self.term.action[0]),
        'direction': self.term.direction,
        'priority': priority_index
    }

    # Get the correct syntax for API versions.
    src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
        self.api_version]['src_ip_range']
    dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
        self.api_version]['dest_ip_range']
    layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[
        self.api_version]['layer_4_config']

    target_resources = []
    for proj, vpc in self.term.target_resources:
      target_resources.append(self._TARGET_RESOURCE_FORMAT.format(proj, vpc))
    if target_resources:  # Only set when non-empty.
      term_dict['targetResources'] = target_resources

    term_dict['enableLogging'] = self._GetLoggingSetting()

    # This combo provides ability to identify the rule.
    term_name = self.term.name
    if mixed_policy_inet6_term:
      term_name = gcp.GetIpv6TermName(term_name)
    raw_description = term_name + ': ' + ' '.join(self.term.comment)
    term_dict['description'] = gcp.TruncateString(raw_description,
                                                  self._MAX_TERM_COMMENT_LENGTH)

    filtered_protocols = []
    for proto in self.term.protocol:
      # ICMP filtering by inet_version
      # Since each term has inet_version, 'mixed' is correctly processed here.
      if proto == 'icmp' and self.address_family == 'inet6':
        logging.warning(
            'WARNING: Term %s is being rendered for inet6, ICMP '
            'protocol will not be rendered.', self.term.name)
        continue
      if proto == 'icmpv6' and self.address_family == 'inet':
        logging.warning(
            'WARNING: Term %s is being rendered for inet, ICMPv6 '
            'protocol will not be rendered.', self.term.name)
        continue
      if proto == 'igmp' and self.address_family == 'inet6':
        logging.warning(
            'WARNING: Term %s is being rendered for inet6, IGMP '
            'protocol will not be rendered.', self.term.name)
        continue
      filtered_protocols.append(proto)

    # If there is no protocol left after ICMP/IGMP filtering, drop this term.
    # But only do this for terms that originally had protocols.
    # Otherwise you end up dropping the default-deny.
    if self.term.protocol and not filtered_protocols:
      return {}

    protocols_and_ports = []
    if not self.term.protocol:
      # Empty protocol list means any protocol, but any protocol in HF is
      # represented as "all"
      protocols_and_ports = [{'ipProtocol': 'all'}]
    else:
      for proto in filtered_protocols:
        # If the protocol name is not supported, use the protocol number.
        if proto not in self._ALLOW_PROTO_NAME:
          proto = str(self.PROTO_MAP[proto])
          logging.info('INFO: Term %s is being rendered using protocol number',
                       self.term.name)
        proto_ports = {'ipProtocol': proto}
        if self.term.destination_port:
          ports = self._GetPorts()
          if ports:  # Only set when non-empty.
            proto_ports['ports'] = ports
        protocols_and_ports.append(proto_ports)

    if self.api_version == 'ga':
      term_dict['match'] = {layer_4_config: protocols_and_ports}
    else:
      term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}}
      # match needs a field called versionedExpr with value FIREWALL
      # See documentation:
      # https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
      term_dict['match']['versionedExpr'] = 'FIREWALL'

    ip_version = self.AF_MAP[self.address_family]
    if ip_version == 4:
      any_ip = [nacaddr.IP('0.0.0.0/0')]
    else:
      any_ip = [nacaddr.IPv6('::/0')]

    if self.term.direction == 'EGRESS':
      daddrs = self.term.GetAddressOfVersion('destination_address', ip_version)

      # If the address got filtered out and is empty due to address family, we
      # don't render the term. At this point of term processing, the direction
      # has already been validated, so we can just log and return empty rule.
      if self.term.destination_address and not daddrs:
        logging.warning(
            'WARNING: Term %s is not being rendered for %s, '
            'because there are no addresses of that family.', self.term.name,
            self.address_family)
        return []
      # This should only happen if there were no addresses set originally.
      if not daddrs:
        daddrs = any_ip

      # Split addresses into chunks so no single rule exceeds the per-rule
      # address limit; each chunk becomes its own rule with its own priority.
      destination_address_chunks = [
          daddrs[x:x + self._TERM_ADDRESS_LIMIT]
          for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT)
      ]

      for daddr_chunk in destination_address_chunks:
        rule = copy.deepcopy(term_dict)
        if self.api_version == 'ga':
          rule['match'][dest_ip_range] = [
              daddr.with_prefixlen for daddr in daddr_chunk
          ]
        else:
          rule['match']['config'][dest_ip_range] = [
              daddr.with_prefixlen for daddr in daddr_chunk
          ]
        rule['priority'] = priority_index
        rules.append(rule)
        priority_index += 1
    else:
      saddrs = self.term.GetAddressOfVersion('source_address', ip_version)

      # If the address got filtered out and is empty due to address family, we
      # don't render the term. At this point of term processing, the direction
      # has already been validated, so we can just log and return empty rule.
      if self.term.source_address and not saddrs:
        logging.warning(
            'WARNING: Term %s is not being rendered for %s, '
            'because there are no addresses of that family.', self.term.name,
            self.address_family)
        return []
      # This should only happen if there were no addresses set originally.
      if not saddrs:
        saddrs = any_ip

      # Same chunking as the EGRESS branch, applied to source addresses.
      source_address_chunks = [
          saddrs[x:x + self._TERM_ADDRESS_LIMIT]
          for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT)
      ]
      for saddr_chunk in source_address_chunks:
        rule = copy.deepcopy(term_dict)
        if self.api_version == 'ga':
          rule['match'][src_ip_range] = [
              saddr.with_prefixlen for saddr in saddr_chunk
          ]
        else:
          rule['match']['config'][src_ip_range] = [
              saddr.with_prefixlen for saddr in saddr_chunk
          ]
        rule['priority'] = priority_index
        rules.append(rule)
        priority_index += 1

    return rules

  def __str__(self):
    return ''
class HierarchicalFirewall(gcp.GCP):
  """A GCP Hierarchical Firewall policy."""

  SUFFIX = '.gcphf'
  # Catch-all networks substituted into default-deny terms that carry no
  # explicit addresses.
  _ANY_IP = {
      'inet': nacaddr.IP('0.0.0.0/0'),
      'inet6': nacaddr.IP('::/0'),
  }
  _PLATFORM = 'gcp_hf'
  _SUPPORTED_AF = frozenset(['inet', 'inet6', 'mixed'])
  # Beta is the default API version. GA supports IPv6 (inet6/mixed).
  _SUPPORTED_API_VERSION = frozenset(['beta', 'ga'])
  # Default quota budget for a policy (see GetRuleTupleCount).
  _DEFAULT_MAXIMUM_COST = 100

  def _BuildTokens(self):
    """Build supported tokens for platform.

    Returns:
      Tuple containing both supported tokens and sub tokens.
    """
    supported_tokens, _ = super()._BuildTokens()
    supported_tokens |= {
        'destination_tag', 'expiration', 'source_tag', 'translated',
        'target_resources', 'logging'
    }
    # NOTE(review): 'expiration' is added above and removed below; the
    # removal wins, so expiration is effectively unsupported -- confirm
    # whether one of the two mentions is stale.
    supported_tokens -= {
        'destination_address_exclude', 'expiration', 'icmp_type',
        'source_address_exclude', 'verbatim'
    }
    supported_sub_tokens = {'action': {'accept', 'deny', 'next'}}
    return supported_tokens, supported_sub_tokens

  def _TranslatePolicy(self, pol, exp_info):
    """Translates a Capirca policy into a HF-specific data structure.

    Takes in a POL file, parses each term and populates the policy
    dict. Each term in this list is a dictionary formatted according to
    HF's rule API specification.  Additionally, checks for its quota.

    Args:
      pol: A Policy() object representing a given POL file.
      exp_info: An int that specifies number of weeks until policy expiry.

    Raises:
      ExceededCostError: Raised when the cost of a policy exceeds the default
          maximum cost.
      HeaderError: Raised when the header cannot be parsed or a header option
          is invalid.
      DifferentPolicyNameError: Raised when a header policy name differs from
          other in the same policy.
    """
    self.policies = []
    # A single firewall-policy dict accumulates rules from every matching
    # filter in the POL file.
    policy = {
        'rules': [],
        'type': 'FIREWALL'
    }
    is_policy_modified = False
    # Rule priorities must be unique; advanced by the number of rules each
    # term produces.
    counter = 1
    total_cost = 0
    for header, terms in pol.filters:
      if self._PLATFORM not in header.platforms:
        continue

      filter_options = header.FilterOptions(self._PLATFORM)

      is_policy_modified = True

      # Get term direction if set.
      direction = 'INGRESS'
      for i in self._GOOD_DIRECTION:
        if i in filter_options:
          direction = i
          filter_options.remove(i)

      # Get the address family if set.
      address_family = 'inet'
      for i in self._SUPPORTED_AF:
        if i in filter_options:
          address_family = i
          filter_options.remove(i)

      # Get the compute API version if set.
      api_version = 'beta'
      for i in self._SUPPORTED_API_VERSION:
        if i in filter_options:
          api_version = i
          filter_options.remove(i)
          break

      # Find the default maximum cost of a policy, an integer, if specified.
      max_cost = self._DEFAULT_MAXIMUM_COST
      for opt in filter_options:
        try:
          max_cost = int(opt)
          filter_options.remove(opt)
          break
        except ValueError:
          continue

      if max_cost > 65536:
        raise gcp.HeaderError(
            'Default maximum cost cannot be higher than 65536')

      # Key under which the policy name is stored differs per API version.
      display_name = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['display_name']

      # Get policy name and validate it to meet displayName requirements.
      policy_name = header.FilterName(self._PLATFORM)
      if not policy_name:
        raise gcp.HeaderError(
            'Policy name was not specified in header')
      filter_options.remove(policy_name)
      if len(policy_name) > 63:
        raise gcp.HeaderError(
            'Policy name "%s" is too long; the maximum number of characters '
            'allowed is 63' % (policy_name))
      if not bool(re.match('^[a-z]([-a-z0-9]*[a-z0-9])?$', policy_name)):
        raise gcp.HeaderError(
            'Invalid string for displayName, "%s"; the first character must be '
            'a lowercase letter, and all following characters must be a dash, '
            'lowercase letter, or digit, except the last character, which '
            'cannot be a dash.' % (policy_name))
      # All headers of one policy must agree on the policy name.
      if display_name in policy and policy[display_name] != policy_name:
        raise DifferentPolicyNameError(
            'Policy names that are from the same policy are expected to be '
            'equal, but %s is different to %s' %
            (policy[display_name], policy_name))
      policy[display_name] = policy_name

      # If there are remaining options, they are unknown/unsupported options.
      if filter_options:
        raise gcp.HeaderError(
            'Unsupported or unknown filter options %s in policy %s ' %
            (str(filter_options), policy_name))

      # Handle mixed for each indvidual term as inet and inet6.
      # inet/inet6 are treated the same.
      term_address_families = []
      if address_family == 'mixed':
        term_address_families = ['inet', 'inet6']
      else:
        term_address_families = [address_family]

      for term in terms:
        # Synthetic stateful-reply terms are not rendered for this platform.
        if term.stateless_reply:
          continue

        if gcp.IsDefaultDeny(term):
          if direction == 'EGRESS':
            if address_family != 'mixed':
              # Default deny also gets processed as part of terms processing.
              # The name and priority get updated there.
              term.destination_address = [self._ANY_IP[address_family]]
            else:
              term.destination_address = [
                  self._ANY_IP['inet'], self._ANY_IP['inet6']
              ]
          else:
            if address_family != 'mixed':
              term.source_address = [self._ANY_IP[address_family]]
            else:
              term.source_address = [
                  self._ANY_IP['inet'], self._ANY_IP['inet6']
              ]

        term.name = self.FixTermLength(term.name)
        term.direction = direction

        # Only generate the term if it's for the appropriate platform
        if term.platform:
          if self._PLATFORM not in term.platform:
            continue
        if term.platform_exclude:
          if self._PLATFORM in term.platform_exclude:
            continue

        for term_af in term_address_families:
          rules = Term(
              term,
              address_family=term_af,
              policy_inet_version=address_family,
              api_version=api_version).ConvertToDict(priority_index=counter)
          if not rules:
            continue
          for dict_term in rules:
            # Enforce the quota budget while rules are appended so the
            # error reports the first offending total.
            total_cost += GetRuleTupleCount(dict_term, api_version)
            if total_cost > max_cost:
              raise ExceededCostError(
                  'Policy cost (%d) for %s reached the '
                  'maximum (%d)' %
                  (total_cost, policy[display_name], max_cost))
            policy['rules'].append(dict_term)
          counter += len(rules)

    self.policies.append(policy)

    # Do not render an empty rules if no policies have been evaluated.
    if not is_policy_modified:
      self.policies = []

    if total_cost > 0:
      logging.info('Policy %s quota cost: %d',
                   policy[display_name], total_cost)
def GetRuleTupleCount(dict_term: Dict[str, Any], api_version):
  """Calculate the tuple count of a rule in its dictionary form.

  Quota is charged based on how complex the rules are rather than simply
  limiting the number of rules.

  The cost of a rule is the number of distinct protocol:port combinations plus
  the number of IP addresses plus the number of targets.

  Note: The goal of this function is not to determine if a rule is valid, but
  to calculate its tuple count regardless of correctness.

  Args:
    dict_term: A dict object.
    api_version: A string indicating the api version.

  Returns:
    int: The tuple count of the rule.
  """
  layer4_count = 0
  layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['layer_4_config']
  dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['dest_ip_range']
  src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['src_ip_range']
  targets_count = len(dict_term.get('targetResources', []))

  # GA nests match criteria directly under 'match'; beta adds a 'config'
  # level underneath it.
  if api_version == 'ga':
    config = dict_term.get('match', {})
  else:
    config = dict_term.get('match', {}).get('config', {})

  addresses_count = len(
      config.get(dest_ip_range, []) + config.get(src_ip_range, []))

  for l4config in config.get(layer_4_config, []):
    # Every port entry counts once (was a manual `for _ in ...` loop), plus
    # one for the protocol itself (was the typo `layer4_count += +1`).
    layer4_count += len(l4config.get('ports', []))
    if l4config.get('ipProtocol'):
      layer4_count += 1

  return addresses_count + layer4_count + targets_count
| 2.15625 | 2 |
src/caracara/_kits.py | LaudateCorpus1/caracara | 1 | 12968 | """Kits class defines the available Toolboxes."""
from enum import Enum
class Kits(Enum):
    """Maps each available toolbox to the name of the class implementing it."""

    # Host management toolbox.
    HOSTS = "HostsToolbox"
    # Real Time Response toolbox.
    RTR = "RTRToolbox"
| 2.296875 | 2 |
hackerrank/algorithms/time_conversion.py | ontana/mystudy | 0 | 12969 | #!/bin/python3
# https://www.hackerrank.com/challenges/time-conversion
import sys
def timeConversion(s):
    """Convert a 12-hour clock string ('hh:mm:ssAM'/'hh:mm:ssPM', case
    insensitive) to its 24-hour 'hh:mm:ss' form."""
    hours, minutes, tail = s.split(':')
    meridiem = tail[-2:].lower()
    seconds = tail[:-2]
    # 12 AM maps to hour 0 and 12 PM stays 12; all other hours shift by 12
    # for PM only.
    hour_24 = int(hours) % 12
    if meridiem == 'pm':
        hour_24 += 12
    return "%02d:%s:%s" % (hour_24, minutes, seconds)
# Read one 12-hour timestamp from stdin and print its 24-hour equivalent
# (HackerRank driver convention).
s = input().strip()
result = timeConversion(s)
print(result)
supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 5 | 12970 | <filename>supplychainpy/demand/economic_order_quantity.py
from decimal import Decimal, getcontext, ROUND_HALF_UP
from supplychainpy.demand import analyse_uncertain_demand
from supplychainpy.demand.eoq import minimum_variable_cost, economic_order_quantity
class EconomicOrderQuantity(analyse_uncertain_demand.UncertainDemand):
    """Computes the economic order quantity (EOQ) and the minimum total
    variable cost for one SKU.

    Results are exposed through the read-only ``minimum_variable_cost`` and
    ``economic_order_quantity`` properties, which ``__init__`` populates via
    the module-level helpers from ``supplychainpy.demand.eoq``.  The private
    ``_minimum_variable_cost`` / ``_economic_order_quantity`` methods below
    appear to be an older in-class implementation of the same search.
    """

    # Class-level defaults for the (name-mangled) result attributes.
    __economic_order_quantity = Decimal(0)
    # NOTE(review): this line assigns an attribute on the *parent* class, and
    # the leading double underscore mangles the name to
    # _EconomicOrderQuantity__reorder_cost -- it likely does not do what was
    # intended; confirm.
    analyse_uncertain_demand.UncertainDemand.__reorder_cost = Decimal(0)
    __holding_cost = Decimal(0)
    __min_variable_cost = Decimal(0)
    __reorder_quantity = Decimal(0)
    __unit_cost = 0.00

    @property
    def minimum_variable_cost(self) -> Decimal:
        # Minimum total variable (ordering + holding) cost, computed in
        # __init__.
        return self.__min_variable_cost

    @property
    def economic_order_quantity(self) -> Decimal:
        # Order size that minimises the total variable cost, computed in
        # __init__.
        return self.__economic_order_quantity

    def __init__(self, reorder_quantity: float, holding_cost: float, reorder_cost: float, average_orders: float,
                 unit_cost: float, total_orders: float):
        """Compute and cache EOQ results for one SKU.

        Args:
            reorder_quantity: Current reorder quantity.
            holding_cost: Holding cost rate per unit.
            reorder_cost: Fixed cost of placing one order.
            average_orders: Average demand per period (unused here; consumed
                by the private legacy methods).
            unit_cost: Cost of one unit.
            total_orders: Total demand used by the eoq helpers.
        """
        # NOTE(review): this mutates the process-wide decimal context (2
        # significant digits, round-half-up), which affects all other Decimal
        # arithmetic in the process.
        getcontext().prec = 2
        getcontext().rounding = ROUND_HALF_UP
        self.__reorder_quantity = Decimal(reorder_quantity)
        self.__holding_cost = holding_cost
        self.__reorder_cost = reorder_cost
        self.__unit_cost = unit_cost
        # Delegates to the module-level helpers in supplychainpy.demand.eoq.
        self.__min_variable_cost = minimum_variable_cost(total_orders, reorder_cost, unit_cost, holding_cost)
        self.__economic_order_quantity = economic_order_quantity(total_orders, reorder_cost, unit_cost,holding_cost, reorder_quantity)

    def _minimum_variable_cost(self, average_orders, reorder_cost, unit_cost) -> Decimal:
        """Search increasing order sizes until total variable cost stops
        falling, returning the lowest cost seen.

        The candidate order size grows by 20% per iteration starting from the
        seed produced by ``_order_size``.
        """
        getcontext().prec = 2
        getcontext().rounding = ROUND_HALF_UP
        holding_cost = self.__holding_cost
        step = float(0.2)  # grow candidate order size by 20% per pass
        previous_eoq_variable_cost = Decimal(0)
        # NOTE(review): bare expression, has no effect.
        Decimal(reorder_cost)
        order_factor = float(0.002)
        vc = 0.00
        counter = 0
        order_size = 0
        # Loop until the newly computed cost exceeds the previous one.
        while previous_eoq_variable_cost >= Decimal(vc):
            previous_eoq_variable_cost = Decimal(vc)
            # reorder cost * average demand all divided by order size + (demand size * holding cost)
            if counter < 1:
                # First pass only: seed the starting order size.
                order_size = self._order_size(average_orders=average_orders, reorder_cost=reorder_cost,
                                              unit_cost=unit_cost, holding_cost=holding_cost,
                                              order_factor=order_factor)
            vc = self._variable_cost(float(average_orders), float(reorder_cost), float(order_size), float(unit_cost), float(holding_cost))
            order_size += int(float(order_size) * step)
            if counter < 1:
                previous_eoq_variable_cost = Decimal(vc)
            # NOTE(review): convoluted way of setting counter to 1 after the
            # first iteration; the inner loop body runs at most once.
            while counter == 0:
                counter += 1
        return Decimal(previous_eoq_variable_cost)

    # probably missing the addition
    def _economic_order_quantity(self, average_orders: float, reorder_cost: float, unit_cost: float) -> Decimal:
        """Same hill-climbing search as ``_minimum_variable_cost`` but
        returns the final order size instead of the cost.

        NOTE(review): ``order_size`` has already been incremented past the
        last evaluated cost when the loop exits, so the returned EOQ
        overshoots the minimising order size -- confirm intended.
        """
        getcontext().prec = 2
        getcontext().rounding = ROUND_HALF_UP
        holding_cost = self.__holding_cost
        reorder_quantity = int(self.__reorder_quantity)  # NOTE(review): unused
        step = float(0.2)
        previous_eoq_variable_cost = Decimal(0)
        eoq_variable_cost = Decimal(0)  # NOTE(review): unused
        # NOTE(review): bare expression, has no effect.
        Decimal(reorder_cost)
        order_factor = float(0.002)
        vc = 0.00
        rc = 0.00  # NOTE(review): rc/hc/s/diff below are never used
        hc = 0.00
        s = 0.00
        counter = 0
        order_size = 0
        diff = Decimal(0)
        while previous_eoq_variable_cost >= Decimal(vc):
            previous_eoq_variable_cost = Decimal(vc)
            # reorder cost * average demand all divided by order size + (demand size * holding cost)
            if counter < 1:
                order_size = self._order_size(average_orders=average_orders, reorder_cost=reorder_cost,
                                              unit_cost=unit_cost, holding_cost=holding_cost,
                                              order_factor=order_factor)
            vc = self._variable_cost(float(average_orders), float(reorder_cost), float(order_size), float(unit_cost), float(holding_cost))
            order_size += int(float(order_size) * step)
            if counter < 1:
                previous_eoq_variable_cost = Decimal(vc)
            while counter == 0:
                counter += 1
        return Decimal(order_size)

    @staticmethod
    def _variable_cost(average_orders: float, reorder_cost: float, order_size: float, unit_cost: float,
                       holding_cost: float) -> float:
        """Total variable cost for one order size: ordering cost
        ((average orders * reorder cost) / order size) plus holding cost
        (unit cost * order size * holding rate)."""
        rc = lambda x, y, z: (x * y) / z
        hc = lambda x, y, z: x * y * z
        vc = rc(float(average_orders), float(reorder_cost), float(order_size)) + hc(float(unit_cost),
                                                                                   float(order_size),
                                                                                   float(holding_cost))
        return vc

    @staticmethod
    def _order_size(average_orders: float, reorder_cost: float, unit_cost: float, holding_cost: float,
                    order_factor: float) -> float:
        """Seed order size for the search, loosely derived from the Wilson
        EOQ formula and scaled down by ``order_factor`` so the search starts
        below the optimum."""
        order_size_calc = lambda x, y, z, i, j: int(
            (float(x) * float(y) * 2) / (float(z) * float(i)) * float(j) * float(0.5))
        order_size = order_size_calc(average_orders, reorder_cost, unit_cost, holding_cost, order_factor)
        return order_size
| 2.625 | 3 |
tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 1 | 12971 | <gh_stars>1-10
from __future__ import division
import mmcv
import numpy as np
import pytest
def test_quantize():
    """mmcv.quantize maps floats in [-1, 1] onto integer levels and
    validates its arguments."""
    arr = np.random.randn(10, 10)
    levels = 20

    qarr = mmcv.quantize(arr, -1, 1, levels)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('int64')
    for i in range(arr.shape[0]):
        for j in range(arr.shape[1]):
            # Independent reference: clamp to [-1, 1], scale by the bin
            # count per unit (10), clip to the top level.
            ref = min(levels - 1,
                      int(np.floor(10 * (1 + max(min(arr[i, j], 1), -1)))))
            assert qarr[i, j] == ref

    # An explicit dtype must be honoured.
    qarr = mmcv.quantize(arr, -1, 1, 20, dtype=np.uint8)
    assert qarr.shape == arr.shape
    assert qarr.dtype == np.dtype('uint8')

    # Invalid arguments: non-positive levels, non-integer levels, and an
    # inverted value range must all raise ValueError.
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, -1, 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.quantize(arr, 2, 1, levels)
def test_dequantize():
    """mmcv.dequantize maps integer levels back to bin centres and validates
    its arguments."""
    levels = 20
    quantized = np.random.randint(levels, size=(10, 10))

    restored = mmcv.dequantize(quantized, -1, 1, levels)
    assert restored.shape == quantized.shape
    assert restored.dtype == np.dtype('float64')
    # Each level maps to the centre of its bin on [-1, 1): (q + 0.5)/10 - 1.
    expected = (quantized + 0.5) / 10 - 1
    assert np.all(restored == expected)

    # An explicit dtype must be honoured.
    restored = mmcv.dequantize(quantized, -1, 1, levels, dtype=np.float32)
    assert restored.shape == quantized.shape
    assert restored.dtype == np.dtype('float32')

    # Invalid arguments: non-positive levels, non-integer levels, and an
    # inverted value range must all raise ValueError.
    with pytest.raises(ValueError):
        mmcv.dequantize(restored, -1, 1, levels=0)
    with pytest.raises(ValueError):
        mmcv.dequantize(restored, -1, 1, levels=10.0)
    with pytest.raises(ValueError):
        mmcv.dequantize(restored, 2, 1, levels)
def test_joint():
    """Round-tripping quantize -> dequantize stays within one bin width."""
    arr = np.random.randn(100, 100)
    levels = 1000

    qarr = mmcv.quantize(arr, -1, 1, levels)
    recover = mmcv.dequantize(qarr, -1, 1, levels)
    # Out-of-range inputs saturate to the centre of the first/last bin
    # (+/-0.999 for 1000 levels on [-1, 1]).
    assert np.abs(recover[arr < -1] + 0.999).max() < 1e-6
    assert np.abs(recover[arr > 1] - 0.999).max() < 1e-6
    # In-range values reconstruct to within half a bin width (1e-3).
    assert np.abs((recover - arr)[(arr >= -1) & (arr <= 1)]).max() <= 1e-3

    # With an odd level count the middle bin straddles zero and its centre is
    # exactly 0, so tiny values all recover to 0.
    arr = np.clip(np.random.randn(100) / 1000, -0.01, 0.01)
    levels = 99
    qarr = mmcv.quantize(arr, -1, 1, levels)
    recover = mmcv.dequantize(qarr, -1, 1, levels)
    assert np.all(recover == 0)
| 1.898438 | 2 |
train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 1 | 12972 | <filename>train.py
#!/usr/bin/env python
"""
Author: <NAME>
"""
import argparse
import numpy as np
from model import FeedForwardNetwork
from utils import load_ocr_dataset, plot
# Entry point: parse CLI hyperparameters, load the OCR dataset, train a
# feed-forward network with SGD and plot the per-epoch validation accuracy.
if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    # Hyperparams
    parser.add_argument('-epochs', default=20, type=int, help="Number of training epochs.")
    parser.add_argument('-num_layers', default=2, type=int, help="Number of hidden layers.")
    parser.add_argument('-hidden_size', default=64, type=int, help="Number of units per hidden layer.")
    parser.add_argument('-activation', default="relu", type=str, help="Activation function for the hidden layers.")
    parser.add_argument('-learning_rate', default=0.1, type=float, help="Learning rate for SGD optimizer.")
    parser.add_argument('-l2_penalty', default=0.0, type=float, help="L2 penalty for SGD optimizer.")
    parser.add_argument('-batch_size', default=32, type=int, help="Number of datapoints per SGD step.")

    # Misc.
    parser.add_argument('-data', default='ocr_dataset/letter.data', help="Path to letter.data OCR dataset.")
    parser.add_argument('-save_plot', action="store_true", help="Whether or not to save the generated accuracies plot.")

    opt = parser.parse_args()

    # ############ #
    # Load Dataset #
    # ############ #
    print("Loading OCR Dataset", end="", flush=True)
    data = load_ocr_dataset(opt.data)
    X_train, y_train = data["train"]
    X_val, y_val = data["dev"]
    X_test, y_test = data["test"]
    # Input dimensionality and class count are derived from the data itself.
    num_features = X_train.shape[1]
    num_classes = np.unique(y_train).size
    print(" [Done]", flush=True)

    # ########### #
    # Setup Model #
    # ########### #
    print("Deploying model", end="", flush=True)
    model = FeedForwardNetwork(
        num_features, num_classes,
        opt.num_layers, opt.hidden_size, opt.activation,
        opt.learning_rate, opt.l2_penalty, opt.batch_size
    )
    print(" [Done]", flush=True)

    # ################ #
    # Train & Evaluate #
    # ################ #
    print("Training model", flush=True)
    # fit() returns the per-epoch validation accuracies and the final test
    # accuracy after all epochs.
    validation_accuracies, final_test_accuracy = model.fit(X_train, y_train, X_val, y_val, X_test, y_test, opt.epochs)

    # #### #
    # Plot #
    # #### #
    print("Plotting", end="", flush=True)
    plot(opt.epochs, validation_accuracies, opt.save_plot)
    print(" [Done]\nGoodbye.", flush=True)
| 2.875 | 3 |
varify/samples/views.py | chop-dbhi/varify | 6 | 12973 | from guardian.shortcuts import get_objects_for_user
from django.http import Http404, HttpResponseRedirect
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from vdw.samples.models import Sample, Project, Batch, Cohort
from .forms import CohortForm
def registry(request):
    """Render the registry overview: every project the user may view,
    annotated with batch/sample counts, plus all unpublished samples."""
    viewable_projects = get_objects_for_user(request.user, 'samples.view_project')

    # Distinct counts are required because joining both relations at once
    # inflates the row counts.
    viewable_projects = viewable_projects.annotate(
        sample_count=Count('samples', distinct=True),
        batch_count=Count('batches', distinct=True))

    pending_samples = Sample.objects.filter(
        published=False, project__in=viewable_projects
    ).select_related('batch', 'project')

    return render(request, 'samples/registry.html', {
        'projects': list(viewable_projects),
        'staged_samples': list(pending_samples),
    })
def project_registry(request, pk):
    """Render the detail page for one project with its batches.

    Only projects the requesting user may view are considered; an unknown or
    inaccessible pk raises Http404 rather than leaking existence.
    """
    projects = get_objects_for_user(request.user, 'samples.view_project')
    batch_count = Count('batches', distinct=True)
    sample_count = Count('samples', distinct=True)

    # Distinct count on batch necessary since the join inflates the numbers
    try:
        project = projects.annotate(sample_count=sample_count,
                                    batch_count=batch_count).get(pk=pk)
    except Project.DoesNotExist:
        raise Http404

    batches = Batch.objects.filter(project=project) \
        .annotate(sample_count=Count('samples'))

    return render(request, 'samples/project.html', {
        'project': project,
        'batches': batches,
    })
def batch_registry(request, pk):
    """Render the detail page for one batch with its samples.

    The batch must belong to a project the user may view; otherwise 404.
    """
    projects = get_objects_for_user(request.user, 'samples.view_project')
    sample_count = Count('samples', distinct=True)

    try:
        batch = Batch.objects.annotate(sample_count=sample_count) \
            .filter(project__in=projects).select_related('project').get(pk=pk)
    except Batch.DoesNotExist:
        raise Http404

    samples = Sample.objects.filter(batch=batch)

    return render(request, 'samples/batch.html', {
        'batch': batch,
        'project': batch.project,
        'samples': samples,
    })
def sample_registry(request, pk):
    """Render the detail page for one sample.

    The sample must belong to a project the user may view; otherwise 404.
    """
    projects = get_objects_for_user(request.user, 'samples.view_project')

    try:
        sample = Sample.objects.filter(project__in=projects) \
            .select_related('batch', 'project').get(pk=pk)
    except Sample.DoesNotExist:
        raise Http404

    return render(request, 'samples/sample.html', {
        'sample': sample,
        'batch': sample.batch,
        'project': sample.project,
    })
def cohort_form(request, pk=None):
    """Create (pk is None) or edit an existing cohort.

    Users holding the global 'samples.change_cohort' permission may edit any
    cohort; all other users are restricted to cohorts they own.
    """
    if request.user.has_perm('samples.change_cohort'):
        cohorts = Cohort.objects.all()
        cohort = get_object_or_404(Cohort, pk=pk) if pk else None
    else:
        cohorts = Cohort.objects.filter(user=request.user)
        cohort = \
            get_object_or_404(Cohort, pk=pk, user=request.user) if pk else None

    # Apply permissions..
    # NOTE(review): every sample is offered for selection here; presumably
    # this should be narrowed to samples the user can view -- confirm.
    samples = Sample.objects.all()

    if request.method == 'POST':
        form = CohortForm(samples, data=request.POST, instance=cohort,
                          initial={'user': request.user})
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('cohorts'))
    else:
        form = CohortForm(samples, instance=cohort)

    return render(request, 'samples/cohort-form.html', {
        'form': form,
        'cohort': cohort,
        'cohorts': cohorts,
    })
def cohort_delete(request, pk):
    """Delete a cohort and redirect to the cohort list.

    Privileged users ('samples.change_cohort') may delete any cohort; other
    users may only delete cohorts they own (otherwise 404).
    """
    lookup = {'pk': pk}
    if not request.user.has_perm('samples.change_cohort'):
        lookup['user'] = request.user
    cohort = get_object_or_404(Cohort, **lookup)
    cohort.delete()
    return HttpResponseRedirect(reverse('cohorts'))
| 2.015625 | 2 |
dumbai.py | CapKenway/dumbai | 0 | 12974 | import sys
from pprint import pprint
import os
#--------------------------------------------------------------------------#
class CsPP():
    """Ad-hoc constraint-satisfaction helper.

    ``self.domains`` holds the original variable->values mapping,
    ``self.maindict`` the progressively constrained domains, and
    ``self.keyitems`` an ordering of variables maintained by
    ``get_one_up``/``get_one_down``.
    """

    def __init__(self, domains):
        # domains: mapping of variable name -> list of candidate values.
        self.domains = domains
        self.maindict = {}
        self.keyitems = []
        pass

    def check_if(self):
        """Back-fill maindict entries for any domain key not yet constrained.

        NOTE(review): every missing key receives the *second* domain's value
        list (``list(self.domains.values())[1]``), not its own -- looks like a
        bug; confirm intended.
        """
        emptylist = []
        for domainkey in list(self.domains.keys()):
            if not domainkey in list(self.maindict.keys()):
                emptylist.append(domainkey)
        for listitem in emptylist:
            self.maindict[listitem] = list(self.domains.values())[1]
        pass

    def not_belonging(self, key, lister):
        """Constrain ``key`` to the original domain values NOT in ``lister``."""
        templist = []
        maindomain = self.domains[key]
        for item in maindomain:
            if not item in lister:
                templist.append(item)
        self.maindict[key] = templist
        pass

    def belonging(self, key, lister):
        """Constrain ``key`` directly to the given value list."""
        self.maindict.__setitem__(key, lister)
        pass

    def get_one_up(self, values):
        # Place values[1] immediately before values[0] in the ordering.
        self.keyitems.insert(self.keyitems.index(values[0]), values[1])

    def get_one_down(self, values):
        # Place values[0] immediately after values[1] by inserting into the
        # reversed ordering and flipping back.
        self.keyitems.reverse()
        self.keyitems.insert(self.keyitems.index(values[1]), values[0])
        self.keyitems.reverse()

    def not_working_together(self, first, second):
        """Remove from ``first``'s domain any value shared with ``second``.

        NOTE(review): removing from ``firstlist`` while iterating it can skip
        elements, so consecutive shared values may survive -- confirm.
        """
        firstlist = self.maindict[first]
        secondlist = self.maindict[second]
        for item in firstlist:
            if item in secondlist:
                firstlist.remove(item)
        self.maindict[first] = firstlist

    def backtrack(self, maindict, what_want = '', conditions = [], starter = ''):
        """Delegate variable selection ('mrv'/'lcv') to CsPP_Backend.

        NOTE(review): mutable default for ``conditions`` and unreachable
        trailing ``pass`` kept as-is.
        """
        csp_back = CsPP_Backend(domains = maindict, what_want = what_want, conditions = conditions, starter = starter)
        return csp_back._backtrack()
        pass

    def left_to_right(self, maindict, path):
        """Build an assignment following ``path`` in forward order and prune it.

        NOTE(review): mutates ``keylist`` while iterating it (duplicate and
        list-typed entries), so removal behaviour is order dependent.
        """
        to_do = []
        pathkeys = list(path.keys())
        pathvalues = list(path.values())
        mainkeys = list(maindict.keys())    # NOTE(review): unused
        mainvalues = list(maindict.values())  # NOTE(review): unused
        keylist = []
        for key, values in zip(pathkeys, pathvalues):
            keylist.append(key)
            if len(values) > 1:
                # Remember the extra successors beyond the first.
                to_do.append(values[1:])
        if len(to_do) != 0:
            for i in range(0, len(to_do)):
                popped = to_do.pop(i)
                keylist.append(popped)
        for item in keylist:
            if keylist.count(item) > 1:
                keylist.remove(item)
            if type(item) == list:
                keylist.remove(item)
        valuestodict = []
        for key in keylist:
            if type(key) != list:
                valuestodict.append(maindict[key])
            else:
                keylist.remove(key)
        returndict = dict((key, values) for key, values in zip(keylist, valuestodict))
        forprune = CsPP_Backend()
        pruned = forprune._prune(returndict)
        return pruned

    def right_to_left(self, maindict, path):
        """Build an assignment following ``path`` in reverse order; keys that
        fall outside the reachable chain are mapped to empty lists."""
        tempkeys = list(path.keys())
        tempvalues = list(path.values())
        tempvalues.reverse()
        tempkeys.reverse()
        i = 0
        flag = False
        templist = []
        removeditems = []
        indexes = []  # NOTE(review): unused
        i = 0
        # Start the chain from the last path key.
        templist.append(tempkeys[0])
        for key in tempkeys:
            for n in range(i, len(tempvalues)):
                flag = False
                for u in range(0, len(tempvalues[n])):
                    if len(tempvalues)!= 0 and key == tempvalues[n][u]:
                        i = n
                        templist.append(tempkeys[n])
                        flag = True
                        break
                if flag:
                    break
        # NOTE(review): mutates templist while iterating it; duplicate
        # removal is order dependent.
        for item in templist:
            if templist.count(item) > 1:
                templist.remove(item)
        dictvalues = []
        for tempval in templist:
            dictvalues.append(maindict[tempval])
        availdict = dict((key, val) for key, val in zip(templist, dictvalues))
        removedvalues = []
        for key in list(maindict.keys()):
            if not key in list(availdict.keys()):
                removeditems.append(key)
                removedvalues.append(maindict[key])
        removeddict = dict((key, val) for key, val in zip(removeditems, removedvalues))
        forprune = CsPP_Backend()
        pruned = forprune._prune(availdict)
        for key in list(removeddict.keys()):
            pruned[key] = []
        return pruned
        pass
#--------------------------------------------------------------------------#
class CsPP_Backend():
    """Backend helpers for CsPP: variable-ordering heuristics and pruning."""

    def __init__(self, *args, **kwargs):
        """Accept optional keyword configuration; missing keys become None."""
        self.domains = kwargs.get('domains')
        self.conditions = kwargs.get('conditions')
        self.what_want = kwargs.get('what_want')
        self.starter = kwargs.get('starter')

    def _backtrack(self):
        """Dispatch on ``what_want``: 'mrv' returns the key with the largest
        domain, 'lcv' the key with the smallest; anything else returns the
        domains mapping unchanged."""
        if self.what_want == 'mrv':
            return self._highest_constraint(self.domains, self.starter)
        if self.what_want == 'lcv':
            return self._minimum_constraint(self.domains, self.starter)
        return self.domains

    def _minimum_constraint(self, domains, starter=''):
        """Return the key whose domain holds the fewest values.

        Bug fixed: the original initialised the running best key to None and
        replaced it only on a strictly smaller domain, so it returned None
        whenever the baseline key (the starter, or the first key) was already
        the minimum. It also raised KeyError when ``starter`` was None (the
        kwargs.get default). The baseline key is now tracked explicitly and
        falsy starters fall back to the first key.
        """
        keys = list(domains.keys())
        low_constraint = starter if starter else keys[0]
        yet_lowest = len(domains[low_constraint])
        for key in keys:
            if len(domains[key]) < yet_lowest:
                yet_lowest = len(domains[key])
                low_constraint = key
        return low_constraint

    def _highest_constraint(self, domains, starter=''):
        """Return the key whose domain holds the most values (symmetric
        counterpart of ``_minimum_constraint``, with the same fix)."""
        keys = list(domains.keys())
        high_constraint = starter if starter else keys[0]
        yet_highest = len(domains[high_constraint])
        for key in keys:
            if len(domains[key]) > yet_highest:
                yet_highest = len(domains[key])
                high_constraint = key
        return high_constraint

    def _prune(self, domains):
        """Greedily assign each key the first of its values not already taken
        by an earlier key; keys with no unused value map to an empty list."""
        assignment = {}
        taken = []
        for key, value in domains.items():
            for val in value:
                if val in taken:
                    continue
                assignment[key] = val
                taken.append(val)
                break
        for key in domains:
            if key not in assignment:
                assignment[key] = []
        return assignment
#--------------------------------------------------------------------------# | 2.90625 | 3 |
napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 0 | 12975 | #!/usr/bin/env python3
# coding: utf-8
# Adapted from: https://github.com/zpincus/celltool/blob/master/celltool/numerics/image_warp.py
from scipy import ndimage
import numpy as np
from probreg import bcpd
import tifffile
import matplotlib.pyplot as plt
import napari
from magicgui import magic_factory, widgets
from napari.types import PointsData, ImageData
from typing_extensions import Annotated
def _make_inverse_warp(from_points, to_points, output_region, approximate_grid):
    """Build the inverse coordinate transform for warping an image volume.

    Solves a thin-plate spline mapping to_points -> from_points (inverted so
    the result gives, for each output voxel, the input coordinate to sample)
    over the box ``output_region`` = (x_min, y_min, z_min, x_max, y_max,
    z_max).  When ``approximate_grid`` > 1 the spline is evaluated on a
    coarser grid and then linearly interpolated up to full resolution for
    speed.

    Returns a list [x_coords, y_coords, z_coords] suitable for
    scipy.ndimage.map_coordinates.
    """
    x_min, y_min, z_min, x_max, y_max, z_max = output_region
    if approximate_grid is None: approximate_grid = 1
    x_steps = (x_max - x_min) // approximate_grid
    y_steps = (y_max - y_min) // approximate_grid
    z_steps = (z_max - z_min) // approximate_grid
    # Complex step counts make mgrid behave like linspace (inclusive end).
    x, y, z = np.mgrid[x_min:x_max:x_steps*1j, y_min:y_max:y_steps*1j, z_min:z_max:z_steps*1j]

    # from/to are swapped: image warping needs the output->input mapping.
    transform = _make_warp(to_points, from_points, x, y, z)

    if approximate_grid != 1:
        # linearly interpolate the zoomed transform grid
        new_x, new_y, new_z = np.mgrid[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1]
        # Split each full-resolution coordinate into the surrounding coarse
        # grid cell index and the fractional position within that cell.
        x_fracs, x_indices = np.modf((x_steps-1)*(new_x-x_min)/float(x_max-x_min))
        y_fracs, y_indices = np.modf((y_steps-1)*(new_y-y_min)/float(y_max-y_min))
        z_fracs, z_indices = np.modf((z_steps-1)*(new_z-z_min)/float(z_max-z_min))
        x_indices = x_indices.astype(int)
        y_indices = y_indices.astype(int)
        z_indices = z_indices.astype(int)
        # Weights for the lower corner of each cell.
        x1 = 1 - x_fracs
        y1 = 1 - y_fracs
        z1 = 1 - z_fracs
        # Upper-corner indices, clipped so border cells stay in range.
        ix1 = (x_indices+1).clip(0, x_steps-1)
        iy1 = (y_indices+1).clip(0, y_steps-1)
        iz1 = (z_indices+1).clip(0, z_steps-1)
        # Upsample each transform component by trilinear interpolation.
        transform_x = _trilinear_interpolation(0, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
        transform_y = _trilinear_interpolation(1, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
        transform_z = _trilinear_interpolation(2, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
        transform = [transform_x, transform_y, transform_z]
    return transform
def _trilinear_interpolation(d, t, x0, y0, z0, x1, y1, z1, ix0, iy0, iz0, ix1, iy1, iz1):
t000 = t[d][(ix0, iy0, iz0)]
t001 = t[d][(ix0, iy0, iz1)]
t010 = t[d][(ix0, iy1, iz0)]
t100 = t[d][(ix1, iy0, iz0)]
t011 = t[d][(ix0, iy1, iz1)]
t101 = t[d][(ix1, iy0, iz1)]
t110 = t[d][(ix1, iy1, iz0)]
t111 = t[d][(ix1, iy1, iz1)]
return t000*x0*y0*z0 + t001*x0*y0*z1 + t010*x0*y1*z0 + t100*x1*y0*z0 + t011*x0*y1*z1 + t101*x1*y0*z1 + t110*x1*y1*z0 + t111*x1*y1*z1
def _U(x):
_small = 1e-100
return (x**2) * np.where(x<_small, 0, np.log(x))
def _interpoint_distances(points):
xd = np.subtract.outer(points[:,0], points[:,0])
yd = np.subtract.outer(points[:,1], points[:,1])
zd = np.subtract.outer(points[:,2], points[:,2])
return np.sqrt(xd**2 + yd**2 + zd**2)
def _make_L_matrix(points):
    """Assemble the (n+4) x (n+4) thin-plate spline system matrix
    L = [[K, P], [P^T, 0]] for n control points in 3-D, where
    K[i, j] = U(|p_i - p_j|) and each row of P is (1, x, y, z)."""
    n = len(points)
    K = _U(_interpoint_distances(points))
    P = np.ones((n, 4))
    P[:,1:] = points
    # 4x4 zero block for the affine part of the system.
    O = np.zeros((4, 4))
    L = np.asarray(np.bmat([[K, P],[P.transpose(), O]]))
    return L
def _calculate_f(coeffs, points, x, y, z):
    """Evaluate one output component of the 3-D thin-plate spline.

    ``coeffs`` is one solution column of the TPS system: n radial weights
    followed by four affine coefficients (a1, ax, ay, az).  Returns the
    affine part plus the weighted radial-basis sum over the control points.

    Fix: the weights were sliced as ``coeffs[:-3]``, which left the first
    affine coefficient in the weight list; it only produced correct results
    because zip() truncated it against ``points``.  Slice the n weights
    explicitly instead.
    """
    w = coeffs[:-4]
    a1, ax, ay, az = coeffs[-4:]
    summation = np.zeros(x.shape)
    for wi, Pi in zip(w, points):
        summation += wi * _U(np.sqrt((x - Pi[0]) ** 2 + (y - Pi[1]) ** 2 + (z - Pi[2]) ** 2))
    return a1 + ax * x + ay * y + az * z + summation
def _make_warp(from_points, to_points, x_vals, y_vals, z_vals):
    """Solve the 3-D thin-plate spline mapping from_points -> to_points and
    evaluate it on the given coordinate grids.

    Returns [x_warp, y_warp, z_warp]: the warped coordinate for every grid
    position.

    Fix: for a 3-D TPS the right-hand side has FOUR affine-constraint rows
    that must be zero.  np.resize fills the extra rows by repeating the
    input, and the original only zeroed the last three (``V[-3:, :] = 0``,
    carried over from the 2-D version), leaving a duplicated control point in
    the constraint block and corrupting the affine part of the solution.
    A leftover debug print of the matrix shapes was also removed.
    """
    from_points, to_points = np.asarray(from_points), np.asarray(to_points)
    err = np.seterr(divide='ignore')  # _U takes log(0) at zero distances
    L = _make_L_matrix(from_points)
    V = np.resize(to_points, (len(to_points) + 4, 3))
    V[-4:, :] = 0
    # Least-squares solve via pseudo-inverse; columns of coeffs are the
    # spline coefficients for the x, y and z output components.
    coeffs = np.dot(np.linalg.pinv(L), V)
    x_warp = _calculate_f(coeffs[:, 0], from_points, x_vals, y_vals, z_vals)
    y_warp = _calculate_f(coeffs[:, 1], from_points, x_vals, y_vals, z_vals)
    z_warp = _calculate_f(coeffs[:, 2], from_points, x_vals, y_vals, z_vals)
    np.seterr(**err)
    return [x_warp, y_warp, z_warp]
@magic_factory
def make_image_warping(
    viewer: "napari.viewer.Viewer",
    moving_image: ImageData,
    fixed_image: ImageData,
    moving_points: PointsData,
    transformed_points: PointsData,
    interpolation_order: Annotated[int, {"min": 0, "max": 10, "step": 1}]=1,
    approximate_grid: Annotated[int, {"min": 1, "max": 10, "step": 1}]=1
):
    """napari widget: warp ``moving_image`` onto the grid of ``fixed_image``
    using a thin-plate spline defined by matched landmark pairs
    (``moving_points`` -> ``transformed_points``).

    The warp runs in a background thread; the result is added to the viewer
    as a new image layer named 'warped_image' when it finishes.
    """
    from napari.qt import thread_worker

    pbar = widgets.ProgressBar()
    pbar.range = (0, 0)  # unknown duration
    make_image_warping.insert(0, pbar)  # add progress bar to the top of widget

    # this function will be called after we return
    def _add_data(return_value, self=make_image_warping):
        data, kwargs = return_value
        viewer.add_image(data, **kwargs)
        self.pop(0).hide()  # remove the progress bar

    @thread_worker(connect={"returned": _add_data})
    def _warp_images(from_points, to_points, image, output_region, interpolation_order=5, approximate_grid=10):
        print('Entered warp_images')
        # Inverse transform: for each output voxel, the input coordinate to
        # sample from.
        transform = _make_inverse_warp(from_points, to_points, output_region, approximate_grid)
        warped_image = ndimage.map_coordinates(np.asarray(image), transform, order=interpolation_order)
        kwargs = dict(
            name='warped_image'
        )
        return (warped_image, kwargs)

    print('Warping image volume')
    assert len(moving_points) == len(transformed_points), 'Moving and transformed points must be of same length.'

    # Output region covers the full fixed-image volume.
    # NOTE(review): the '/ 1' divisors look like leftovers from a removed
    # downsampling factor.
    output_region = (0, 0, 0, int(fixed_image.shape[0] / 1), int(fixed_image.shape[1] / 1), int(fixed_image.shape[2] / 1))
    print(output_region)

    _warp_images(from_points=moving_points,
                 to_points=transformed_points,
                 image=moving_image,
                 output_region=output_region,
                 interpolation_order=interpolation_order,
                 approximate_grid=approximate_grid)
| 2.265625 | 2 |
main/tests/test_celery.py | OpenHumans/oh-23andme-source | 0 | 12976 | from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
from main.celery import read_reference, clean_raw_23andme
from main.celery_helper import vcf_header
import os
import tempfile
import requests
import requests_mock
from main.celery import process_file
class ParsingTestCase(TestCase):
"""
test that files are parsed correctly
"""
def setUp(self):
"""
Set up the app for following tests
"""
settings.DEBUG = True
call_command('init_proj_config')
self.factory = RequestFactory()
data = {"access_token": '<PASSWORD>',
"refresh_token": '<PASSWORD>',
"expires_in": 36000}
self.oh_member = OpenHumansMember.create(oh_id='12345678',
data=data)
self.oh_member.save()
self.user = self.oh_member.user
self.user.set_password('<PASSWORD>')
self.user.save()
def test_read_reference(self):
"""
Test function to read the reference file.
"""
REF_23ANDME_FILE = os.path.join(os.path.dirname(__file__),
'fixtures/test_reference.txt')
ref = read_reference(REF_23ANDME_FILE)
self.assertEqual(ref, {'1': {'82154': 'A', '752566': 'G'}})
def test_vcf_header(self):
"""
Test function to create a VCF header
"""
hd = vcf_header(
source='23andme',
reference='http://example.com',
format_info=['<ID=GT,Number=1,Type=String,Description="GT">'])
self.assertEqual(len(hd), 6)
expected_header_fields = ["##fileformat",
"##fileDate",
'##source',
'##reference',
'##FORMAT',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER' +
'\tINFO\tFORMAT\t23ANDME_DATA']
self.assertEqual([i.split("=")[0] for i in hd], expected_header_fields)
def test_23andme_cleaning(self):
"""
Test that cleanup works as expected
"""
with requests_mock.Mocker() as m:
get_url = 'http://example.com/23andme_file.txt'
closed_input_file = os.path.join(os.path.dirname(__file__),
'fixtures/23andme_invalid.txt')
fhandle = open(closed_input_file, "rb")
content = fhandle.read()
m.register_uri('GET',
get_url,
content=content,
status_code=200)
tf_in = tempfile.NamedTemporaryFile(suffix=".txt")
tf_in.write(requests.get(get_url).content)
tf_in.flush()
cleaned_input = clean_raw_23andme(tf_in)
cleaned_input.seek(0)
lines = cleaned_input.read()
self.assertEqual(lines.find('<NAME>'), -1)
self.assertNotEqual(lines.find('data file generated'), -1)
@vcr.use_cassette('main/tests/fixtures/process_file.yaml',
record_mode='none')
def test_process_file(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_bz2.yaml',
record_mode='none')
def test_process_file_bz2(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.bz2',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.bz2?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_gz.yaml',
record_mode='none')
def test_process_file_gz(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.gz',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.gz?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_zip.yaml',
record_mode='none')
def test_process_file_zip(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.zip',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.zip?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
| 2.34375 | 2 |
apps/tracking/admin.py | Codeidea/budget-tracker | 0 | 12977 | <reponame>Codeidea/budget-tracker
from django.contrib import admin
from .models import LogCategory, BudgetLog
# Register your models here.
admin.site.register(LogCategory)
admin.site.register(BudgetLog) | 1.210938 | 1 |
nn_model/embedding_layer.py | onlyrico/mling_sdgms | 4 | 12978 | # -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
Embedding Layer
"""
#************************************************************
# Imported Libraries
#************************************************************
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import KeyedVectors
import pdb
class EmbeddingLayer(nn.Module):
def __init__(self, params, vocab, pretrained_emb_path = None):
super(EmbeddingLayer, self).__init__()
# embedding layer
self.lang = vocab.lang
self.vocab = vocab
self.emb_dim = params.emb_dim
self.embeddings = nn.Embedding(vocab.vocab_size, self.emb_dim, padding_idx = vocab.PAD_ID)
self.init_emb(self.embeddings, pretrained_emb_path, vocab)
# ijcai dropout, p = 0.2
self.emb_do = nn.Dropout(p = params.emb_do)
self.use_cuda = params.cuda
def init_emb(self, embeddings, pretrained_emb_path, vocab):
if pretrained_emb_path is not None:
self.load_pretrained(pretrained_emb_path, embeddings, vocab)
else:
"""
Initialize embedding weight like word2vec.
The u_embedding is a uniform distribution in [-0.5/emb_dim, 0.5/emb_dim],
"""
initrange = 0.5 / self.emb_dim
embeddings.weight.data.uniform_(-initrange, initrange)
embeddings.weight.data[vocab.PAD_ID] = 0
def load_pretrained(self, pretrained_emb_path, embeddings, vocab):
print('loading {} embeddings for {}'.format(pretrained_emb_path, self.lang))
try:
pre_emb = KeyedVectors.load_word2vec_format(pretrained_emb_path, binary = False)
except:
print('Did not found {} embeddings for {}'.format(pretrained_emb_path, self.lang))
return
# ignore only pad
for i in range(1, len(vocab.idx2word)):
try:
embeddings.weight.data[i] = torch.from_numpy(pre_emb[vocab.idx2word[i]])
except:
continue
def forward(self, batch_input):
input_word_embs = self.embeddings(batch_input)
input_word_embs = self.emb_do(input_word_embs)
return input_word_embs
| 2.65625 | 3 |
grpclib/server.py | panaetov/grpclib | 0 | 12979 | import abc
import socket
import logging
import asyncio
import warnings
import h2.config
import h2.exceptions
from .utils import DeadlineWrapper
from .const import Status
from .stream import send_message, recv_message
from .stream import StreamIterator
from .metadata import Metadata, Deadline
from .protocol import H2Protocol, AbstractHandler
from .exceptions import GRPCError, ProtocolError
from .encoding.base import GRPC_CONTENT_TYPE
from .encoding.proto import ProtoCodec
log = logging.getLogger(__name__)
class Stream(StreamIterator):
"""
Represents gRPC method call – HTTP/2 request/stream, and everything you
need to communicate with client in order to handle this request.
As you can see, every method handler accepts single positional argument -
stream:
.. code-block:: python
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, codec, recv_type, send_type,
*, metadata, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._codec = codec
self._recv_type = recv_type
self._send_type = send_type
self.metadata = metadata
self.deadline = deadline
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python
async for massage in stream:
do_smth_with(message)
or even like this:
.. code-block:: python
messages = [msg async for msg in stream]
HTTP/2 has flow control mechanism, so server will acknowledge received
DATA frames as a message only after user consumes this coroutine.
:returns: message
"""
return await recv_message(self._stream, self._codec, self._recv_type)
async def send_initial_metadata(self):
"""Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
await self._stream.send_headers([
(':status', '200'),
('content-type', (GRPC_CONTENT_TYPE + '+'
+ self._codec.__content_subtype__)),
])
self._send_initial_metadata_done = True
async def send_message(self, message, **kwargs):
"""Coroutine to send message to the client.
If server sends UNARY response, then you should call this coroutine only
once. If server sends STREAM response, then you can call this coroutine
as many times as you need.
:param message: message object
"""
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata()
async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None):
"""Coroutine to send trailers with trailing metadata to the client.
This coroutine allows sending trailers-only responses, in case of some
failure conditions during handling current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
"""
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if not self._send_message_count and status is Status.OK:
raise ProtocolError('{!r} requires non-empty response'
.format(status))
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message', status_message))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
async def request_handler(mapping, _stream, headers, codec, release_stream):
try:
headers_map = dict(headers)
if headers_map[':method'] != 'POST':
await _stream.send_headers([
(':status', '405'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
content_type = headers_map.get('content-type')
if content_type is None:
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Missing content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != codec.__content_subtype__
):
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Unacceptable content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
if headers_map.get('te') != 'trailers':
await _stream.send_headers([
(':status', '400'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Required "te: trailers" header is missing'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
h2_path = headers_map[':path']
method = mapping.get(h2_path)
if method is None:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNIMPLEMENTED.value)),
('grpc-message', 'Method not found'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
metadata = Metadata.from_headers(headers)
try:
deadline = Deadline.from_metadata(metadata)
except ValueError:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Invalid grpc-timeout header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
async with Stream(_stream, method.cardinality, codec,
method.request_type, method.reply_type,
metadata=metadata, deadline=deadline) as stream:
deadline_wrapper = None
try:
if deadline:
deadline_wrapper = DeadlineWrapper()
with deadline_wrapper.start(deadline):
with deadline_wrapper:
await method.func(stream)
else:
await method.func(stream)
except asyncio.TimeoutError:
if deadline_wrapper and deadline_wrapper.cancelled:
log.exception('Deadline exceeded')
raise GRPCError(Status.DEADLINE_EXCEEDED)
else:
log.exception('Timeout occurred')
raise
except asyncio.CancelledError:
log.exception('Request was cancelled')
raise
except Exception:
log.exception('Application error')
raise
except Exception:
log.exception('Server error')
finally:
release_stream()
class _GC(abc.ABC):
_gc_counter = 0
@property
@abc.abstractmethod
def __gc_interval__(self):
raise NotImplementedError
@abc.abstractmethod
def __gc_collect__(self):
pass
def __gc_step__(self):
self._gc_counter += 1
if not (self._gc_counter % self.__gc_interval__):
self.__gc_collect__()
class Handler(_GC, AbstractHandler):
__gc_interval__ = 10
closing = False
def __init__(self, mapping, codec, *, loop):
self.mapping = mapping
self.codec = codec
self.loop = loop
self._tasks = {}
self._cancelled = set()
def __gc_collect__(self):
self._tasks = {s: t for s, t in self._tasks.items()
if not t.done()}
self._cancelled = {t for t in self._cancelled
if not t.done()}
def accept(self, stream, headers, release_stream):
self.__gc_step__()
self._tasks[stream] = self.loop.create_task(
request_handler(self.mapping, stream, headers, self.codec,
release_stream)
)
def cancel(self, stream):
task = self._tasks.pop(stream)
task.cancel()
self._cancelled.add(task)
def close(self):
for task in self._tasks.values():
task.cancel()
self._cancelled.update(self._tasks.values())
self.closing = True
async def wait_closed(self):
if self._cancelled:
await asyncio.wait(self._cancelled, loop=self.loop)
def check_closed(self):
self.__gc_collect__()
return not self._tasks and not self._cancelled
class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='utf-8',
)
self._tcp_server = None
self._handlers = set()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
async def start(self, host=None, port=None, *,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created.
"""
if self._tcp_server is not None:
raise RuntimeError('Server is already started')
self._tcp_server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
)
def close(self):
"""Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
self._tcp_server.close()
for handler in self._handlers:
handler.close()
async def wait_closed(self):
"""Coroutine to wait until all existing request handlers will exit
properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
await self._tcp_server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop)
| 2.8125 | 3 |
examples/vector_dot.py | Wheest/EVA | 0 | 12980 | #!/usr/bin/env python
import argparse
from eva import EvaProgram, Input, Output
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
import numpy as np
import time
from eva.std.numeric import horizontal_sum
def dot(x, y):
return np.dot(x, y)
def generate_inputs_naive(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
for n in range(size):
# each element is a list (i.e. a vector of size 1)
inputs[f"{label}_{n}"] = [i]
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot_naive(size):
"""Vector dot product with vector size of 1"""
fhe_dot = EvaProgram("fhe_dot", vec_size=1)
with fhe_dot:
a = np.array([Input(f"x_{n}") for n in range(size)]).reshape(1, size)
b = np.array([Input(f"w_{k}") for k in range(size)]).reshape(size, 1)
out = dot(a, b)
Output("y", out[0][0])
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def generate_inputs(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
# all data is stored in a single list of size `size`
inputs[label] = list(range(size))
for n in range(size):
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot(size):
"""Vector dot product with CKKS vector size equal to the size"""
fhe_dot = EvaProgram("fhe_dot", vec_size=size)
with fhe_dot:
a = np.array([Input("x")])
b = np.array([Input(f"w")])
out = dot(a, b)
Output("y", horizontal_sum(out))
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def benchmark_vector_dot(size, mode="SIMD"):
if mode == "SIMD":
# generate program with SIMD-style
inputs, inputs_np = generate_inputs(size, label="x")
weights, weights_np = generate_inputs(size, label="w")
fhe_dot = generate_vector_dot(size)
else:
# generate program with vector size = 1
inputs, inputs_np = generate_inputs_naive(size, label="x")
weights, weights_np = generate_inputs_naive(size, label="w")
fhe_dot = generate_vector_dot_naive(size)
# compiling program
data = {**weights, **inputs}
compiler = CKKSCompiler(config={"security_level": "128", "warn_vec_size": "false"})
compiled, params, signature = compiler.compile(fhe_dot)
public_ctx, secret_ctx = generate_keys(params)
enc_inputs = public_ctx.encrypt(data, signature)
# Running program
start = time.time()
enc_outputs = public_ctx.execute(compiled, enc_inputs)
end = time.time()
run_time = end - start
# decrypt the output
outputs = secret_ctx.decrypt(enc_outputs, signature)
y = np.array(outputs["y"])
# get time for plaintext dot product
start = time.time()
true_y = inputs_np.dot(weights_np)
end = time.time()
plain_run_time = end - start
# verifying correctness of output
np.testing.assert_allclose(y, true_y)
return run_time, plain_run_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a dot product program")
parser.add_argument(
"--mode",
default="SIMD",
choices=["SIMD", "naive"],
)
args = parser.parse_args()
results_cipher = dict()
results_plain = dict()
if args.mode == "SIMD":
print("Generating code in SIMD style")
else:
print("Generating code in naive style")
for size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
time_cipher, time_plain = benchmark_vector_dot(size, args.mode)
results_cipher[f"{size}"] = time_cipher
results_plain[f"{size}"] = time_plain
print(f"Done vector size {size}, CKKS time: {time_cipher}")
print("Done")
print("CKKS times:", results_cipher)
print("Plain text times:", results_plain)
| 2.90625 | 3 |
ramcache.py | Lopez6969/chromium-dashboard | 0 | 12981 | from __future__ import division
from __future__ import print_function
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module manages a distributed RAM cache as a global python dictionary in
each AppEngine instance. AppEngine can spin up new instances or kill old ones
at any time. Each instance's RAM cache is independent and might not have the
same entries as found in the RAM caches of other instances.
Each instance will do the work needed to compute a given RAM cache entry
itself. The values computed in a given instance will speed up future requests
made to that instance only.
When the user edits something in the app, the updated entity is stored in
datastore. Also, the singleton SharedInvalidate entity is updated with the
timestamp of the change. Every request handler must start processing a request
by first calling SharedInvalidate.check_for_distributed_invalidation() which
checks for any needed invalidations and clears RAM cache entries in
that instance if needed.
For now, there is only a single RAM cache per instance and when anything is
invalidated, that entire RAM cache is completely cleared. In the future,
invalidations could be compartmentalized by RAM cache type, or even specific
entity IDs. Monorail uses that approach, but existing ChromeStatus code does
not need it.
Calling code must not mutate any value that is passed into set() or returned
from get(). If calling code needs to mutate such objects, it should call
copy.copy() or copy.deepcopy() to avoid unintentional cumulative mutations.
Unlike memcache, this RAM cache has no concept of expiration time. So,
whenever a cached value would become invalid, it must be invalidated.
"""
import logging
import time as time_module
from google.appengine.ext import db
global_cache = {}
expires = {}
# Whenever the cache would have more than this many items, some
# random item is dropped, or the entire cache is cleared.
# If our instances are killed by appengine for exceeding memory limits,
# we can configure larger instances and/or reduce this value.
MAX_CACHE_SIZE = 10000
def set(key, value, time=None):
"""Emulate the memcache.set() method using a RAM cache."""
if len(global_cache) + 1 > MAX_CACHE_SIZE:
popped_item = global_cache.popitem()
if popped_item[0] in expires:
del expires[popped_item[0]]
global_cache[key] = value
if time:
expires[key] = int(time_module.time()) + time
def _check_expired(keys):
now = int(time_module.time())
for key in keys:
if key in expires and expires[key] < now:
del expires[key]
del global_cache[key]
def get(key):
"""Emulate the memcache.get() method using a RAM cache."""
_check_expired([key])
verb = 'hit' if key in global_cache else 'miss'
logging.info('cache %s for %r', verb, key)
return global_cache.get(key)
def get_multi(keys):
"""Emulate the memcache.get_multi() method using a RAM cache."""
_check_expired(keys)
return {
key: global_cache[key]
for key in keys
if key in global_cache
}
def set_multi(entries):
"""Emulate the memcache.set_multi() method using a RAM cache."""
if len(global_cache) + len(entries) > MAX_CACHE_SIZE:
global_cache.clear()
expires.clear()
global_cache.update(entries)
def delete(key):
"""Emulate the memcache.delete() method using a RAM cache."""
if key in global_cache:
del global_cache[key]
flush_all() # Note: this is wasteful but infrequent in our app.
def flush_all():
"""Emulate the memcache.flush_all() method using a RAM cache.
This does not clear the RAM cache in this instance. That happens
at the start of the next request when the request handler calls
SharedInvalidate.check_for_distributed_invalidation().
"""
SharedInvalidate.invalidate()
class SharedInvalidateParent(db.Model):
pass
class SharedInvalidate(db.Model):
PARENT_ENTITY_ID = 123
PARENT_KEY = db.Key.from_path('SharedInvalidateParent', PARENT_ENTITY_ID)
SINGLETON_ENTITY_ID = 456
SINGLETON_KEY = db.Key.from_path(
'SharedInvalidateParent', PARENT_ENTITY_ID,
'SharedInvalidate', SINGLETON_ENTITY_ID)
last_processed_timestamp = None
updated = db.DateTimeProperty(auto_now=True)
@classmethod
def invalidate(cls):
"""Tell this and other appengine instances to invalidate their caches."""
singleton = cls.get(cls.SINGLETON_KEY)
if not singleton:
singleton = SharedInvalidate(key=cls.SINGLETON_KEY)
singleton.put() # automatically sets singleton.updated to now.
# The cache in each instance (including this one) will be
# cleared on the next call to check_for_distributed_invalidation()
# which should happen at the start of request processing.
@classmethod
def check_for_distributed_invalidation(cls):
"""Check if any appengine instance has invlidated the cache."""
singleton = cls.get(cls.SINGLETON_KEY, read_policy=db.STRONG_CONSISTENCY)
if not singleton:
return # No news is good news
if (cls.last_processed_timestamp is None or
singleton.updated > cls.last_processed_timestamp):
global_cache.clear()
expires.clear()
cls.last_processed_timestamp = singleton.updated
def check_for_distributed_invalidation():
"""Just a shorthand way to call the class method."""
SharedInvalidate.check_for_distributed_invalidation()
| 2.203125 | 2 |
src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 0 | 12982 | <filename>src/sales/migrations/0029_auto_20191025_1058.py
# Generated by Django 2.2 on 2019-10-25 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sales', '0028_auto_20191024_1736'),
]
operations = [
migrations.AlterField(
model_name='interaction',
name='result',
field=models.CharField(blank=True, max_length=1000),
),
]
| 1.210938 | 1 |
maintenance/pymelControlPanel.py | GlenWalker/pymel | 0 | 12983 | """
UI for controlling how api classes and mel commands are combined into pymel classes.
This UI modifies factories.apiToMelData which is pickled out to apiMelBridge.
It controls:
which mel methods correspond to api methods
disabling of api methods
preference for overloaded methods (since currently only one overloaded method is supported)
renaming of apiMethod
"""
import inspect, re, os
import pymel.core as pm
import pymel.internal.factories as factories
import logging
# Module-level logger; default to INFO only if no level was configured externally.
logger = logging.getLogger(__name__)
if logger.level == logging.NOTSET:
    logger.setLevel(logging.INFO)
# Pixel width used for the per-method frame layouts built by the UI.
FRAME_WIDTH = 800
VERBOSE = True
class PymelControlPanel(object):
def __init__(self):
# key is a tuple of (class, method)
self.classList = sorted( list( set( [ key[0] for key in factories.apiToMelData.keys()] ) ) )
self.classFrames={}
self.processClassFrames()
self.buildUI()
def buildUI(self):
_notifySavingDisabled()
self.win = pm.window(title='Pymel Control Panel')
self.win.show()
with pm.paneLayout(configuration='vertical3', paneSize=([1,20,100], [3,20,100]) ) as self.pane:
# Lef Column: Api Classes
self.classScrollList = pm.textScrollList('apiClassList')
# Center Column: Api Methods
# Would LIKE to do it like this, but there is currently a bug with
# objectType UI, such that even if
# layout('window4|paneLayout5', q=1, exists=1) == True
# when you run:
# objectTypeUI('window4|paneLayout5')
# you will get an error:
# RuntimeError: objectTypeUI: Object 'window4|paneLayout5' not found.
# with formLayout() as apiForm:
# #with scrollLayout() as scroll:
# with tabLayout('apiMethodCol') as self.apiMethodCol:
# pass
# status = helpLine(h=60)
# So, instead, we do it old-school...
apiForm = pm.formLayout()
self.apiMethodCol = pm.tabLayout('apiMethodCol')
pm.setParent(apiForm)
status = pm.cmds.helpLine(h=60)
pm.setParent(self.pane)
apiForm.attachForm( self.apiMethodCol, 'top', 5 )
apiForm.attachForm( self.apiMethodCol, 'left', 5 )
apiForm.attachForm( self.apiMethodCol, 'right', 5 )
apiForm.attachControl( self.apiMethodCol, 'bottom', 5, status )
apiForm.attachPosition( status, 'bottom', 5, 20 )
apiForm.attachForm( status, 'bottom', 5 )
apiForm.attachForm( status, 'left', 5 )
apiForm.attachForm( status, 'right', 5 )
# Right Column: Mel Methods
melForm = pm.formLayout()
label1 = pm.text( label='Unassigned Mel Methods' )
self.unassignedMelMethodLister = pm.textScrollList()
label2 = pm.text( label='Assigned Mel Methods' )
self.assignedMelMethodLister = pm.textScrollList()
label3 = pm.text( label='Disabled Mel Methods' )
self.disabledMelMethodLister = pm.textScrollList()
pm.setParent(self.pane)
melForm.attachForm( label1, 'top', 5 )
melForm.attachForm( label1, 'left', 5 )
melForm.attachForm( label1, 'right', 5 )
melForm.attachControl( self.unassignedMelMethodLister, 'top', 0, label1 )
melForm.attachForm( self.unassignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.unassignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.unassignedMelMethodLister, 'bottom', 5, 33 )
melForm.attachControl( label2, 'top', 5, self.unassignedMelMethodLister)
melForm.attachForm( label2, 'left', 5 )
melForm.attachForm( label2, 'right', 5 )
melForm.attachControl( self.assignedMelMethodLister, 'top', 0, label2 )
melForm.attachForm( self.assignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.assignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.assignedMelMethodLister, 'bottom', 5, 66 )
melForm.attachControl( label3, 'top', 5, self.assignedMelMethodLister)
melForm.attachForm( label3, 'left', 5 )
melForm.attachForm( label3, 'right', 5 )
melForm.attachControl( self.disabledMelMethodLister, 'top', 0, label3 )
melForm.attachForm( self.disabledMelMethodLister, 'left', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'right', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'bottom', 5 )
pm.setParent('..')
pm.popupMenu(parent=self.unassignedMelMethodLister, button=3 )
pm.menuItem(l='disable', c=pm.Callback( PymelControlPanel.disableMelMethod, self, self.unassignedMelMethodLister ) )
pm.popupMenu(parent=self.assignedMelMethodLister, button=3 )
pm.menuItem(l='disable', c=pm.Callback( PymelControlPanel.disableMelMethod, self, self.assignedMelMethodLister ) )
pm.popupMenu(parent=self.disabledMelMethodLister, button=3 )
pm.menuItem(l='enable', c=pm.Callback( PymelControlPanel.enableMelMethod))
self.classScrollList.extend( self.classList )
self.classScrollList.selectCommand( lambda: self.apiClassList_selectCB() )
pm.scriptJob(uiDeleted=[str(self.win),cacheResults])
self.win.show()
def disableMelMethod(self, menu):
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.disabledMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ]['melEnabled'] = False
def enableMelMethod(self):
menu = self.disabledMelMethodLister
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.unassignedMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ].pop('melEnabled')
@staticmethod
def getMelMethods(className):
"""get all mel-derived methods for this class"""
import maintenance.build
if not factories.classToMelMap.keys():
# force factories.classToMelMap to be populated
list(maintenance.build.iterPyNodeText())
assert factories.classToMelMap
reg = re.compile('(.*[a-z])([XYZ])$')
newlist = []
origlist = factories.classToMelMap.get(className, [])
for method in origlist:
m = reg.search(method)
if m:
# strip off the XYZ component and replace with *
newname = m.group(1) + '*'
if newname not in newlist:
newlist.append(newname)
else:
newlist.append(method)
return sorted(newlist)
def apiClassList_selectCB(self, *args):
sel = self.classScrollList.getSelectItem()
if sel:
self.buildClassColumn(sel[0])
def assignMelMethod(self, method):
#print "method %s is now assigned" % method
if method in pm.util.listForNone( self.unassignedMelMethodLister.getAllItems() ):
self.unassignedMelMethodLister.removeItem(method)
self.assignedMelMethodLister.append( method )
def unassignMelMethod(self, method):
#print "method %s is now unassigned" % method
if method in pm.util.listForNone( self.assignedMelMethodLister.getAllItems() ):
self.assignedMelMethodLister.removeItem(method)
self.unassignedMelMethodLister.append( method )
def processClassFrames(self):
"""
This triggers the generation of all the defaults for `factories.apiToMelData`, but it does
not create any UI elements. It creates `ClassFrame` instances, which in turn create
`MethodRow` instances, but the creation of UI elements is delayed until a particular
configuration is requested via `buildClassColumn`.
"""
logger.info( 'processing all classes...' )
for className in self.classList:
melMethods = self.getMelMethods(className)
logger.debug( '%s: mel methods: %s' % (className, melMethods) )
for clsName, apiClsName in getClassHierarchy(className):
if apiClsName and apiClsName not in ['list']:
if clsName not in self.classFrames:
frame = ClassFrame( self, clsName, apiClsName)
self.classFrames[clsName] = frame
# temporarily disable the melName updating until we figure out how to deal
# with base classes that are the parents of many others, and which therefore end up with
# methods derived from many different mel commands, which are only applicable for the inherited classes
# not for the base class on its own. ( see ObjectSet and Character, for an example, specifically 'getIntersection' method )
#self.classFrames[clsName].updateMelNames( melMethods )
logger.info( 'done processing classes' )
    def buildClassColumn(self, className ):
        """
        Build an info column for a class. This column will include processed `ClassFrame`s for it and its parent classes
        """
        pm.setParent(self.apiMethodCol)
        self.apiMethodCol.clear()
        # reset the three mel-method lists before repopulating them
        self.unassignedMelMethodLister.removeAll()
        self.assignedMelMethodLister.removeAll()
        self.disabledMelMethodLister.removeAll()
        melMethods = self.getMelMethods(className)
        for method in melMethods:
            # fix: migrate legacy data -- an entry that is exactly
            # {'enabled': False} predates the 'melEnabled' flag; convert it
            if (className, method) in factories.apiToMelData and factories.apiToMelData[ (className, method) ] == {'enabled':False}:
                d = factories.apiToMelData.pop( (className, method) )
                d.pop('enabled')
                d['melEnabled'] = False
            # sort each mel method into the disabled or unassigned list
            if (className, method) in factories.apiToMelData and factories.apiToMelData[(className, method)].get('melEnabled',True) == False:
                self.disabledMelMethodLister.append( method )
            else:
                self.unassignedMelMethodLister.append( method )
        #filter = set( ['double', 'MVector'] )
        # an empty (falsy) filter means "show every method" -- see MethodRow.buildUI
        filter = []
        count = 0
        for clsName, apiClsName in getClassHierarchy(className):
            if apiClsName:
                #print cls
                if clsName in self.classFrames:
                    logger.debug( "building UI for %s", clsName )
                    frame = self.classFrames[clsName].buildUI(filter)
                    self.apiMethodCol.setTabLabel( [frame, clsName] )
                    count+=1
                    #frame.setVisible(False)
                    #if i != len(mro)-1:
                    #    frame.setCollapse(True)
                else:
                    logger.debug( "skipping %s", clsName )
        # select the tab of the last class frame that was built
        self.apiMethodCol.setSelectTabIndex(count)
        #self.classFrames[className].frame.setCollapse(False)
class ClassFrame(object):
    """UI frame for a single pymel/api class pair.

    Owns one `MethodRow` per api method of the class.  Instantiating a
    ClassFrame creates all its MethodRows (which populates defaults in
    `factories.apiToMelData`); the actual UI is only built when `buildUI`
    is called.
    """
    def __init__(self, parent, className, apiClassName ):
        # parent: the PymelControlPanel that owns this frame
        self.parent = parent
        self.className = className
        self.apiClassName = apiClassName
        # method name -> MethodRow
        self.rows = {}
        self.classInfo = factories.apiClassInfo[apiClassName]['methods']
        for method in self.classInfo.keys():
            row = MethodRow( self, self.className, self.apiClassName, method, self.classInfo[method] )
            self.rows[method] = row
    def updateMelNames(self, melMethods):
        """Propagate the list of mel-derived method names to every row."""
        logger.debug( '%s: updating melNames' % self.className )
        for rowName, row in self.rows.items():
            row.updateMelNames( melMethods )
    def buildUI(self, filter=None):
        """Build this class's UI: a frame with two tabs, one for paired
        (setter/getter "invertible") methods and one for unpaired methods.
        *filter* is forwarded to each MethodRow.buildUI.  Returns the frame.
        """
        count = 0
        #self.form = formLayout()
        with pm.frameLayout(collapsable=False, label='%s (%s)' % (self.className, self.apiClassName),
                            width = FRAME_WIDTH) as self.frame:
            #labelAlign='top')
            with pm.tabLayout() as tab:
                invertibles = factories.apiClassInfo[self.apiClassName].get('invertibles', [])
                usedMethods = []
                # --- tab 1: invertible setter/getter pairs ---
                with pm.formLayout() as pairdForm:
                    tab.setTabLabel( [pairdForm, 'Paired'] )
                    with pm.scrollLayout() as pairedScroll:
                        with pm.columnLayout(visible=False, adjustableColumn=True) as pairedCol:
                            for setMethod, getMethod in invertibles:
                                pm.setParent(pairedCol) # column
                                frame = pm.frameLayout(label = '%s / %s' % (setMethod, getMethod),
                                                       labelVisible=True, collapsable=True,
                                                       collapse=True, width = FRAME_WIDTH)
                                col2 = pm.columnLayout()
                                pairCount = 0
                                pairCount += self.rows[setMethod].buildUI(filter)
                                pairCount += self.rows[getMethod].buildUI(filter)
                                usedMethods += [setMethod, getMethod]
                                if pairCount == 0:
                                    # neither method matched the filter; hide the pair
                                    #deleteUI(col2)
                                    frame.setVisible(False)
                                    frame.setHeight(1)
                                count += pairCount
                            pairedCol.setVisible(True)
                    pairdForm.attachForm( pairedScroll, 'top', 5 )
                    pairdForm.attachForm( pairedScroll, 'left', 5 )
                    pairdForm.attachForm( pairedScroll, 'right', 5 )
                    pairdForm.attachForm( pairedScroll, 'bottom', 5 )
                # --- tab 2: everything not consumed by a pair above ---
                with pm.formLayout() as unpairedForm:
                    tab.setTabLabel( [unpairedForm, 'Unpaired'] )
                    with pm.scrollLayout() as unpairedScroll:
                        with pm.columnLayout(visible=False ) as unpairedCol:
                            # For some reason, on linux, the unpairedCol height is wrong...
                            # track + set it ourselves
                            unpairedHeight = 10 # a little extra buffer...
                            #rowSpace = unpairedCol.getRowSpacing()
                            for methodName in sorted( self.classInfo.keys() ):
                                pm.setParent(unpairedCol)
                                if methodName not in usedMethods:
                                    frame = pm.frameLayout(label = methodName,
                                                           labelVisible=True, collapsable=True,
                                                           collapse=True, width = FRAME_WIDTH)
                                    col2 = pm.columnLayout()
                                    count += self.rows[methodName].buildUI(filter)
                                    unpairedHeight += self.rows[methodName].frame.getHeight()# + rowSpace
                            unpairedCol.setHeight(unpairedHeight)
                            #self.form.attachForm( self.frame, 'left', 2)
                            #self.form.attachForm( self.frame, 'right', 2)
                            #self.form.attachForm( self.frame, 'top', 2)
                            #self.form.attachForm( self.frame, 'bottom', 2)
                            unpairedCol.setVisible(True)
                    unpairedForm.attachForm( unpairedScroll, 'top', 5 )
                    unpairedForm.attachForm( unpairedScroll, 'left', 5 )
                    unpairedForm.attachForm( unpairedScroll, 'right', 5 )
                    unpairedForm.attachForm( unpairedScroll, 'bottom', 5 )
        return self.frame
class MethodRow(object):
    """UI row representing a single api method of a pymel class.

    Tracks and edits the method's enabled state, naming mode (api/mel/other),
    mel equivalent, overload selection and per-argument direction/unit-type
    settings.  All edits are written through to the `factories` override
    dictionaries (`apiToMelData` / `apiClassOverrides`).
    """
    def __init__(self, parent, className, apiClassName, apiMethodName,
                 methodInfoList):
        # parent: the owning ClassFrame
        self.parent = parent
        self.className = className
        self.methodName = factories.apiClassInfo[apiClassName].get('pymelMethods', {}).get(apiMethodName, apiMethodName)
        self.apiClassName = apiClassName
        self.apiMethodName = apiMethodName
        self.methodInfoList = methodInfoList
        self.data = factories._getApiOverrideData(self.className, self.methodName)
        self.classInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName]
        try:
            enabledArray = self.getEnabledArray()
        except:
            # identify the offending method before re-raising
            print("%s %s" % (self.apiClassName, self.apiMethodName))
            raise
        # DEFAULT VALUES

        # correct old values
        # we no longer store positive values, only negative -- meaning methods will be enabled by default
        # if 'enabled' in self.data and ( self.data['enabled'] == True or sum(enabledArray) == 0 ):
        #     logger.debug( '%s.%s: enabled array: %s' % ( self.className, self.methodName, enabledArray ) )
        #     logger.debug( '%s.%s: removing enabled entry' % ( self.className, self.methodName) )
        #     self.data.pop('enabled', None)

        # enabled
        # if 'enabled' not in self.data:
        #     self.data['enabled'] = True
        if self.methodName in factories.EXCLUDE_METHODS : # or sum(enabledArray) == 0:
            self.data['enabled'] = False

        # useName mode
        if 'useName' not in self.data:
            self.data['useName'] = 'API'
        else:
            # correct old values (the flag used to be a bool)
            useNameVal = self.data['useName']
            if useNameVal == True:
                self.data['useName'] = 'API'
            elif useNameVal == False:
                self.data['useName'] = 'MEL'
            elif useNameVal not in ['MEL', 'API']:
                self.data['useName'] = str(useNameVal)

        # correct old values ('overloadPrecedence' was renamed 'overloadIndex')
        if 'overloadPrecedence' in self.data:
            self.data['overloadIndex'] = self.data.pop('overloadPrecedence')

        # correct old values (force melName to a plain str)
        if 'melName' in self.data:
            #logger.debug( "correcting melName %s %s %s" % (self.className, self.methodName, str(self.data['melName']) ) )
            self.data['melName'] = str(self.data['melName'])

        overloadId = self.data.get('overloadIndex', 0)
        if overloadId is None:
            # in a previous test, it was determined there were no wrappable overload methods,
            # but there may be now. try again.
            overloadId = 0

        # ensure we don't use a value that is not valid
        # NOTE(review): when enabledArray[overloadId] is False but a later
        # index is valid, the loop breaks without reassigning overloadId to
        # that index -- confirm whether keeping the original (invalid) index
        # is intended here.
        for i in range(overloadId, len(enabledArray)+1):
            try:
                if enabledArray[i]:
                    break
            except IndexError: # went too far, so none are valid
                overloadId = None
        # if val is None:
        #     # nothing valid
        #     self.data.pop('overloadIndex', None)
        # else:
        self.data['overloadIndex'] = overloadId
    def crossReference(self, melName):
        """ create an entry for the melName which points to the data being tracked for the api name"""
        factories.apiToMelData[ (self.className, melName ) ] = self.data
    def uncrossReference(self, melName):
        """Remove the melName entry created by `crossReference`."""
        factories.apiToMelData.pop( (self.className, melName ) )
    def updateMelNames(self, melMethods):
        """If no melName is recorded yet, try to match this api method name
        against *melMethods* (which may contain ``name*`` wildcards) and
        record + cross-reference the first match."""
        # melName
        if 'melName' not in self.data:
            match = None
            for method in melMethods:
                methreg = re.compile(method.replace('*', '.{0,1}') + '$')
                #print self.methodName, methreg
                if methreg.match( self.methodName ):
                    match = str(method)
                    break
            if match:
                logger.debug( "%s.%s: adding melName %s" % ( self.className, self.methodName, match ) )
                self.data['melName'] = match
                self.crossReference( match )
    def buildUI(self, filter=None):
        """Build the UI row(s) for this method.

        *filter*, when truthy, is expected to be a set of type names; the row
        is only built (and True returned) if one of the method's overloads
        uses at least one of those types.  Returns False when filtered out.
        """
        if filter:
            match = False
            for i, info in enumerate( self.methodInfoList):
                argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
                if filter.intersection( argUtil.getInputTypes() + argUtil.getOutputTypes() ):
                    match = True
                    break
            if match == False:
                return False
        self.layout = { 'columnAlign'  : [1,'right'],
                        'columnAttach' : [1,'right',8] }
        #print className, self.methodName, melMethods
        isOverloaded = len(self.methodInfoList)>1
        self.frame = pm.frameLayout( w=FRAME_WIDTH, labelVisible=False, collapsable=False)
        logger.debug("building row for %s - %s" % (self.methodName, self.frame))
        col = pm.columnLayout()
        self.rows = []
        self.overloadPrecedenceColl = None
        self.enabledChBx = pm.checkBox(label=self.methodName,
                                       changeCommand=pm.CallbackWithArgs( MethodRow.enableCB, self ) )
        if isOverloaded:
            # one radio button per overload so the user can pick which one wins
            self.overloadPrecedenceColl = pm.radioCollection()
            for i in range( len(self.methodInfoList) ) :
                self.createMethodInstance(i)
        else:
            #row = rowLayout( self.methodName + '_rowMain', nc=2, cw2=[200, 400] )
            #self.enabledChBx = checkBox(label=self.methodName, changeCommand=CallbackWithArgs( MethodRow.enableCB, self ) )
            #text(label='')
            self.createMethodInstance(0)
            #setParent('..')
        pm.setParent(col)
        pm.separator(w=800, h=6)

        #self.row = rowLayout( self.methodName + '_rowSettings', nc=4, cw4=[200, 160, 180, 160] )
        #self.rows.append(row)
        self.row = pm.rowLayout( self.methodName + '_rowSettings', nc=2, cw2=[200, 220], **self.layout )
        self.rows.append(self.row)

        # create ui elements
        pm.text(label='Mel Equivalent')
        self.melNameTextField = pm.textField(w=170, editable=False)
        self.melNameOptMenu = pm.popupMenu(parent=self.melNameTextField,
                                           button=1,
                                           postMenuCommand=pm.Callback( MethodRow.populateMelNameMenu, self ) )
        pm.setParent('..')

        self.row2 = pm.rowLayout( self.methodName + '_rowSettings2', nc=3, cw3=[200, 180, 240], **self.layout )
        self.rows.append(self.row2)

        pm.text(label='Use Name')
        self.nameMode = pm.radioButtonGrp(label='', nrb=3, cw4=[1,50,50,50], labelArray3=['api', 'mel', 'other'] )
        self.altNameText = pm.textField(w=170, enable=False)
        self.altNameText.changeCommand( pm.CallbackWithArgs( MethodRow.alternateNameCB, self ) )
        self.nameMode.onCommand( pm.Callback( MethodRow.nameTypeCB, self ) )
        isEnabled = self.data.get('enabled', True)

        # UI SETUP
        melName = self.data.get('melName', '')
        try:
            #self.melNameOptMenu.setValue( melName )
            self.melNameTextField.setText(melName)
            if melName != '':
                self.parent.parent.assignMelMethod( melName )
        except RuntimeError:
            # it is possible for a method name to be listed here that was set from a different view,
            # where this class was a super class and more mel commands were available. expand the option list,
            # and make this frame read-only
            pm.menuItem( label=melName, parent=self.melNameOptMenu )
            self.melNameOptMenu.setValue( melName )
            logger.debug( "making %s frame read-only" % self.methodName )
            self.frame.setEnable(False)

        self.enabledChBx.setValue( isEnabled )
        self.row.setEnable( isEnabled )
        self.row2.setEnable( isEnabled )

        name = self.data['useName']
        if name == 'API' :
            self.nameMode.setSelect( 1 )
            self.altNameText.setEnable(False)
        elif name == 'MEL' :
            self.nameMode.setSelect( 2 )
            self.altNameText.setEnable(False)
        else :
            self.nameMode.setSelect( 3 )
            self.altNameText.setText(name)
            self.altNameText.setEnable(True)

        if self.overloadPrecedenceColl:
            items = self.overloadPrecedenceColl.getCollectionItemArray()
            try:
                val = self.data.get('overloadIndex', 0)
                if val is None:
                    logger.info( "no wrappable options for method %s" % self.methodName )
                    self.frame.setEnable( False )
                else:
                    self.overloadPrecedenceColl.setSelect( items[ val ] )
            except Exception:
                # best effort: a stale/bad overload index must not break the UI
                pass

            # # ensure we don't use a value that is not valid
            # for val in range(val, len(enabledArray)+1):
            #     try:
            #         if enabledArray[val]:
            #             break
            #     except IndexError:
            #         val = None
            # if val is not None:
            #     self.overloadPrecedenceColl.setSelect( items[ val ] )

        pm.setParent('..')

        pm.setParent('..') # frame
        pm.setParent('..') # column
        return True
    def enableCB(self, *args ):
        """Checkbox callback: only a negative ('enabled' = False) value is
        stored; an enabled method simply has no entry."""
        logger.debug( 'setting enabled to %s' % args[0] )
        if args[0] == False:
            self.data['enabled'] = False
        else:
            self.data.pop('enabled', None)
        self.row.setEnable( args[0] )
    def nameTypeCB(self ):
        """Radio-button callback: record the chosen naming mode
        ('API', 'MEL', or the free-form alternate name)."""
        logger.info( 'setting name type' )
        selected = self.nameMode.getSelect()
        if selected == 1:
            val = 'API'
            self.altNameText.setEnable(False)
        elif selected == 2:
            val = 'MEL'
            self.altNameText.setEnable(False)
        else:
            val = str(self.altNameText.getText())
            self.altNameText.setEnable(True)
        logger.debug( 'data %s' % self.data )
        self.data['useName'] = val
    def alternateNameCB(self, *args ):
        """Text-field callback: store the free-form alternate name."""
        self.data['useName'] = str(args[0])
#     def formatAnnotation(self, apiClassName, methodName ):
#         defs = []
#         try:
#             for methodInfo in factories.apiClassInfo[apiClassName]['methods'][methodName] :
#                 args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
#                 defs.append( '%s( %s )' % ( methodName, args ) )
#             return '\n'.join( defs )
#         except KeyError:
#             print "could not find documentation for", apiClassName, methodName
    def overloadPrecedenceCB(self, i):
        """Radio-button callback: overload *i* becomes the wrapped one."""
        logger.debug( 'overloadPrecedenceCB' )
        self.data['overloadIndex'] = i
    def melNameChangedCB(self, newMelName):
        """Popup-menu callback: (un)link this api method and a mel command,
        keeping the panel's assigned/unassigned lists in sync."""
        oldMelName = str(self.melNameTextField.getText())
        if oldMelName:
            self.uncrossReference( oldMelName )
        if newMelName == '[None]':
            print("removing melName")
            self.data.pop('melName',None)
            self.parent.parent.unassignMelMethod( oldMelName )
            self.melNameTextField.setText('')
        else:
            print("adding melName %s" % newMelName)
            self.crossReference( newMelName )
            self.data['melName'] = newMelName
            self.parent.parent.assignMelMethod( newMelName )
            self.melNameTextField.setText(newMelName)
    def populateMelNameMenu(self):
        """called to populate the popup menu for choosing the mel equivalent to an api method"""
        self.melNameOptMenu.deleteAllItems()
        pm.menuItem(parent=self.melNameOptMenu, label='[None]', command=pm.Callback( MethodRow.melNameChangedCB, self, '[None]' ))
        # need to add a listForNone to this in windows
        items = self.parent.parent.unassignedMelMethodLister.getAllItems()
        if items:
            for method in items:
                pm.menuItem(parent=self.melNameOptMenu, label=method, command=pm.Callback( MethodRow.melNameChangedCB, self, str(method) ))
    def getEnabledArray(self):
        """returns an array of booleans that correspond to each override method and whether they can be wrapped"""
        array = []
        for i, info in enumerate( self.methodInfoList ):
            argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
            array.append( argUtil.canBeWrapped() )
        return array
    def createMethodInstance(self, i ):
        """Build the UI for overload *i* of this method: the main info row
        plus one row per argument (and the return value).  Returns whether
        the overload can be wrapped."""
        #setUITemplate('attributeEditorTemplate', pushTemplate=1)
        rowSpacing = [30, 20, 400]
        #try:
        argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
        proto = argUtil.getPrototype( className=False, outputs=True, defaults=False )
        enable = argUtil.canBeWrapped()
        if argUtil.isDeprecated():
            pm.text(l='DEPRECATED')
        # main info row
        row = pm.rowLayout( '%s_rowMain%s' % (self.methodName,i), nc=3, cw3=rowSpacing, enable=enable )
        self.rows.append(row)
        pm.text(label='')
        if self.overloadPrecedenceColl is not None:
            # toggle for overloaded methods
            pm.radioButton(label='', collection=self.overloadPrecedenceColl,
                           enable = enable,
                           onCommand=pm.Callback( MethodRow.overloadPrecedenceCB, self, i ))
        pm.text( l='', #l=proto,
                 annotation = self.methodInfoList[i]['doc'],
                 enable = enable)
        pm.setParent('..')
        # prefer the user's overridden arg list, falling back to the parsed one
        try:
            argList = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][i]['args']
        except (KeyError, IndexError):
            argList = self.methodInfoList[i]['args']
        returnType = self.methodInfoList[i]['returnType']
        types = self.methodInfoList[i]['types']
        for arg , type, direction in argList:
            type = str(types[arg])
            assert arg != 'return'
            self._makeArgRow( i, type, arg, direction, self.methodInfoList[i]['argInfo'][arg]['doc'] )
        if returnType:
            self._makeArgRow( i, returnType, 'return', 'return', self.methodInfoList[i]['returnInfo']['doc'] )
        pm.separator(w=800, h=14)
        return enable
        # methodInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName][overloadNum]
        # args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
        # return '( %s ) --> ' % ( args )
        #except:
        #    print "could not find documentation for", apiClassName, methodName
    def setUnitType(self, methodIndex, argName, unitType ):
        """Record *unitType* for one argument (or 'return') of overload
        *methodIndex* in `factories.apiClassOverrides`, creating the nested
        dicts on demand."""
        if self.apiClassName not in factories.apiClassOverrides:
            factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
        methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
        if self.apiMethodName not in methodOverrides:
            methodOverrides[self.apiMethodName] = {}
        if argName == 'return':
            if methodIndex not in methodOverrides[self.apiMethodName]:
                methodOverrides[self.apiMethodName][methodIndex] = { 'returnInfo' : {} }
            methodOverrides[self.apiMethodName][methodIndex]['returnInfo']['unitType'] = unitType
        else:
            if methodIndex not in methodOverrides[self.apiMethodName]:
                methodOverrides[self.apiMethodName][methodIndex] = { 'argInfo' : {} }
            if argName not in methodOverrides[self.apiMethodName][methodIndex]['argInfo']:
                methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName] = {}
            methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName]['unitType'] = unitType
    def setDirection(self, methodIndex, argName, direction ):
        """Record the in/out *direction* for one argument of overload
        *methodIndex*, rebuilding the args/inArgs/outArgs override lists."""
        if self.apiClassName not in factories.apiClassOverrides:
            factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
        methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
        if self.apiMethodName not in methodOverrides:
            methodOverrides[self.apiMethodName] = {}
        if methodIndex not in methodOverrides[self.apiMethodName]:
            methodOverrides[self.apiMethodName][methodIndex] = { }
        try:
            argList = methodOverrides[self.apiMethodName][methodIndex]['args']
        except KeyError:
            argList = self.methodInfoList[methodIndex]['args']
        newArgList = []
        inArgs = []
        outArgs = []
        for i_argName, i_argType, i_direction in argList:
            if i_argName == argName:
                argInfo = ( i_argName, i_argType, direction )
            else:
                argInfo = ( i_argName, i_argType, i_direction )
            if argInfo[2] == 'in':
                inArgs.append( i_argName )
            else:
                outArgs.append( i_argName )
            newArgList.append( argInfo )
        # replace the whole override entry for this overload
        methodOverrides[self.apiMethodName][methodIndex] = { }
        methodOverrides[self.apiMethodName][methodIndex]['args'] = newArgList
        methodOverrides[self.apiMethodName][methodIndex]['inArgs'] = inArgs
        methodOverrides[self.apiMethodName][methodIndex]['outArgs'] = outArgs
    def _makeArgRow(self, methodIndex, type, argName, direction, annotation=''):
        """Build one row showing an argument's type and name, plus option
        menus for its direction and (for unit-bearing types) its unit type."""
        COL1_WIDTH = 260
        COL2_WIDTH = 120
        pm.rowLayout( nc=4, cw4=[COL1_WIDTH,COL2_WIDTH, 70, 150], **self.layout )
        label = str(type)
        pm.text( l=label, ann=annotation )
        pm.text( l=argName, ann=annotation )
        if direction == 'return':
            pm.text( l='(result)' )
        else:
            direction_om = pm.optionMenu(l='', w=60, ann=annotation, cc=pm.CallbackWithArgs( MethodRow.setDirection, self, methodIndex, argName ) )
            for unit in ['in', 'out']:
                pm.menuItem(l=unit)
            direction_om.setValue(direction)
        if self._isPotentialUnitType(type) :
            om = pm.optionMenu(l='', ann=annotation, cc=pm.CallbackWithArgs( MethodRow.setUnitType, self, methodIndex, argName ) )
            for unit in ['unitless', 'linear', 'angular', 'time']:
                pm.menuItem(l=unit)
            if argName == 'return':
                try:
                    value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['returnInfo']['unitType']
                except KeyError:
                    pass
            else:
                try:
                    value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['argInfo'][argName]['unitType']
                except KeyError:
                    pass
            # 'value' is deliberately left unbound when no override exists;
            # the except below swallows the resulting NameError as well as
            # any bad menu value
            try:
                om.setValue(value)
            except Exception: pass
        else:
            pm.text( l='', ann=annotation )
        pm.setParent('..')
    def _isPotentialUnitType(self, type):
        """True for types that may carry Maya units (MVector or double*)."""
        type = str(type)
        return type == 'MVector' or type.startswith('double')
def _getClass(className):
    """Return the pymel class named *className*, searching the nodetypes,
    datatypes and general modules in that order; None if none defines it."""
    for module in (pm.nodetypes, pm.datatypes, pm.general):
        try:
            return getattr(module, className)
        except AttributeError:
            continue
def getApiClassName( className ):
    """Return the name of the api class backing *className*, or None when it
    cannot be determined.  Logs a warning (and returns None) when the pymel
    class itself cannot be found."""
    pymelClass = _getClass(className)
    if not pymelClass:
        logger.warning( "could not find class %s" % (className) )
        return
    apiClassName = None
    # the api class may be recorded under either attribute name
    for attrName in ('__apicls__', 'apicls'):
        if attrName in pymelClass.__dict__:
            apiClassName = pymelClass.__dict__[attrName].__name__
            break
    #print "could not determine api class for", cls.__name__  (apiClassName stays None)
    return apiClassName
def getClassHierarchy( className ):
    """Yield ``(pymelClassName, apiClassName or None)`` for every class in the
    mro of *className*, base-most first.  Logs a warning and yields nothing
    when the pymel class cannot be found."""
    pymelClass = _getClass(className)
    if not pymelClass:
        logger.warning( "could not find class %s" % (className) )
        return
    for cls in reversed(inspect.getmro(pymelClass)):
        apiClassName = None
        # the api class may be recorded under either attribute name
        for attrName in ('__apicls__', 'apicls'):
            if attrName in cls.__dict__:
                apiClassName = cls.__dict__[attrName].__name__
                break
        yield cls.__name__, apiClassName
def setManualDefaults():
    """Seed `factories.apiClassOverrides` with hand-curated argument defaults
    and setter/getter ("invertible") pairs, then repair the MSpace.Space enum
    via `fixSpace`."""
    # set some defaults
    # TODO : allow these defaults to be controlled via the UI
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setScalePivot', 0, 'defaults', 'balance' ), True )
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotatePivot', 0, 'defaults', 'balance' ), True )
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotateOrientation', 0, 'defaults', 'balance' ), True )
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnSet', 'methods', 'getMembers', 0, 'defaults', 'flatten' ), False )
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnDagNode', 'methods', 'instanceCount', 0, 'defaults', 'total' ), True )
    pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnMesh', 'methods', 'createColorSetWithName', 1, 'defaults', 'modifier' ), None )
    # add some manual invertibles: THESE MUST BE THE API NAMES
    invertibles = [ ('MPlug', 0, 'setCaching', 'isCachingFlagSet') ,
                    ('MPlug', 0, 'setChannelBox', 'isChannelBoxFlagSet'),
                    ('MFnTransform', 0, 'enableLimit', 'isLimited'),
                    ('MFnTransform', 0, 'setLimit', 'limitValue'),
                    ('MFnTransform', 0, 'set', 'transformation'),
                    ('MFnRadialField', 0, 'setType', 'radialType')
                    ]
    for className, methodIndex, setter, getter in invertibles:
        # append to the class-level invertibles list
        curr = pm.util.getCascadingDictItem( factories.apiClassInfo, (className, 'invertibles' ), [] )
        pair = (setter, getter)
        if pair not in curr:
            curr.append( pair )
        pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'invertibles'), curr )
        # add the individual method entries (True/False marks which member of
        # the pair is the setter)
        pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), (getter, True) )
        pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), (setter, False) )
    # explicitly mark these as NOT invertible
    nonInvertibles = [ ( 'MFnMesh', 0, 'setFaceVertexNormals', 'getFaceVertexNormals' ),
                       ( 'MFnMesh', 0, 'setFaceVertexNormal', 'getFaceVertexNormal' ) ]
    for className, methodIndex, setter, getter in nonInvertibles:
        pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), None )
        pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), None )
    fixSpace()
def fixSpace():
    """Fix the MSpace.Space enumerator: rename the 'postTransform' key to
    'object' and store the corrected enum in `factories.apiClassOverrides`.
    Logs a warning when the key is absent."""
    enum = pm.util.getCascadingDictItem( factories.apiClassInfo, ('MSpace', 'pymelEnums', 'Space') )
    # NOTE: relies on the private _keys mapping of pm.util.Enum
    keys = enum._keys.copy()
    #print keys
    val = keys.pop('postTransform', None)
    if val is not None:
        keys['object'] = val
        newEnum = pm.util.Enum( 'Space', keys )
        pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MSpace', 'pymelEnums', 'Space'), newEnum )
    else:
        logger.warning( "could not fix Space")
def _notifySavingDisabled():
    """Show a modal dialog telling the user that saving is disabled."""
    pm.confirmDialog(
        title='Saving Disabled',
        message='Saving using this UI has been disabled until it'
                ' can be updated. Changes will not be saved.')
def cacheResults():
    """uiDeleted callback for the panel window.  Saving is currently
    disabled, so this only notifies the user; the original save path is kept
    below (commented out) for reference."""
    _notifySavingDisabled()
    return
    # res = pm.confirmDialog( title='Cache Results?',
    #                         message="Would you like to write your changes to disk? If you choose 'No' your changes will be lost when you restart Maya.",
    #                         button=['Yes','No'],
    #                         cancelButton='No',
    #                         defaultButton='Yes')
    # print res
    # if res == 'Yes':
    #     doCacheResults()

# def doCacheResults():
#     print "---"
#     print "adding manual defaults"
#     setManualDefaults()
#     print "merging dictionaries"
#     # update apiClasIfno with the sparse data stored in apiClassOverrides
#     factories.mergeApiClassOverrides()
#     print "saving api cache"
#     factories.saveApiCache()
#     print "saving bridge"
#     factories.saveApiMelBridgeCache()
#     print "---"
| 2.46875 | 2 |
src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 7 | 12984 | <filename>src/test/data/pa3/sample/list_get_element_oob_3.py
# Intentional runtime-error fixture: indexing an empty list must abort with
# an out-of-bounds error.  Do not "fix" -- the compiler test expects failure.
x:[int] = None
x = []
print(x[0])
| 1.476563 | 1 |
modules/vqvc/__init__.py | reppy4620/VCon | 4 | 12985 | from .model import VQVCModel
from .pl_model import VQVCModule
| 1.085938 | 1 |
kornia/constants.py | carlosb1/kornia | 1 | 12986 | <gh_stars>1-10
from typing import Union, TypeVar
from enum import Enum
import torch
# pi as a scalar torch tensor, so it participates in tensor arithmetic
pi = torch.tensor(3.14159265358979323846)

T = TypeVar('T', bound='Resample')    # return type of Resample.get
U = TypeVar('U', bound='BorderType')  # return type of BorderType.get
class Resample(Enum):
    """Interpolation modes accepted by resize/warp operations."""
    NEAREST = 0
    BILINEAR = 1
    BICUBIC = 2

    @classmethod
    def get(cls, value: Union[str, int, T]) -> T:  # type: ignore
        """Normalize *value* -- a member name string (case-insensitive), an
        int value, or an existing member -- to a Resample member.

        Uses exact type checks (not isinstance) on purpose, so e.g. bool is
        not silently treated as int.

        Raises:
            KeyError: unknown name string.
            ValueError: unknown int value.
            TypeError: unsupported input type.
        """
        if type(value) is str:
            return cls[value.upper()]  # type: ignore
        if type(value) is int:
            return cls(value)  # type: ignore
        if type(value) is cls:
            return value  # type: ignore
        raise TypeError(
            "Cannot convert {} of type {} to Resample".format(value, type(value)))
class BorderType(Enum):
    """Padding/border modes accepted by filtering operations."""
    CONSTANT = 0
    REFLECT = 1
    REPLICATE = 2
    CIRCULAR = 3

    @classmethod
    def get(cls, value: Union[str, int, U]) -> U:  # type: ignore
        """Normalize *value* -- a member name string (case-insensitive), an
        int value, or an existing member -- to a BorderType member.

        Uses exact type checks (not isinstance) on purpose, so e.g. bool is
        not silently treated as int.

        Raises:
            KeyError: unknown name string.
            ValueError: unknown int value.
            TypeError: unsupported input type.
        """
        if type(value) is str:
            return cls[value.upper()]  # type: ignore
        if type(value) is int:
            return cls(value)  # type: ignore
        if type(value) is cls:
            return value  # type: ignore
        raise TypeError(
            "Cannot convert {} of type {} to BorderType".format(value, type(value)))
| 2.328125 | 2 |
intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 2 | 12987 | import intrepyd
from intrepyd.iec611312py.plcopen import parse_plc_open_file
from intrepyd.iec611312py.stmtprinter import StmtPrinter
import unittest
from . import from_fixture_path
class TestOpenPLC(unittest.TestCase):
    """Checks that PLCopen XML fixtures parse into the expected POUs and that
    printed statements round-trip as expected."""

    def test_simple_1(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/simple1.xml'))
        self.assertEqual(1, len(parsed))
        printer = StmtPrinter()
        printer.processStatements(parsed[0].statements)
        self.assertEqual('output1 := (local1 + input1);', printer.result)

    def test_datatype_1(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/datatype1.xml'))
        self.assertEqual(1, len(parsed))

    def test_if_1(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/if1.xml'))
        self.assertEqual(1, len(parsed))

    def test_if_2(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/if2.xml'))
        self.assertEqual(1, len(parsed))

    def test_if_3(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/if3.xml'))
        self.assertEqual(1, len(parsed))

    def test_if_4(self):
        parsed = parse_plc_open_file(from_fixture_path('openplc/if4.xml'))
        self.assertEqual(1, len(parsed))
        printer = StmtPrinter()
        printer.processStatements(parsed[0].statements)
        self.assertEqual('IF (100 < (UDINT_TO_DINT((CONST_IN.Tolerance_Max / 100)) * UnitDelay_2_DSTATE)) THEN overInfusion := 1; END_IF;',
                         printer.result)

    # It is slow, as expected
    # def test_infusion_pump(self):
    #     pous = parsePlcOpenFile('tests/openplc/GPCA_SW_Functional_subst.xml')
    #     self.assertEqual(1, len(pous))
if __name__ == "__main__":
unittest.main()
| 2.34375 | 2 |
reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 31 | 12988 | <gh_stars>10-100
__author__ = '<NAME> (<EMAIL>)'
import numpy as np
import scipy.sparse as sparse
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator):
    """
    Read an undirected edge list and return its adjacency matrix.

    Inputs:  - file_path: Path of the stored edge list.
             - separator: Delimiter between values (e.g. ",", "\t", " ").
    Outputs: - adjacency_matrix: SciPy sparse COOrdinate matrix.

    Node ids in the file are assumed to be 1-based with no gaps at the end.
    """
    file_row_generator = get_file_row_generator(file_path, separator)

    row = list()
    col = list()
    for file_row in file_row_generator:
        source_node = np.int64(file_row[0])
        target_node = np.int64(file_row[1])
        # store the edge and, since the graph is undirected, its reciprocal
        row.extend((source_node, target_node))
        col.extend((target_node, source_node))

    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.ones_like(row, dtype=np.float64)

    number_of_nodes = np.max(row)  # I assume that there are no missing nodes at the end.

    # shift ids so array indexing starts from 0
    row -= 1
    col -= 1

    adjacency_matrix = sparse.coo_matrix((data, (row, col)),
                                         shape=(number_of_nodes, number_of_nodes))
    return adjacency_matrix
def read_node_label_matrix(file_path, separator, number_of_nodes):
    """
    Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.

    Inputs:  - file_path: The path where the node-label matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")
             - number_of_nodes: The number of nodes of the full graph. It is possible that not all nodes are labelled.

    Outputs: - node_label_matrix: The node-label associations in a SciPy sparse CSR matrix.
             - number_of_categories: The number of categories/classes the nodes may belong to.
             - labelled_node_indices: A NumPy array containing the labelled node indices.
    """
    pair_generator = get_file_row_generator(file_path, separator)

    # Collect (node, label) pairs.
    row = []
    col = []
    for pair in pair_generator:
        row.append(np.int64(pair[0]))
        col.append(np.int64(pair[1]))

    # Distinct labels define the category count (no missing labels assumed);
    # distinct nodes form the labelled subset (nodes may be unlabelled).
    number_of_categories = len(set(col))
    labelled_node_indices = np.array(list(set(row)))

    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.ones_like(row, dtype=np.float64)

    # Shift the 1-based ids to 0-based array indices.
    row -= 1
    col -= 1
    labelled_node_indices -= 1

    # Form sparse node-label matrix and convert to CSR for fast row slicing.
    node_label_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_categories)).tocsr()

    return node_label_matrix, number_of_categories, labelled_node_indices
| 3.296875 | 3 |
# create_tweet_classes.py
# this assumes the existence of a get_class(day, hour, ticker) function
# that returns the class (0, 1, or -1) for a given hour and ticker
import collections
import json
import random
# Accumulator for labelled tweets keyed by some grouping value.
# NOTE(review): never read or written in this script -- presumably used by
# another revision; confirm before removing.
refined_tweets = collections.defaultdict(list)
#returns label for company and time
def getLabel(ticker, month, day, hour):
    # Placeholder: a uniformly random class in {-1, 0, 1}.  The file header
    # says this should call get_class(day, hour, ticker) -- TODO wire it in.
    return random.randint(-1,1)
#parses individual json file
def parseJSON(data, month, day, hour):
    """Convert one tweet-JSON mapping into a list of [text, label] pairs.

    `data` maps tweet ids to tweet dicts carrying at least the keys 'text'
    and 'company'; month/day/hour locate the tweets in time for the label
    lookup performed by getLabel.
    """
    results = []
    # .values() works on both Python 2 and 3 (was the Py2-only .itervalues()).
    for tweet in data.values():
        text = tweet['text']
        label = getLabel(tweet['company'], month, day, hour)
        results.append([text, label])
    return results
def loadData(months, days):
    """Load every 15-minute tweet snapshot for the given months and days.

    Reads files named tweets_<month>_<day>_<hour>_<minute>.dat (JSON) for
    market hours 10:00-14:45, converts each into [text, label] pairs via
    parseJSON, and returns the concatenated list.  Aborts the process on a
    corrupt snapshot file.
    """
    hours = [10, 11, 12, 13, 14]
    minutes = [0, 15, 30, 45]
    output_data = []
    for month in months:
        for day in days:
            for hour in hours:
                for minute in minutes:
                    filename = 'tweets_{}_{}_{}_{}.dat'.format(month, day, hour, minute)
                    # The with-block closes the file; the explicit f.close()
                    # that used to follow was redundant and was removed.
                    with open(filename, 'r') as f:
                        try:
                            data = json.load(f)
                        except ValueError as err:
                            # Report the corrupt file and abort: a bad
                            # snapshot means the dataset is incomplete.
                            # (print() form works on Python 2 and 3.)
                            print(filename)
                            exit(1)
                        output_data += parseJSON(data, month, day, hour)
    print(len(output_data))
    print(output_data[0:10])
    return output_data
def main():
    # Days of the month to load; month 4 = April.
    days = [9,10,11,12,13,16,17]
    loadData([4], days)
if __name__=='__main__':
    main()
| 3.109375 | 3 |
from unittest import TestCase
import json
from helpers import *
from pytezos import ContractInterface, pytezos, MichelsonRuntimeError
from pytezos.context.mixin import ExecutionContext
# Token contract addresses used by the tests (redacted placeholders in this
# copy of the file).
token_a = "<KEY>"
token_b = "<KEY>"
token_c = "<KEY>"
token_d = "<KEY>"

# Trading-pair parameter dicts for Dex.addPair: each side names an FA2 token
# by its contract address and token id.
pair_ab = {
    "token_a_type" : {
        "fa2": {
            "token_address": token_a,
            "token_id": 0
        }
    },
    "token_b_type": {
        "fa2": {
            "token_address": token_b,
            "token_id": 1
        }
    },
}

pair_bc = {
    "token_a_type": {
        "fa2": {
            "token_address": token_b,
            "token_id": 1
        }
    },
    "token_b_type" : {
        "fa2": {
            "token_address": token_c,
            "token_id": 2
        }
    }
}

pair_ac = {
    "token_a_type" : {
        "fa2": {
            "token_address": token_a,
            "token_id": 0
        }
    },
    "token_b_type" : {
        "fa2": {
            "token_address": token_c,
            "token_id": 2
        }
    }
}

pair_cd = {
    "token_a_type" : {
        "fa2": {
            "token_address": token_c,
            "token_id": 2
        }
    },
    "token_b_type" : {
        "fa2": {
            "token_address": token_d,
            "token_id": 3
        }
    }
}
class TokenToTokenRouterTest(TestCase):
    """Integration tests for multi-hop token-to-token swaps (the router).

    All tests run against the compiled Dex contract interpreted locally via
    pytezos; chain.interpret() evaluates a call without applying it, while
    chain.execute() applies it to the local state.
    """

    @classmethod
    def setUpClass(cls):
        """Load the compiled Dex contract and its initial storage once."""
        cls.maxDiff = None
        dex_code = open("./integration_tests/compiled/Dex.tz", 'r').read()
        cls.dex = ContractInterface.from_michelson(dex_code)
        initial_storage_michelson = json.load(open("./integration_tests/compiled/storage.json", 'r'))
        cls.init_storage = cls.dex.storage.decode(initial_storage_michelson)

    def test_tt_token_to_token_router(self):
        """A two-hop swap (a->b->c) must equal the two swaps done one by one."""
        amount_in=10_000

        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
        res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))

        # interpret the call without applying it
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 1,
                    "operation": "a_to_b",
                }
            ],
            "amount_in" : amount_in,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
        self.assertEqual(contract_in["token_address"], token_a)
        self.assertEqual(contract_in["amount"], 10_000)

        routed_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(routed_out["token_address"], token_c)

        # same swap but one by one
        res = chain.interpret(self.dex.swap(
            swaps=[{
                "pair_id": 0,
                "operation": "a_to_b",
            }],
            amount_in=amount_in,
            min_amount_out=1,
            receiver=julian,
            deadline=100_000
        ))
        transfers = parse_token_transfers(res)
        token_b_out = next(v for v in transfers if v["destination"] == julian)

        res = chain.interpret(self.dex.swap(
            swaps=[{
                "pair_id": 1,
                "operation": "a_to_b",
            }],
            amount_in=token_b_out["amount"],
            min_amount_out=1,
            receiver=julian,
            deadline=100_000,
        ))
        transfers = parse_token_transfers(res)
        token_c_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(routed_out["amount"], token_c_out["amount"])

    def test_tt_router_triangle(self):
        """Swap around a triangle a->b->c->a and check the compounded fee loss."""
        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
        res = chain.execute(self.dex.addPair(pair_bc, 100_000_000_000, 100_000_000_000))
        res = chain.execute(self.dex.addPair(pair_ac, 100_000_000_000, 100_000_000_000))

        # interpret the call without applying it
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 1,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 2,
                    "operation": "b_to_a",
                }
            ],
            "amount_in" : 10_000,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        token_c_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(token_c_out["amount"], 9909) # ~ 9910 by compound interest formula

    def test_tt_router_ab_ba(self):
        """Round-trip a->b then b->a on the same pair loses only the fees."""
        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))

        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 0,
                    "operation": "b_to_a",
                }
            ],
            "amount_in" : 10_000,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        token_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(token_out["amount"], 9939)

    def test_tt_router_impossible_path(self):
        """Routes through disconnected or repeated pairs must be rejected."""
        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 1111, 3333))
        res = chain.execute(self.dex.addPair(pair_cd, 5555, 7777))

        # can't find path
        with self.assertRaises(MichelsonRuntimeError):
            res = chain.interpret(self.dex.swap({
                "swaps" : [
                    {
                        "pair_id": 0,
                        "operation": "a_to_b",
                    },
                    {
                        "pair_id": 1,
                        "operation": "a_to_b",
                    }
                ],
                "amount_in" : 334,
                "min_amount_out" : 1,
                "receiver" : julian,
                "deadline": 100_000
            }))

        with self.assertRaises(MichelsonRuntimeError):
            res = chain.interpret(self.dex.swap({
                "swaps" : [
                    {
                        "pair_id": 0,
                        "operation": "a_to_b",
                    },
                    {
                        "pair_id": 0,
                        "operation": "a_to_b",
                    }
                ],
                "amount_in" : 334,
                "min_amount_out" : 1,
                "receiver" : julian,
                "deadline": 100_000
            }))

    def test_tt_router_cant_overbuy(self):
        """Swapping far more than pool liquidity can never drain a pool."""
        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 100_000, 100_000))
        res = chain.execute(self.dex.addPair(pair_bc, 10_000, 10_000))
        res = chain.execute(self.dex.addPair(pair_ac, 1_000_000, 1_000_000))

        # overbuy at the very beginning
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                }
            ],
            "amount_in" : 100_000_000_000,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        token_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(token_out["amount"], 99_999)

        # overbuy at the end
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 1,
                    "operation": "a_to_b",
                }
            ],
            "amount_in" : 100_000_000,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        token_out = next(v for v in transfers if v["destination"] == julian)
        self.assertLess(token_out["amount"], 9_999)

        # overbuy in the middle
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 1,
                    "operation": "a_to_b",
                },
                {
                    "pair_id": 2,
                    "operation": "b_to_a",
                }
            ],
            "amount_in" : 10_000_000_000,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        token_out = next(v for v in transfers if v["destination"] == julian)
        self.assertLess(token_out["amount"], 9_999)

    def test_tt_router_mixed_fa2_fa12(self):
        """Routing works across pairs that mix FA1.2 and FA2 token types."""
        pair_ab = {
            "token_a_type" : {
                "fa12": token_b,
            },
            "token_b_type": {
                "fa2": {
                    "token_address": token_a,
                    "token_id": 1
                }
            },
        }

        pair_bc = {
            "token_a_type" : {
                "fa12": token_b,
            },
            "token_b_type" : {
                "fa2": {
                    "token_address": token_c,
                    "token_id": 2
                }
            }
        }

        amount_in=10_000

        chain = LocalChain(storage=self.init_storage)
        res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
        res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))

        # interpret the call without applying it
        res = chain.interpret(self.dex.swap({
            "swaps" : [
                {
                    "pair_id": 0,
                    "operation": "b_to_a",
                },
                {
                    "pair_id": 1,
                    "operation": "a_to_b",
                }
            ],
            "amount_in" : amount_in,
            "min_amount_out" : 1,
            "receiver" : julian,
            "deadline": 100_000
        }))
        transfers = parse_token_transfers(res)
        contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
        self.assertEqual(contract_in["token_address"], token_a)
        self.assertEqual(contract_in["amount"], 10_000)

        routed_out = next(v for v in transfers if v["destination"] == julian)
        self.assertEqual(routed_out["token_address"], token_c)
| 2.078125 | 2 |
def remove_duplicates(lst):
    """Return a new list with duplicates removed, keeping first-seen order.

    Elements must be hashable.  Uses a seen-set so the whole pass is O(n)
    instead of the O(n^2) list-membership scan.
    """
    seen = set()
    unique = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
| 3.65625 | 4 |
import datetime
import datetime
from datetime import timedelta
import pprint
from config import *
from helper import *
import time
import logging
from logging.handlers import RotatingFileHandler
# Module-wide logger: DEBUG level with a rotating file handler (2 backups of
# ~200 kB each) so the monitor can run unattended without filling the disk.
logger = logging.getLogger('Twitch Relay Monitor')
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/home/pi/twitch_relay_monitor/logs/app.log', maxBytes=200000, backupCount=2)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
def print_verbose(comment):
    """Route *comment* to the console in verbose mode, else to the log file."""
    if verbose_mode != 1:
        logger.info(comment)
    else:
        # verbose mode: echo straight to the screen
        print(comment)
#First, start by getting token to access Twitch api
r=get_token(client_id,client_secret,grant_type,scope)
if r == False:
# if there is a problem, end the program
logger.error("Can't Auth user")
exit(1)
# since streamer username is given we need to get its broadcaster id for other requests
broadcaster=get_broadcaster_id(client_id,username)
if broadcaster==False:
# if there is a problem, end the program
logger.error("Can not get broadcster id")
exit(1)
if "access_token" not in r:
# if there is a problem, end the program
logger.error("Access token is missing " + str(r))
exit(1)
access_token=r['access_token'];
expires_in=r['expires_in']
# Fresh token interval will keep track of the time we need to validate the token
fresh_token_interval=token_validate_interval
skip_count=0
# Main polling loop: refresh the token when near expiry, check stream /
# follow / hype-train state, drive the relays, then sleep for a period that
# depends on what was observed.
while True:
    wait_time=online_user_wait_time
    # refresh token if expired
    if fresh_token_interval <30:
        #confirm the token is valid
        if is_valid_token(access_token) ==False:
            r=get_token(client_id,client_secret,grant_type,scope)
            if r ==False:
                # transient failure: count it, back off, and retry the loop
                skip_count=skip_count+1
                logger.info("Fresh Token Skip get token , skip:" + str(skip_count))
                time.sleep(skip_wait_time)
                continue
            access_token=r['access_token'];
            expires_in=r['expires_in']
        fresh_token_interval=token_validate_interval

    # Stream state drives the "stream" relay; going offline clears the
    # hype-train and follow relays too.
    if is_user_live(client_id,access_token,username):
        print_verbose("User ["+username+"] online")
        set_stream(1)
        user_streaming_flag=1
    else:
        print_verbose("User ["+username+"] offline")
        set_hypetrain(0)
        set_follow(0)
        set_stream(0)
        user_streaming_flag=0
        wait_time=user_offline_wait_time

    last_hype_train_action=get_last_hype_train_action(client_id,access_token,broadcaster["_id"])
    if last_hype_train_action ==False:
        skip_count=skip_count+1
        logger.info("Hype Train Skip get token , skip:" + str(skip_count))
        time.sleep(skip_wait_time)
        continue

    #retrieve most recent follow event
    last_follow_action=get_last_follow_action(client_id,access_token,broadcaster["_id"])
    if last_follow_action ==False:
        skip_count=skip_count+1
        logger.info("Follow Skip get token , skip:" + str(skip_count))
        time.sleep(skip_wait_time)
        continue

    #mark follow if last follow event is < event notification time from current time
    if user_streaming_flag==1:
        subscribe_time=last_follow_action["data"][0]["followed_at"]
        # Twitch timestamps are ISO-8601 with a trailing Z (assumed UTC).
        subscribe_time=datetime.datetime.strptime(subscribe_time,'%Y-%m-%dT%H:%M:%SZ')
        if datetime.datetime.utcnow() < subscribe_time + timedelta(seconds=event_notification_delay):
            print_verbose("Relay Function - Follow Event Active")
            set_follow(1)
        else:
            set_follow(0)

    #set hype train state
    if(is_train_active(last_hype_train_action["data"])):
        print_verbose("Train Active at level " + str(last_hype_train_action["data"][0]["event_data"]['level']))
        level=last_hype_train_action["data"][0]["event_data"]['level']
        if 1 <= level <= 5:
            if user_streaming_flag==1:
                logger.info("Relay Function - Hype Train Event")
                set_hypetrain(level)
                wait_time=5 # active hype train wait time in seconds
    else:
        print_verbose("Train not active")
        set_hypetrain(0)
        wait_time=online_user_wait_time

    fresh_token_interval=fresh_token_interval-wait_time

    if skip_count == max_skip_count:
        logger.error("Skip count limit reached")
        exit(1)

    time.sleep(wait_time)
    #reset skip_count if one request execute without issue within max_skip_count
    skip_count=0
class Config:
    """Stores the settings for a ptype experiment (paths, dataset, column types)."""

    # Default mapping from integer code to column-type name.
    DEFAULT_TYPES = {
        1: 'integer', 2: 'string', 3: 'float', 4: 'boolean', 5: 'gender',
        6: 'unknown', 7: 'date-iso-8601', 8: 'date-eu',
        9: 'date-non-std-subtype', 10: 'date-non-std',
        11: 'positive integer', 12: 'positive float',
    }

    def __init__(self, _experiments_folder_path='experiments', _dataset_name='dataset',
                 _column_names='unknown', _types=None):
        """Create an experiment configuration.

        :param _experiments_folder_path: root folder for experiment output.
        :param _dataset_name: name of the dataset under study.
        :param _column_names: column names of the dataset ('unknown' if absent).
        :param _types: mapping of type code -> type name; defaults to a copy of
            DEFAULT_TYPES.  (Previously this was a mutable default argument,
            which Python shares between calls -- fixed with the None sentinel.)
        """
        if _types is None:
            _types = dict(self.DEFAULT_TYPES)
        self.main_experiments_folder = _experiments_folder_path
        self.dataset_name = _dataset_name
        self.column_names = _column_names
        self.types = _types
        self.types_as_list = list(_types.values())
        # Column layout: two bookkeeping columns ('missing', 'catch-all')
        # followed by one column per declared type, in mapping order.
        self.columns = ['missing', 'catch-all'] + [_types[key] for key in _types]
import os
from collections import defaultdict
from rbc.omnisci_backend import Array
from rbc.errors import OmnisciServerError
from numba import types as nb_types
import pytest
rbc_omnisci = pytest.importorskip('rbc.omniscidb')
available_version, reason = rbc_omnisci.is_available()
pytestmark = pytest.mark.skipif(not available_version, reason=reason)
@pytest.fixture(scope='module')
def omnisci():
    """Module-scoped OmnisciDB client with a freshly loaded test table.

    Creates a table named after this file with one array column per scalar
    SQL type, loads 5 rows of 6-element arrays, yields the client, then
    drops the table on teardown.
    """
    # TODO: use omnisci_fixture from rbc/tests/__init__.py
    config = rbc_omnisci.get_client_config(debug=not True)
    m = rbc_omnisci.RemoteOmnisci(**config)
    table_name = os.path.splitext(os.path.basename(__file__))[0]
    m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
    sqltypes = ['FLOAT[]', 'DOUBLE[]',
                'TINYINT[]', 'SMALLINT[]', 'INT[]', 'BIGINT[]',
                'BOOLEAN[]']
    # todo: TEXT ENCODING DICT, TEXT ENCODING NONE, TIMESTAMP, TIME,
    # DATE, DECIMAL/NUMERIC, GEOMETRY: POINT, LINESTRING, POLYGON,
    # MULTIPOLYGON, See
    # https://www.omnisci.com/docs/latest/5_datatypes.html
    colnames = ['f4', 'f8', 'i1', 'i2', 'i4', 'i8', 'b']
    table_defn = ',\n'.join('%s %s' % (n, t)
                            for t, n in zip(sqltypes, colnames))
    m.sql_execute(f'CREATE TABLE IF NOT EXISTS {table_name} ({table_defn});')

    data = defaultdict(list)
    for i in range(5):
        for j, n in enumerate(colnames):
            if n == 'b':
                data[n].append([_i % 2 == 0 for _i in range(-3, 3)])
            elif n.startswith('f'):
                data[n].append([i * 10 + _i + 0.5 for _i in range(-3, 3)])
            else:
                data[n].append([i * 10 + _i for _i in range(-3, 3)])
    m.load_table_columnar(table_name, **data)
    m.table_name = table_name
    yield m
    try:
        m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
    except Exception as msg:
        # NOTE(review): "deardown" typo in the message below (runtime string,
        # left unchanged here).
        print('%s in deardown' % (type(msg)))
@pytest.mark.parametrize('c_name', ['int8_t i1', 'int16_t i2', 'int32_t i4', 'int64_t i8',
                                    'float f4', 'double f8'])
@pytest.mark.parametrize('device', ['cpu', 'gpu'])
def test_ptr(omnisci, c_name, device):
    """Array.ptr()/ptr(i) must hand raw C pointers usable by external C code."""
    omnisci.reset()
    if not omnisci.has_cuda and device == 'gpu':
        pytest.skip('test requires CUDA-enabled omniscidb server')
    from rbc.external import external

    if omnisci.compiler is None:
        pytest.skip('test requires clang C/C++ compiler')

    ctype, cname = c_name.split()

    c_code = f'''
    #include <stdint.h>
    #ifdef __cplusplus
    extern "C" {{
    #endif
    {ctype} mysum_impl({ctype}* x, int n) {{
        {ctype} r = 0;
        for (int i=0; i < n; i++) {{
            r += x[i];
        }}
        return r;
    }}

    {ctype} myval_impl({ctype}* x) {{
        return *x;
    }}
    #ifdef __cplusplus
    }}
    #endif
    '''
    omnisci.user_defined_llvm_ir[device] = omnisci.compiler(c_code)
    mysum_impl = external(f'{ctype} mysum_impl({ctype}*, int32_t)')
    myval_impl = external(f'{ctype} myval_impl({ctype}*)')

    @omnisci(f'{ctype}({ctype}[])', devices=[device])
    def mysum_ptr(x):
        return mysum_impl(x.ptr(), len(x))

    @omnisci(f'{ctype}({ctype}[], int32_t)', devices=[device])
    def myval_ptr(x, i):
        return myval_impl(x.ptr(i))

    desrc, result = omnisci.sql_execute(
        f'select {cname}, mysum_ptr({cname}) from {omnisci.table_name}')
    for a, r in result:
        if cname == 'i1':
            # int8 sums wrap around; compare modulo 256
            assert sum(a) % 256 == r % 256
        else:
            assert sum(a) == r

    desrc, result = omnisci.sql_execute(
        f'select {cname}, myval_ptr({cname}, 0), myval_ptr({cname}, 2) from {omnisci.table_name}')
    for a, r0, r2 in result:
        assert a[0] == r0
        assert a[2] == r2
def test_len_i32(omnisci):
    """len() of an int32 array column matches the stored array length."""
    omnisci.reset()

    @omnisci('int64(int32[])')
    def array_sz_int32(x):
        return len(x)

    desrc, result = omnisci.sql_execute(
        f'select i4, array_sz_int32(i4) from {omnisci.table_name}')
    for a, sz in result:
        assert len(a) == sz
def test_len_f64(omnisci):
    """len() of a float64 array column matches the stored array length."""
    omnisci.reset()

    @omnisci('int64(float64[])')
    def array_sz_double(x):
        return len(x)

    desrc, result = omnisci.sql_execute(
        f'select f8, array_sz_double(f8) from {omnisci.table_name}')
    for a, sz in result:
        assert len(a) == sz
@pytest.mark.skipif(available_version[:2] == (5, 1),
                    reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
                        available_version,))
def test_getitem_bool(omnisci):
    """Indexing a boolean array column returns the stored element."""
    omnisci.reset()

    @omnisci('bool(bool[], int64)')
    def array_getitem_bool(x, i):
        return x[i]

    query = f'select b, array_getitem_bool(b, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
def test_getitem_i8(omnisci):
    """Indexing an int8 array column returns the stored element."""
    omnisci.reset()

    @omnisci('int8(int8[], int32)')
    def array_getitem_int8(x, i):
        return x[i]

    query = f'select i1, array_getitem_int8(i1, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
def test_getitem_i32(omnisci):
    """Indexing an int32 array column returns the stored element."""
    omnisci.reset()

    @omnisci('int32(int32[], int32)')
    def array_getitem_int32(x, i):
        return x[i]

    query = f'select i4, array_getitem_int32(i4, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
def test_getitem_i64(omnisci):
    """Indexing an int64 array column returns the stored element."""
    omnisci.reset()

    @omnisci('int64(int64[], int64)')
    def array_getitem_int64(x, i):
        return x[i]

    query = f'select i8, array_getitem_int64(i8, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
def test_getitem_float(omnisci):
    """Indexing float32/float64 array columns preserves value AND Python type."""
    omnisci.reset()

    @omnisci('double(double[], int32)')
    def array_getitem_double(x, i):
        return x[i]

    query = f'select f8, array_getitem_double(f8, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
        assert type(a[2]) == type(item)

    @omnisci('float(float[], int64)')
    def array_getitem_float(x, i):
        return x[i]

    query = f'select f4, array_getitem_float(f4, 2) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, item in result:
        assert a[2] == item
        assert type(a[2]) == type(item)
def test_sum(omnisci):
    """A UDF looping over an int32 array computes the same sum as Python."""
    omnisci.reset()

    @omnisci('int32(int32[])')
    def array_sum_int32(x):
        r = 0
        n = len(x)
        for i in range(n):
            r = r + x[i]
        return r

    query = f'select i4, array_sum_int32(i4) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for a, s in result:
        assert sum(a) == s
@pytest.mark.skipif(available_version[:2] == (5, 1),
                    reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
                        available_version,))
def test_even_sum(omnisci):
    """A UDF can combine a bool-mask column with an int column (masked sum)."""
    omnisci.reset()

    @omnisci('int32(bool[], int32[])')
    def array_even_sum_int32(b, x):
        r = 0
        n = len(x)
        for i in range(n):
            if b[i]:
                r = r + x[i]
        return r

    query = f'select b, i4, array_even_sum_int32(b, i4) from {omnisci.table_name}'
    desrc, result = omnisci.sql_execute(query)
    for b, i4, s in result:
        assert sum([i_ for b_, i_ in zip(b, i4) if b_]) == s
def test_array_setitem(omnisci):
    """Array elements can be written in-place inside a UDF (then restored)."""
    omnisci.reset()

    @omnisci('double(double[], int32)')
    def array_setitem_sum(b, c):
        n = len(b)
        s = 0
        for i in range(n):
            b[i] = b[i] * c  # changes the value inplace
            s += b[i]
            b[i] = b[i] / c
        return s

    query = f'select f8, array_setitem_sum(f8, 4) from {omnisci.table_name}'
    _, result = omnisci.sql_execute(query)

    for f8, s in result:
        assert sum(f8) * 4 == s
def test_array_constructor_noreturn(omnisci):
    """Arrays constructed inside a UDF can be filled and read without returning them."""
    omnisci.reset()
    from rbc.omnisci_backend import Array
    from numba import types

    @omnisci('float64(int32)')
    def array_noreturn(size):
        a = Array(size, types.float64)
        b = Array(size, types.float64)
        c = Array(size, types.float64)
        for i in range(size):
            a[i] = b[i] = c[i] = i + 3.0
        s = 0.0
        for i in range(size):
            s += a[i] + b[i] + c[i] - a[i] * b[i]
        return s

    query = 'select array_noreturn(10)'
    _, result = omnisci.sql_execute(query)
    r = list(result)[0]
    assert (r == (-420.0,))
def test_array_constructor_return(omnisci):
    """A UDF may return an Array it constructed, chosen by a runtime branch."""
    omnisci.reset()
    from rbc.omnisci_backend import Array
    from numba import types
    from rbc.externals.stdio import printf

    @omnisci('float64[](int32)')
    def array_return(size):
        printf("entering array_return(%i)\n", size)
        a = Array(size, types.float64)
        b = Array(size, types.float64)
        for i in range(size):
            a[i] = float(i)
            b[i] = float(size - i - 1)
        if size % 2:
            c = a
        else:
            c = b
        printf("returning array with length %i\n", len(c))
        return c

    query = 'select array_return(9), array_return(10)'
    _, result = omnisci.sql_execute(query)

    r = list(result)[0]
    assert r == (list(map(float, range(9))),
                 list(map(float, reversed(range(10)))))
def test_array_constructor_len(omnisci):
    """len() of a UDF-constructed Array equals its requested size."""
    omnisci.reset()
    from rbc.omnisci_backend import Array
    from numba import types

    @omnisci('int64(int32)')
    def array_len(size):
        a = Array(size, types.float64)
        return len(a)

    query = 'select array_len(30)'
    _, result = omnisci.sql_execute(query)
    assert list(result)[0] == (30,)
def test_array_constructor_getitem(omnisci):
    """Indexing a UDF-constructed Array (dtype given as np.double) works."""
    omnisci.reset()
    from rbc.omnisci_backend import Array
    import numpy as np

    @omnisci('double(int32, int32)')
    def array_ptr(size, pos):
        a = Array(size, np.double)
        for i in range(size):
            a[i] = i + 0.0
        return a[pos]

    query = 'select array_ptr(5, 3)'
    _, result = omnisci.sql_execute(query)
    assert list(result)[0] == (3.0,)
def test_array_constructor_is_null(omnisci):
    """A freshly constructed Array reports is_null() == 0 (not null)."""
    omnisci.reset()
    from rbc.omnisci_backend import Array

    @omnisci('int8(int64)')
    def array_is_null(size):
        a = Array(size, 'double')
        return a.is_null()

    query = 'select array_is_null(3);'
    _, result = omnisci.sql_execute(query)
    assert list(result)[0] == (0,)
# (column type, column name, LLVM cast kind exercised by the conversion)
inps = [('int32', 'i4', 'trunc'), ('int32', 'i4', 'sext'),
        ('int32', 'i4', 'zext'), ('float', 'f4', 'fptrunc'),
        ('double', 'f8', 'fpext')]


@pytest.mark.parametrize("typ, col, suffix", inps,
                         ids=[item[-1] for item in inps])
def test_issue197(omnisci, typ, col, suffix):
    """Regression for issue #197: numba casts inside zeros_like assignment."""
    omnisci.reset()

    import rbc.omnisci_backend as np
    from numba import types

    cast = dict(
        trunc=types.int64,
        sext=types.int8,
        zext=types.uint8,
        fptrunc=types.float64,
        fpext=types.float32)[suffix]

    def fn_issue197(x):
        y = np.zeros_like(x)
        for i in range(len(x)):
            y[i] = cast(x[i] + 3)
        return y

    fn_name = f"fn_issue197_{typ}_{suffix}"
    fn_issue197.__name__ = fn_name

    omnisci(f'{typ}[]({typ}[])')(fn_issue197)

    _, result = omnisci.sql_execute(
        f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
    )
    column, ret = list(result)[0]
    for x, y in zip(column, ret):
        assert y == x + 3
def test_issue197_bool(omnisci):
    """Boolean variant of issue #197: zeros_like + bool() element assignment."""
    omnisci.reset()

    import rbc.omnisci_backend as np

    @omnisci('bool[](bool[])')
    def fn_issue197_bool(x):
        y = np.zeros_like(x)
        for i in range(len(x)):
            y[i] = bool(x[i])
        return y

    col = 'b'
    fn_name = 'fn_issue197_bool'

    _, result = omnisci.sql_execute(
        f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
    )
    column, ret = list(result)[0]
    for x, y in zip(column, ret):
        assert bool(x) == bool(y)
def test_issue109(omnisci):
    """Regression for issue #109: constant-size Array returned from a UDF."""
    @omnisci('double[](int32)')
    def issue109(size):
        a = Array(5, 'double')
        for i in range(5):
            a[i] = nb_types.double(i)
        return a

    _, result = omnisci.sql_execute('select issue109(3);')
    assert list(result) == [([0.0, 1.0, 2.0, 3.0, 4.0],)]
def test_issue77(omnisci):
    """Regression for issue #77: Array.fill requires omniscidb >= 5.8."""
    @omnisci('int64[]()')
    def issue77():
        a = Array(5, 'int64')
        a.fill(1)
        return a

    if omnisci.version[:2] >= (5, 8):
        _, result = omnisci.sql_execute('select issue77();')
        assert list(result)[0][0] == [1, 1, 1, 1, 1]
    else:
        # older servers cannot bind the niladic UDF at all
        with pytest.raises(OmnisciServerError) as exc:
            _, result = omnisci.sql_execute('select issue77();')

        assert exc.match('Could not bind issue77()')
| 1.71875 | 2 |
env/lib/python3.8/site-packages/unidecode/x093.py | avdhari/enigma | 48 | 12995 | <filename>env/lib/python3.8/site-packages/unidecode/x093.py<gh_stars>10-100
# Transliteration table for the Unicode block U+9300..U+93FF (CJK ideographs):
# index = low byte of the code point, value = ASCII approximation (mostly
# pinyin); '[?] ' marks characters with no known transliteration.
data = (
'Lun ', # 0x00
'Kua ', # 0x01
'Ling ', # 0x02
'Bei ', # 0x03
'Lu ', # 0x04
'Li ', # 0x05
'Qiang ', # 0x06
'Pou ', # 0x07
'Juan ', # 0x08
'Min ', # 0x09
'Zui ', # 0x0a
'Peng ', # 0x0b
'An ', # 0x0c
'Pi ', # 0x0d
'Xian ', # 0x0e
'Ya ', # 0x0f
'Zhui ', # 0x10
'Lei ', # 0x11
'A ', # 0x12
'Kong ', # 0x13
'Ta ', # 0x14
'Kun ', # 0x15
'Du ', # 0x16
'Wei ', # 0x17
'Chui ', # 0x18
'Zi ', # 0x19
'Zheng ', # 0x1a
'Ben ', # 0x1b
'Nie ', # 0x1c
'Cong ', # 0x1d
'Qun ', # 0x1e
'Tan ', # 0x1f
'Ding ', # 0x20
'Qi ', # 0x21
'Qian ', # 0x22
'Zhuo ', # 0x23
'Qi ', # 0x24
'Yu ', # 0x25
'Jin ', # 0x26
'Guan ', # 0x27
'Mao ', # 0x28
'Chang ', # 0x29
'Tian ', # 0x2a
'Xi ', # 0x2b
'Lian ', # 0x2c
'Tao ', # 0x2d
'Gu ', # 0x2e
'Cuo ', # 0x2f
'Shu ', # 0x30
'Zhen ', # 0x31
'Lu ', # 0x32
'Meng ', # 0x33
'Lu ', # 0x34
'Hua ', # 0x35
'Biao ', # 0x36
'Ga ', # 0x37
'Lai ', # 0x38
'Ken ', # 0x39
'Kazari ', # 0x3a
'Bu ', # 0x3b
'Nai ', # 0x3c
'Wan ', # 0x3d
'Zan ', # 0x3e
'[?] ', # 0x3f
'De ', # 0x40
'Xian ', # 0x41
'[?] ', # 0x42
'Huo ', # 0x43
'Liang ', # 0x44
'[?] ', # 0x45
'Men ', # 0x46
'Kai ', # 0x47
'Ying ', # 0x48
'Di ', # 0x49
'Lian ', # 0x4a
'Guo ', # 0x4b
'Xian ', # 0x4c
'Du ', # 0x4d
'Tu ', # 0x4e
'Wei ', # 0x4f
'Cong ', # 0x50
'Fu ', # 0x51
'Rou ', # 0x52
'Ji ', # 0x53
'E ', # 0x54
'Rou ', # 0x55
'Chen ', # 0x56
'Ti ', # 0x57
'Zha ', # 0x58
'Hong ', # 0x59
'Yang ', # 0x5a
'Duan ', # 0x5b
'Xia ', # 0x5c
'Yu ', # 0x5d
'Keng ', # 0x5e
'Xing ', # 0x5f
'Huang ', # 0x60
'Wei ', # 0x61
'Fu ', # 0x62
'Zhao ', # 0x63
'Cha ', # 0x64
'Qie ', # 0x65
'She ', # 0x66
'Hong ', # 0x67
'Kui ', # 0x68
'Tian ', # 0x69
'Mou ', # 0x6a
'Qiao ', # 0x6b
'Qiao ', # 0x6c
'Hou ', # 0x6d
'Tou ', # 0x6e
'Cong ', # 0x6f
'Huan ', # 0x70
'Ye ', # 0x71
'Min ', # 0x72
'Jian ', # 0x73
'Duan ', # 0x74
'Jian ', # 0x75
'Song ', # 0x76
'Kui ', # 0x77
'Hu ', # 0x78
'Xuan ', # 0x79
'Duo ', # 0x7a
'Jie ', # 0x7b
'Zhen ', # 0x7c
'Bian ', # 0x7d
'Zhong ', # 0x7e
'Zi ', # 0x7f
'Xiu ', # 0x80
'Ye ', # 0x81
'Mei ', # 0x82
'Pai ', # 0x83
'Ai ', # 0x84
'Jie ', # 0x85
'[?] ', # 0x86
'Mei ', # 0x87
'Chuo ', # 0x88
'Ta ', # 0x89
'Bang ', # 0x8a
'Xia ', # 0x8b
'Lian ', # 0x8c
'Suo ', # 0x8d
'Xi ', # 0x8e
'Liu ', # 0x8f
'Zu ', # 0x90
'Ye ', # 0x91
'Nou ', # 0x92
'Weng ', # 0x93
'Rong ', # 0x94
'Tang ', # 0x95
'Suo ', # 0x96
'Qiang ', # 0x97
'Ge ', # 0x98
'Shuo ', # 0x99
'Chui ', # 0x9a
'Bo ', # 0x9b
'Pan ', # 0x9c
'Sa ', # 0x9d
'Bi ', # 0x9e
'Sang ', # 0x9f
'Gang ', # 0xa0
'Zi ', # 0xa1
'Wu ', # 0xa2
'Ying ', # 0xa3
'Huang ', # 0xa4
'Tiao ', # 0xa5
'Liu ', # 0xa6
'Kai ', # 0xa7
'Sun ', # 0xa8
'Sha ', # 0xa9
'Sou ', # 0xaa
'Wan ', # 0xab
'Hao ', # 0xac
'Zhen ', # 0xad
'Zhen ', # 0xae
'Luo ', # 0xaf
'Yi ', # 0xb0
'Yuan ', # 0xb1
'Tang ', # 0xb2
'Nie ', # 0xb3
'Xi ', # 0xb4
'Jia ', # 0xb5
'Ge ', # 0xb6
'Ma ', # 0xb7
'Juan ', # 0xb8
'Kasugai ', # 0xb9
'Habaki ', # 0xba
'Suo ', # 0xbb
'[?] ', # 0xbc
'[?] ', # 0xbd
'[?] ', # 0xbe
'Na ', # 0xbf
'Lu ', # 0xc0
'Suo ', # 0xc1
'Ou ', # 0xc2
'Zu ', # 0xc3
'Tuan ', # 0xc4
'Xiu ', # 0xc5
'Guan ', # 0xc6
'Xuan ', # 0xc7
'Lian ', # 0xc8
'Shou ', # 0xc9
'Ao ', # 0xca
'Man ', # 0xcb
'Mo ', # 0xcc
'Luo ', # 0xcd
'Bi ', # 0xce
'Wei ', # 0xcf
'Liu ', # 0xd0
'Di ', # 0xd1
'Qiao ', # 0xd2
'Cong ', # 0xd3
'Yi ', # 0xd4
'Lu ', # 0xd5
'Ao ', # 0xd6
'Keng ', # 0xd7
'Qiang ', # 0xd8
'Cui ', # 0xd9
'Qi ', # 0xda
'Chang ', # 0xdb
'Tang ', # 0xdc
'Man ', # 0xdd
'Yong ', # 0xde
'Chan ', # 0xdf
'Feng ', # 0xe0
'Jing ', # 0xe1
'Biao ', # 0xe2
'Shu ', # 0xe3
'Lou ', # 0xe4
'Xiu ', # 0xe5
'Cong ', # 0xe6
'Long ', # 0xe7
'Zan ', # 0xe8
'Jian ', # 0xe9
'Cao ', # 0xea
'Li ', # 0xeb
'Xia ', # 0xec
'Xi ', # 0xed
'Kang ', # 0xee
'[?] ', # 0xef
'Beng ', # 0xf0
'[?] ', # 0xf1
'[?] ', # 0xf2
'Zheng ', # 0xf3
'Lu ', # 0xf4
'Hua ', # 0xf5
'Ji ', # 0xf6
'Pu ', # 0xf7
'Hui ', # 0xf8
'Qiang ', # 0xf9
'Po ', # 0xfa
'Lin ', # 0xfb
'Suo ', # 0xfc
'Xiu ', # 0xfd
'San ', # 0xfe
'Cheng ', # 0xff
)
| 1.484375 | 1 |
from dataclasses import dataclass
import re
from tokenize import group
from core.constructs.resource import ResourceModel
from core.constructs.workspace import Workspace
# Resource type identifier for simple buckets in the cdev backend.
RUUID = "cdev::simple::bucket"
def get_cloud_output_from_cdev_name(component_name: str, cdev_name: str) -> str:
    """Look up the cloud output of a simple bucket by its cdev name.

    Returns None (after printing a diagnostic) when the lookup fails.
    """
    try:
        workspace = Workspace.instance()
        backend = workspace.get_backend()
        state_uuid = workspace.get_resource_state_uuid()
        return backend.get_cloud_output_by_name(state_uuid, component_name, RUUID, cdev_name)
    except Exception as e:
        # Best-effort lookup: report the failure and signal it with None.
        print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
        print(e)
        return None
def get_resource_from_cdev_name(component_name: str, cdev_name: str) -> ResourceModel:
    """Look up the resource model of a simple bucket by its cdev name.

    Returns None (after printing a diagnostic) when the lookup fails.
    """
    try:
        workspace = Workspace.instance()
        backend = workspace.get_backend()
        state_uuid = workspace.get_resource_state_uuid()
        return backend.get_resource_by_name(state_uuid, component_name, RUUID, cdev_name)
    except Exception as e:
        # Best-effort lookup: report the failure and signal it with None.
        print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
        print(e)
        return None
# Matches remote bucket URIs of the form
#   bucket://<component_name>.<bucket_name>[/<object_path>]
# Groups: 1 = component name, 2 = bucket cdev name, 3 = optional object path.
# NOTE(review): the '.' separator is unescaped (matches any character) and the
# ',' inside the character classes makes commas valid name characters --
# confirm both are intended.
remote_name_regex = "bucket://([a-z,_]+).([a-z,_]+)/?(\S+)?"
compiled_regex = re.compile(remote_name_regex)
@dataclass
class remote_location:
    """Parsed pieces of a bucket://<component>.<bucket>/<path> URI."""

    component_name: str
    cdev_bucket_name: str
    path: str  # object key within the bucket; None when the URI has no path
def is_valid_remote(name: str) -> bool:
    """Return True when *name* matches the bucket://<component>.<bucket>[/path] form."""
    # Idiomatic truth test of the Match object (was `True if match else False`).
    return compiled_regex.match(name) is not None
def parse_remote_location(name: str) -> remote_location:
    """Split a bucket://<component>.<bucket>[/<path>] URI into its parts.

    Raises:
        Exception: if *name* does not match the remote-object regex.
    """
    match = compiled_regex.match(name)

    if not match:
        # BUG FIX: the message was a plain string, so "{name}" was never
        # interpolated; use an f-string so the offending value is reported.
        raise Exception(
            f"provided name {name} does not match regex for a remote bucket object"
        )

    return remote_location(
        component_name=match.group(1),
        cdev_bucket_name=match.group(2),
        path=match.group(3),
    )
| 2.484375 | 2 |
examples/blank_cylinders.py | reflectometry/osrefl | 2 | 12997 | <reponame>reflectometry/osrefl
from greens_thm_form import greens_form_line, greens_form_shape
from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, cos, sin, log, log10
from osrefl.theory.DWBAGISANS import dwbaWavefunction
class shape:
    """Named 2-D polygon with an associated scattering-length density (SLD)."""

    def __init__(self, name):
        self.name = name
        self.points = []  # polygon vertices as [x, y] pairs
        self.sld = 0.0    # real part of the SLD
        self.sldi = 0.0   # imaginary (absorption) part of the SLD
def rectangle(x0, y0, dx, dy, sld=0.0, sldi=0.0):
    """Axis-aligned rectangle shape with corner (x0, y0) and size (dx, dy)."""
    rect = shape('rectangle')
    # Vertices listed counter-clockwise from the (x0, y0) corner.
    rect.points = [
        [x0, y0],
        [x0 + dx, y0],
        [x0 + dx, y0 + dy],
        [x0, y0 + dy],
    ]
    rect.sld = sld
    rect.sldi = sldi
    rect.area = dx * dy
    return rect
def sawtooth(z, n=6, x_length=3000.0, base_width=500.0, height=300.0, sld=0.0, sldi=0.0, sld_front=0.0, sldi_front=0.0):
    """Cross-section of n sawtooth ridges at height z, as a list of rectangles.

    Returns (rects, avg_sld, avg_sldi): the rectangles making up the slice and
    the laterally averaged (real, imaginary) SLD of the layer at that height.
    """
    if z > height:
        # Above the teeth there is only the front medium.
        # BUG FIX: this branch used to return a 2-tuple while the normal path
        # returns a 3-tuple; callers unpacking three values would crash.
        return [], sld_front, sldi_front
    width = (z / height) * base_width  # tooth width grows linearly with z
    front_width = base_width - width
    rects = [rectangle(0, base_width*(i+0.5) - width/2.0, x_length, width, sld, sldi) for i in range(n)]
    # now rectangles for the gaps between the sawtooths...
    if (sld_front != 0.0 and sldi_front != 0.0):
        # NOTE(review): gaps are filled only when BOTH the real and imaginary
        # front SLDs are nonzero -- presumably "or" was intended; confirm.
        front_rects = [rectangle(0, 0, x_length, front_width/2.0, sld_front, sldi_front)]
        front_rects.extend([rectangle(0, base_width*(i+0.5)+width/2.0, x_length, front_width, sld_front, sldi_front) for i in range(1, n-1)])
        front_rects.append(rectangle(0, base_width*(n-0.5)+width/2.0, x_length, front_width/2.0, sld_front, sldi_front))
        rects.extend(front_rects)
    # now calculate the average SLD (nuclear) for the layer
    avg_sld = (width * sld + front_width * sld_front) / base_width
    avg_sldi = (width * sldi + front_width * sldi_front) / base_width
    return rects, avg_sld, avg_sldi
def arc(r, theta_start, theta_end, x_center, y_center, theta_step=1.0, close=True, sld=0.0, sldi=0.0, ):
    """Polygonal arc of radius r from theta_start to theta_end (degrees).

    When close=True the center point is prepended so the polygon is a closed
    pie slice; area is set to the exact sector area for the swept angle.
    """
    a = shape('arc')
    a.theta_start = theta_start
    a.theta_end = theta_end
    a.area = pi * r**2 * abs(theta_end - theta_start)/360.0
    if close == True:
        a.points.append([x_center, y_center]) # center point closes the sector
    # BUG FIX: linspace requires an integer sample count; the float expression
    # used previously raises TypeError on modern numpy.
    numpoints = int(round((theta_end - theta_start) / theta_step)) + 1
    thetas = linspace(theta_start, theta_end, numpoints) * pi/180 # to radians
    for th in thetas:
        a.points.append([r*cos(th) + x_center, r*sin(th) + y_center])
    a.sld = sld
    a.sldi = sldi
    return a
def limit_cyl(arc, xmin=0.0, xmax=0.0, ymin=0.0, ymax=0.0):
    """Clip an arc shape to the box [xmin, xmax] x [ymin, ymax].

    Keeps only the vertices inside the box, and rescales the sector area by
    the retained vertex count (each kept vertex beyond the first two stands
    for one degree of arc under the default 1-degree sampling).
    """
    clipped = shape('arc')
    clipped.sld = arc.sld
    clipped.sldi = arc.sldi
    clipped.theta_start = arc.theta_start
    clipped.theta_end = arc.theta_end
    for point in arc.points:
        x, y = point[0], point[1]
        if xmin <= x <= xmax and ymin <= y <= ymax:
            clipped.points.append(point)
    if len(clipped.points) < 3:
        # Fewer than three vertices cannot enclose any area.
        clipped.area = 0.0
    else:
        clipped.area = (len(clipped.points) - 2) / 360.0 * arc.area
    return clipped
def conj(sld):
    """Return a copy of the SLD array with its imaginary column (index 2) negated."""
    conjugated = sld.copy()
    conjugated[:, 2] = -conjugated[:, 2]
    return conjugated
# alternating SLD
# ---- model definition: planar FCT array of cylindrical holes in a matrix ----
# (NOTE: this file uses Python 2 print statements below.)
wavelength = 1.24 # x-ray wavelength, Angstroms
spacing = 600.0 # distance between cylinder centers
radius = 200.0 # Angstroms, radius of cylinders
thickness = 300.0 # Angstrom, thickness of cylinder layer
sublayer_thickness = 200.0 # Angstrom, full layer of matrix below cylinders
matrix_sld = pi/(wavelength**2) * 2.0 * 1.0e-6 # substrate
matrix_sldi = pi/(wavelength**2) * 2.0 * 1.0e-7 # absorption in substrate
cyl_sld = 0.0
cyl_sldi = 0.0 # cylinders are holes in matrix
unit_dx = 2.0 * spacing
unit_dy = 1.0 * spacing
# 3000 x 3000 Angstrom matrix slab containing the cylinder lattice
matrix = rectangle(0,0, 3000, 3000, matrix_sld, matrix_sldi)
cylinders = []
centers = []
# 3 x 6 grid of unit cells, each holding a corner cylinder plus a centered
# basis cylinder (face-centered-tetragonal in-plane arrangement, a/b = 2)
for i in range(3):
    for j in range(6):
        x0 = i * 2.0 * spacing
        y0 = j * spacing
        x1 = x0 + spacing # basis
        y1 = y0 + spacing/2.0
        cylinders.append(arc(radius, 0.0, 360.0, x0, y0, sld=cyl_sld, sldi=cyl_sldi))
        cylinders.append(arc(radius, 0.0, 360.0, x1, y1, sld=cyl_sld, sldi=cyl_sldi))
cyl_area = 0.0
for cyl in cylinders:
    cyl_area += cyl.area
# clip cylinders that stick out past the matrix boundary
clipped_cylinders = [limit_cyl(cyl, xmin=0.0, xmax=3000.0, ymin=0.0, ymax=3000.0) for cyl in cylinders]
clipped_cyl_area = 0.0
for cyl in clipped_cylinders:
    clipped_cyl_area += cyl.area
print "clipped_cyl_area / matrix.area = ", clipped_cyl_area / matrix.area
print "ratio should be 0.3491 for FCT planar array with a/b = 2 and r = a/6"
# area-weighted average SLD of the patterned layer (used for the DWBA slab model)
avg_sld = (matrix.area * matrix_sld + clipped_cyl_area * cyl_sld) / matrix.area
avg_sldi = (matrix.area * matrix_sldi + clipped_cyl_area * cyl_sldi) / matrix.area
front_sld = 0.0 # air
back_sld = pi/(wavelength**2) * 2.0 * 5.0e-6 # substrate
back_sldi = pi/(wavelength**2) * 2.0 * 7.0e-8 # absorption in substrate
# detector q-grids; qx is tiny but nonzero to avoid division by zero
qz = linspace(0.01, 0.21, 501)
qy = linspace(-0.1, 0.1, 500)
qx = ones_like(qy, dtype=complex128) * 1e-8
# 1-D slab model rows: [SLD, thickness, SLDi]
SLDArray =  [ [0,0,0], # air
            [avg_sld, thickness, avg_sldi], # sample
            [matrix_sld, sublayer_thickness, matrix_sldi], # full matrix layer under cylinders
            [back_sld, 0, back_sldi] ]
# in-plane Fourier transform of the SLD contrast via Green's theorem:
# cylinders + matrix, minus the layer-average so only the contrast remains
FT = zeros_like(qx, dtype=complex128)
for cyl in clipped_cylinders:
    FT += greens_form_shape(cyl.points, qx, qy) * (cyl.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (matrix.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (-avg_sld)
SLDArray = array(SLDArray)
def calc_gisans(alpha_in, show_plot=True):
    """Compute GISANS amplitudes in the DWBA and in the Born approximation.

    Parameters:
        alpha_in: incoming beam angle in degrees.
        show_plot: when True, display log10 intensity maps for both results.
    Returns:
        (gisans, gisans_BA): complex amplitude arrays indexed by (qz, qy),
        built from the module-level FT, SLDArray, qz, qy and thickness.
    """
    kz_in_0 = 2*pi/wavelength * sin(alpha_in * pi/180.0)
    # BUG FIX: this line previously read the undefined name `kz_in`
    # (NameError at runtime); the incident kz computed above is kz_in_0.
    kz_out_0 = kz_in_0 - qz

    wf_in = dwbaWavefunction(kz_in_0, SLDArray)
    wf_out = dwbaWavefunction(-kz_out_0, conj(SLDArray))

    kz_in_l = wf_in.kz_l
    kz_out_l = -wf_out.kz_l
    # (dropped unused zs/dz/z_array scratch variables from the original)

    # qz combinations inside layer 1 for the four reflect/transmit channels
    qrt_inside = kz_in_l[1] - kz_out_l[1]
    qtt_inside = kz_in_l[1] + kz_out_l[1]
    qtr_inside = -kz_in_l[1] + kz_out_l[1]
    qrr_inside = -kz_in_l[1] - kz_out_l[1]

    # the overlap is the forward-moving amplitude c in psi_in multiplied by
    # the forward-moving amplitude in the time-reversed psi_out, which
    # ends up being the backward-moving amplitude d in the non-time-reversed psi_out
    # (which is calculated by the wavefunction calculator)
    # ... and vice-versa for d and c in psi_in and psi_out
    overlap  = wf_out.d[1] * wf_in.c[1] / (1j * qtt_inside) * (exp(1j * qtt_inside * thickness) - 1.0)
    overlap += wf_out.c[1] * wf_in.d[1] / (1j * qrr_inside) * (exp(1j * qrr_inside * thickness) - 1.0)
    overlap += wf_out.d[1] * wf_in.d[1] / (1j * qtr_inside) * (exp(1j * qtr_inside * thickness) - 1.0)
    overlap += wf_out.c[1] * wf_in.c[1] / (1j * qrt_inside) * (exp(1j * qrt_inside * thickness) - 1.0)

    # Born approximation: free-space plane waves, no refraction inside the film
    overlap_BA  = 1.0 / (1j * qz) * (exp(1j * qz * thickness) - 1.0)
    overlap_BA += 1.0 / (-1j * qz) * (exp(-1j * qz * thickness) - 1.0)

    gisans = overlap[:,newaxis] * FT[newaxis, :]
    gisans_BA = overlap_BA[:,newaxis] * FT[newaxis, :]
    extent = [qy.min(), qy.max(), qz.min(), qz.max()]

    if show_plot == True:
        from pylab import imshow, figure, colorbar
        figure()
        imshow(log10(abs(gisans)**2), origin='lower', extent=extent, aspect='auto')
        colorbar()
        figure()
        imshow(log10(abs(gisans_BA)**2), origin='lower', extent=extent, aspect='auto')
        colorbar()

    return gisans, gisans_BA
| 2.46875 | 2 |
EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 2 | 12998 | # -*- coding: utf-8 -*-
# Demo: MACD strategy
# src: ./test_backtest/MACD_JCSC.py
# jupyter: ./test_backtest/QUANTAXIS回测分析全过程讲解.ipynb
# paper: ./test_backtest/QUANTAXIS回测分析全过程讲解.md
import QUANTAXIS as QA
import numpy as np
import pandas as pd
import datetime
st1=datetime.datetime.now()  # wall-clock start; total backtest runtime is printed after the main loop
# define the MACD strategy
def MACD_JCSC(dataframe, SHORT=12, LONG=26, M=9):
    """MACD golden-cross / dead-cross indicator.

    1. DIF crossing above DEA is a buy-signal reference (golden cross, JC).
    2. DIF crossing below DEA is a sell-signal reference (dead cross, SC).
    """
    close = dataframe.close
    diff = QA.EMA(close, SHORT) - QA.EMA(close, LONG)
    dea = QA.EMA(diff, M)
    macd = 2 * (diff - dea)
    golden_cross = QA.CROSS(diff, dea)   # DIF crosses above DEA
    dead_cross = QA.CROSS(dea, diff)     # DIF crosses below DEA
    return pd.DataFrame({
        'DIFF': diff,
        'DEA': dea,
        'MACD': macd,
        'CROSS_JC': golden_cross,
        'CROSS_SC': dead_cross,
        'ZERO': 0,
    })
# create account
Account = QA.QA_Account()
Broker = QA.QA_BacktestBroker()
# seed the account with 1,000,000 starting cash and tag it for later lookup
Account.reset_assets(1000000)
Account.account_cookie = 'macd_stock'
QA.QA_SU_save_strategy('MACD_JCSC','Indicator',Account.account_cookie)
# get data from mongodb
data = QA.QA_fetch_stock_day_adv(
    ['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
data = data.to_qfq()
# add indicator
ind = data.add_func(MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
data_forbacktest=data.select_time('2018-01-01','2018-05-01')
# walk forward day by day (panel_gen), then stock by stock (security_gen)
for items in data_forbacktest.panel_gen:
    for item in items.security_gen:
        daily_ind=ind.loc[item.index]
        if daily_ind.CROSS_JC.iloc[0]>0:
            # golden cross: buy a fixed 1000 shares at the close price
            order=Account.send_order(
                code=item.code[0],
                time=item.date[0],
                amount=1000,
                towards=QA.ORDER_DIRECTION.BUY,
                price=0,
                order_model=QA.ORDER_MODEL.CLOSE,
                amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
            )
            #print(item.to_json()[0])
            # route the order through the broker, then record the fill back
            # onto the order (trade id / price / amount / time)
            Broker.receive_order(QA.QA_Event(order=order,market_data=item))
            trade_mes=Broker.query_orders(Account.account_cookie,'filled')
            res=trade_mes.loc[order.account_cookie,order.realorder_id]
            order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
        elif daily_ind.CROSS_SC.iloc[0]>0:
            #print(item.code)
            # dead cross: liquidate the entire sellable position, if any
            if Account.sell_available.get(item.code[0], 0)>0:
                order=Account.send_order(
                    code=item.code[0],
                    time=item.date[0],
                    amount=Account.sell_available.get(item.code[0], 0),
                    towards=QA.ORDER_DIRECTION.SELL,
                    price=0,
                    order_model=QA.ORDER_MODEL.MARKET,
                    amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
                )
                #print
                Broker.receive_order(QA.QA_Event(order=order,market_data=item))
                trade_mes=Broker.query_orders(Account.account_cookie,'filled')
                res=trade_mes.loc[order.account_cookie,order.realorder_id]
                order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
    # end-of-day settlement inside the daily loop
    Account.settle()
print('TIME -- {}'.format(datetime.datetime.now()-st1))
print(Account.history)
print(Account.history_table)
print(Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(Account)
Account.save()
Risk.save()
# print(Risk.message)
# print(Risk.assets)
# Risk.plot_assets_curve()
# plt=Risk.plot_dailyhold()
# plt.show()
# plt1=Risk.plot_signal()
# plt.show()
# performance=QA.QA_Performance(Account)
# plt=performance.plot_pnlmoney(performance.pnl_fifo)
# plt.show()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
#account_info = QA.QA_fetch_account({'account_cookie': 'user_admin_macd'})
#account = QA.QA_Account().from_message(account_info[0])
#print(account)
| 2.375 | 2 |
tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | 3 | 12999 | <gh_stars>1-10
from zquantum.core.interfaces.ansatz_test import AnsatzTests
from zquantum.core.circuits import Circuit, H, RX, RZ
from zquantum.core.utils import compare_unitary
from zquantum.core.openfermion import change_operator_type
from zquantum.qaoa.ansatzes.farhi_ansatz import (
QAOAFarhiAnsatz,
create_farhi_qaoa_circuits,
create_all_x_mixer_hamiltonian,
)
from openfermion import QubitOperator, IsingOperator
import pytest
import numpy as np
import sympy
class TestQAOAFarhiAnsatz(AnsatzTests):
    """Tests for QAOAFarhiAnsatz (AnsatzTests supplies the generic ansatz contract)."""

    @pytest.fixture
    def ansatz(self):
        # Single-layer Farhi ansatz on two qubits with Z cost / X mixer terms.
        cost_hamiltonian = QubitOperator((0, "Z")) + QubitOperator((1, "Z"))
        mixer_hamiltonian = QubitOperator((0, "X")) + QubitOperator((1, "X"))
        return QAOAFarhiAnsatz(
            number_of_layers=1,
            cost_hamiltonian=cost_hamiltonian,
            mixer_hamiltonian=mixer_hamiltonian,
        )

    @pytest.fixture
    def beta(self):
        return sympy.Symbol("beta_0")

    @pytest.fixture
    def gamma(self):
        return sympy.Symbol("gamma_0")

    @pytest.fixture
    def symbols_map(self, beta, gamma):
        return {beta: 0.5, gamma: 0.7}

    @pytest.fixture
    def target_unitary(self, beta, gamma, symbols_map):
        # Reference circuit: H on both qubits, then RZ(2*gamma), then RX(2*beta).
        target_circuit = Circuit()
        target_circuit += H(0)
        target_circuit += H(1)
        target_circuit += RZ(2 * gamma)(0)
        target_circuit += RZ(2 * gamma)(1)
        target_circuit += RX(2 * beta)(0)
        target_circuit += RX(2 * beta)(1)

        return target_circuit.bind(symbols_map).to_unitary()

    def test_set_cost_hamiltonian(self, ansatz):
        # Given
        new_cost_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))

        # When
        ansatz.cost_hamiltonian = new_cost_hamiltonian

        # Then
        assert ansatz._cost_hamiltonian == new_cost_hamiltonian

    def test_set_cost_hamiltonian_invalidates_circuit(self, ansatz):
        # Given
        new_cost_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))

        # When
        ansatz.cost_hamiltonian = new_cost_hamiltonian

        # Then
        assert ansatz._parametrized_circuit is None

    def test_set_mixer_hamiltonian(self, ansatz):
        # Given
        new_mixer_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))

        # When
        ansatz.mixer_hamiltonian = new_mixer_hamiltonian

        # Then
        # BUG FIX: this comparison was a bare expression (no `assert`), so the
        # test could never fail.
        assert ansatz._mixer_hamiltonian == new_mixer_hamiltonian

    def test_set_mixer_hamiltonian_invalidates_circuit(self, ansatz):
        # Given
        new_mixer_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))

        # When
        ansatz.mixer_hamiltonian = new_mixer_hamiltonian

        # Then
        assert ansatz._parametrized_circuit is None

    def test_get_number_of_qubits(self, ansatz):
        # Given
        new_cost_hamiltonian = (
            QubitOperator((0, "Z")) + QubitOperator((1, "Z")) + QubitOperator((2, "Z"))
        )
        target_number_of_qubits = 3

        # When
        ansatz.cost_hamiltonian = new_cost_hamiltonian

        # Then
        assert ansatz.number_of_qubits == target_number_of_qubits

    def test_get_number_of_qubits_with_ising_hamiltonian(self, ansatz):
        # Given
        new_cost_hamiltonian = (
            QubitOperator((0, "Z")) + QubitOperator((1, "Z")) + QubitOperator((2, "Z"))
        )
        new_cost_hamiltonian = change_operator_type(new_cost_hamiltonian, IsingOperator)
        target_number_of_qubits = 3

        # When
        ansatz.cost_hamiltonian = new_cost_hamiltonian

        # Then
        assert ansatz.number_of_qubits == target_number_of_qubits

    def test_get_parametrizable_circuit(self, ansatz, beta, gamma):
        # Then
        assert ansatz.parametrized_circuit.free_symbols == [
            gamma,
            beta,
        ]

    def test_generate_circuit(self, ansatz, symbols_map, target_unitary):
        # When
        parametrized_circuit = ansatz._generate_circuit()
        evaluated_circuit = parametrized_circuit.bind(symbols_map)
        final_unitary = evaluated_circuit.to_unitary()

        # Then
        assert compare_unitary(final_unitary, target_unitary, tol=1e-10)

    def test_generate_circuit_with_ising_operator(
        self, ansatz, symbols_map, target_unitary
    ):
        # When
        ansatz.cost_hamiltonian = change_operator_type(
            ansatz.cost_hamiltonian, IsingOperator
        )
        parametrized_circuit = ansatz._generate_circuit()
        evaluated_circuit = parametrized_circuit.bind(symbols_map)
        final_unitary = evaluated_circuit.to_unitary()

        # Then
        assert compare_unitary(final_unitary, target_unitary, tol=1e-10)
def test_create_farhi_qaoa_circuits():
    """A shared integer layer count yields one Circuit per Hamiltonian."""
    hamiltonians = [
        QubitOperator("Z0 Z1"),
        QubitOperator("Z0") + QubitOperator("Z1"),
    ]
    number_of_layers = 2

    circuits = create_farhi_qaoa_circuits(hamiltonians, number_of_layers)

    assert len(circuits) == len(hamiltonians)
    assert all(isinstance(circuit, Circuit) for circuit in circuits)
def test_create_farhi_qaoa_circuits_when_number_of_layers_is_list():
    """A per-Hamiltonian list of layer counts also yields one Circuit each."""
    hamiltonians = [
        QubitOperator("Z0 Z1"),
        QubitOperator("Z0") + QubitOperator("Z1"),
    ]
    number_of_layers = [2, 3]

    circuits = create_farhi_qaoa_circuits(hamiltonians, number_of_layers)

    assert len(circuits) == len(hamiltonians)
    assert all(isinstance(circuit, Circuit) for circuit in circuits)
def test_create_farhi_qaoa_circuits_fails_when_length_of_inputs_is_not_equal():
    """A layer list shorter than the Hamiltonian list must raise AssertionError."""
    hamiltonians = [
        QubitOperator("Z0 Z1"),
        QubitOperator("Z0") + QubitOperator("Z1"),
    ]
    mismatched_layers = [2]

    with pytest.raises(AssertionError):
        create_farhi_qaoa_circuits(hamiltonians, mismatched_layers)
def test_create_all_x_mixer_hamiltonian():
    """The all-X mixer is the sum of a single-qubit X term on every qubit."""
    number_of_qubits = 4
    expected_operator = (
        QubitOperator("X0")
        + QubitOperator("X1")
        + QubitOperator("X2")
        + QubitOperator("X3")
    )

    assert create_all_x_mixer_hamiltonian(number_of_qubits) == expected_operator
| 2.03125 | 2 |