id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5111083 | from ament_index_python import get_package_share_directory
import opengen as og
import casadi.casadi as cs
import os
from parameters import *
# Problem data: `ni`, `nt`, `force` and `A` come from `parameters` via the
# star import above -- presumably the horizon length, force bound and the
# equality-constraint matrix; confirm against parameters.py.
b = cs.SX.sym('b', ni)   # parameter vector (right-hand side of A x = b)
x = cs.SX.sym('x', nt)   # decision variables (control outputs)

# Bound control outputs to the box [0, force] in every component.
umax = [force] * nt
umin = [0] * nt
bounds = og.constraints.Rectangle(umin, umax)

# Quadratic cost 0.1 * x' x.
# BUG FIX: the identity constructor is `cs.SX.eye`; the original `cs.SX_eye`
# does not exist and raises AttributeError.  Also renamed the cost expression
# so it no longer shares the name `problem` with the builder object below.
cost = cs.transpose(x) @ (0.1 * cs.SX.eye(nt)) @ x

# Equality constraints A x - b = 0, enforced via the penalty method.
constraint = A @ x - b

# BUG FIX: the original had stray trailing backslashes after
# `.with_constraints(bounds)` and `.with_rebuild(False)`, which joined the
# following assignments onto the same logical line -- a SyntaxError.
problem = og.builder.Problem(x, b, cost) \
    .with_penalty_constraints(constraint) \
    .with_constraints(bounds)

meta = og.config.OptimizerMeta() \
    .with_version('1.0.0') \
    .with_authors(['<NAME>']) \
    .with_licence('MIT') \
    .with_optimizer_name('tsl_optimizer')

build_config = og.config.BuildConfiguration() \
    .with_build_directory(os.path.join(
        get_package_share_directory('slider_experiment'), 'python_build')) \
    .with_build_mode('release') \
    .with_build_python_bindings() \
    .with_rebuild(False)

solver_config = og.config.SolverConfiguration() \
    .with_lbfgs_memory(32) \
    .with_tolerance(1e-6) \
    .with_max_inner_iterations(128)

builder = og.builder.OpEnOptimizerBuilder(
    problem,
    metadata=meta,
    build_configuration=build_config,
    solver_configuration=solver_config) \
    .with_verbosity_level(1)

# Generate and compile the Rust solver (writes into the build directory).
builder.build()
9767433 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dlrn.api import app
from dlrn.config import ConfigOptions
from dlrn.db import closeSession
from dlrn.db import Commit
from dlrn.db import getSession
from flask import g
from flask import Response
from prometheus_client.core import CounterMetricFamily
from prometheus_client.core import REGISTRY
from prometheus_client import generate_latest
from prometheus_client import Summary
from six.moves import configparser
# Create a metric to track time spent and requests made.
# A Summary exposes both a call count and a total-seconds sum to Prometheus.
REQUEST_TIME = Summary('dlrn_request_processing_seconds',
                       'Time spent processing request')


def _get_db():
    """Return the app-context DB session, creating and caching it on first use.

    The session is stored on Flask's ``g`` so one context reuses a single
    session; it is closed by ``teardown_db`` when the context is torn down.
    """
    if 'db' not in g:
        g.db = getSession(app.config['DB_PATH'])
    return g.db


def _get_config_options(config_file):
    """Parse the INI file at *config_file* and wrap it in a ConfigOptions."""
    cp = configparser.RawConfigParser()
    cp.read(config_file)
    return ConfigOptions(cp)
class DLRNPromCollector(object):
    """Custom Prometheus collector reporting DLRN build counts.

    The counters are recomputed on every scrape by counting Commit rows
    per status in the DLRN database.
    """

    @REQUEST_TIME.time()
    def collect(self):
        config_options = _get_config_options(app.config['CONFIG_FILE'])
        # One counter per build outcome, labelled with the repo base URL.
        c_success = CounterMetricFamily('dlrn_builds_succeeded',
                                        'Total number of successful builds',
                                        labels=['baseurl'])
        c_failed = CounterMetricFamily('dlrn_builds_failed',
                                       'Total number of failed builds',
                                       labels=['baseurl'])
        c_retry = CounterMetricFamily('dlrn_builds_retry',
                                      'Total number of builds in retry state',
                                      labels=['baseurl'])
        c_overall = CounterMetricFamily('dlrn_builds',
                                        'Total number of builds',
                                        labels=['baseurl'])
        # Find the commits count for each metric; the app context is needed
        # so _get_db() can cache the session on flask.g.
        with app.app_context():
            session = _get_db()
            successful_commits = session.query(Commit).filter(
                Commit.status == 'SUCCESS').count()
            failed_commits = session.query(Commit).filter(
                Commit.status == 'FAILED').count()
            retried_commits = session.query(Commit).filter(
                Commit.status == 'RETRY').count()
            all_commits = session.query(Commit).count()
        c_success.add_metric([config_options.baseurl], successful_commits)
        c_failed.add_metric([config_options.baseurl], failed_commits)
        c_retry.add_metric([config_options.baseurl], retried_commits)
        c_overall.add_metric([config_options.baseurl], all_commits)
        return [c_success, c_failed, c_retry, c_overall]


# Register the collector so every /metrics scrape invokes collect().
REGISTRY.register(DLRNPromCollector())
@app.route('/metrics', methods=['GET'])
def prom_metrics():
    """Serve all registered Prometheus metrics in the text exposition format."""
    return Response(generate_latest(), mimetype='text/plain')


@app.teardown_appcontext
def teardown_db(exception=None):
    """Close the context-scoped DB session (if one was opened by _get_db)."""
    session = g.pop('db', None)
    if session is not None:
        closeSession(session)
| StarcoderdataPython |
6457535 | <filename>opus/import/populate_obs_instrument_COVIMS.py<gh_stars>1-10
################################################################################
# populate_obs_instrument_COVIMS.py
#
# Routines to populate fields specific to COVIMS.
################################################################################
# Ordering:
# time_sec1/2 must come before observation_duration
# planet_id must come before opus_id
import pdsfile
from config_data import *
import import_util
from populate_obs_mission_cassini import *
from populate_util import *
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY INSTRUMENT
################################################################################
### OBS_GENERAL TABLE ###
def _COVIMS_file_spec_helper(**kwargs):
metadata = kwargs['metadata']
index_row = metadata['index_row']
# Format: "/data/1999010T054026_1999010T060958"
path_name = index_row['PATH_NAME']
# Format: "v1294638283_1.qub"
file_name = index_row['FILE_NAME']
volume_id = kwargs['volume_id']
return volume_id + path_name + '/' + file_name
def populate_obs_general_COVIMS_opus_id_OBS(**kwargs):
    """Derive the OPUS ID for this observation from its file spec.

    A '_<phase>' suffix (lowercased channel name) is appended because each
    VIMS cube is imported once per channel phase.
    """
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name'].lower()
    file_spec = _COVIMS_file_spec_helper(**kwargs)
    pds_file = pdsfile.PdsFile.from_filespec(file_spec, fix_case=True)
    opus_id = pds_file.opus_id
    if not opus_id:
        import_util.log_nonrepeating_error(
            f'Unable to create OPUS_ID for FILE_SPEC "{file_spec}"')
        # Fall back to the bare file name so the import can continue.
        return file_spec.split('/')[-1] + '_' + phase_name
    opus_id += '_' + phase_name
    return opus_id


def populate_obs_general_COVIMS_ring_obs_id_OBS(**kwargs):
    """Construct the legacy ring_obs_id, e.g. "S_CUBE_CO_VIMS_1294638283_IR"."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    filename = index_row['FILE_NAME']
    # Characters 1..10 of "v1294638283_1.qub" are the image number.
    image_num = filename[1:11]
    phase_name = metadata['phase_name']
    planet = helper_cassini_planet_id(**kwargs)
    if planet is None:
        pl_str = ''
    else:
        # Single-letter planet prefix (e.g. 'S' for Saturn).
        pl_str = planet[0]
    return pl_str + '_CUBE_CO_VIMS_' + image_num + '_' + phase_name


def populate_obs_general_COVIMS_inst_host_id_OBS(**kwargs):
    """The instrument host is always the Cassini orbiter ('CO')."""
    return 'CO'
def populate_obs_general_COVIMS_quantity_OBS(**kwargs):
    """Measured quantity: optical depth for occultations, reflectance
    for every other instrument mode."""
    mode = kwargs['metadata']['index_row']['INSTRUMENT_MODE_ID']
    return 'OPDEPTH' if mode == 'OCCULTATION' else 'REFLECT'
# XXX CAL?
def populate_obs_general_COVIMS_observation_type_OBS(**kwargs):
    """Observation type: 'TS' (Time Series) for occultations, 'SCU'
    (Spectral Cube) for every other mode."""
    mode = kwargs['metadata']['index_row']['INSTRUMENT_MODE_ID']
    return 'TS' if mode == 'OCCULTATION' else 'SCU'
# The next four fields delegate entirely to shared helpers brought in by
# the star imports from populate_util / populate_obs_mission_cassini.
def populate_obs_general_COVIMS_time1_OBS(**kwargs):
    return populate_time1_from_index(**kwargs)


def populate_obs_general_COVIMS_time2_OBS(**kwargs):
    return populate_time2_from_index(**kwargs)


def populate_obs_general_COVIMS_target_name_OBS(**kwargs):
    return helper_cassini_intended_target_name(**kwargs)


def populate_obs_general_COVIMS_observation_duration_OBS(**kwargs):
    return populate_observation_duration_from_time(**kwargs)
def populate_obs_pds_COVIMS_note_OBS(**kwargs):
    """Return the PDS note for a COVIMS observation (none is available).

    BUG FIX: the original body was the bare expression ``None`` -- a no-op
    statement that relied on the implicit return; make the return explicit.
    """
    return None
# File-spec and PDS-label fields, all thin wrappers over shared helpers.
def populate_obs_general_COVIMS_primary_file_spec_OBS(**kwargs):
    return _COVIMS_file_spec_helper(**kwargs)


def populate_obs_pds_COVIMS_primary_file_spec_OBS(**kwargs):
    return _COVIMS_file_spec_helper(**kwargs)


def populate_obs_pds_COVIMS_product_creation_time_OBS(**kwargs):
    return populate_product_creation_time_from_supp_index(**kwargs)


# Format: "CO-E/V/J/S-VIMS-2-QUBE-V1.0"
def populate_obs_pds_COVIMS_data_set_id_OBS(**kwargs):
    return populate_data_set_id_from_index_label(**kwargs)


# Format: "1/1294638283_1"
def populate_obs_pds_COVIMS_product_id_OBS(**kwargs):
    return populate_product_id_from_index(**kwargs)
# RA/Dec ranges: prefer the ring geometry file's min/max when present;
# otherwise fall back to the single pointing value from the index, so
# range start and end collapse to the same value.
def populate_obs_general_COVIMS_right_asc1_OBS(**kwargs):
    metadata = kwargs['metadata']
    ring_geo_row = metadata.get('ring_geo_row', None)
    if ring_geo_row is not None:
        return import_util.safe_column(ring_geo_row, 'MINIMUM_RIGHT_ASCENSION')
    index_row = metadata['index_row']
    ra = import_util.safe_column(index_row, 'RIGHT_ASCENSION')
    return ra


def populate_obs_general_COVIMS_right_asc2_OBS(**kwargs):
    metadata = kwargs['metadata']
    ring_geo_row = metadata.get('ring_geo_row', None)
    if ring_geo_row is not None:
        return import_util.safe_column(ring_geo_row, 'MAXIMUM_RIGHT_ASCENSION')
    index_row = metadata['index_row']
    ra = import_util.safe_column(index_row, 'RIGHT_ASCENSION')
    return ra


def populate_obs_general_COVIMS_declination1_OBS(**kwargs):
    metadata = kwargs['metadata']
    ring_geo_row = metadata.get('ring_geo_row', None)
    if ring_geo_row is not None:
        return import_util.safe_column(ring_geo_row, 'MINIMUM_DECLINATION')
    index_row = metadata['index_row']
    dec = import_util.safe_column(index_row, 'DECLINATION')
    return dec


def populate_obs_general_COVIMS_declination2_OBS(**kwargs):
    metadata = kwargs['metadata']
    ring_geo_row = metadata.get('ring_geo_row', None)
    if ring_geo_row is not None:
        return import_util.safe_column(ring_geo_row, 'MAXIMUM_DECLINATION')
    index_row = metadata['index_row']
    dec = import_util.safe_column(index_row, 'DECLINATION')
    return dec
### OBS_TYPE_IMAGE TABLE ###
def populate_obs_type_image_COVIMS_image_type_id_OBS(**kwargs):
    """Image type id: 'PUSH' (push-broom) for the VIS channel, 'RAST'
    (raster scan) for the other channel; None when the cube is not an
    IMAGE-mode observation."""
    metadata = kwargs['metadata']
    if metadata['index_row']['INSTRUMENT_MODE_ID'] != 'IMAGE':
        return None
    return 'PUSH' if metadata['phase_name'] == 'VIS' else 'RAST'
def populate_obs_type_image_COVIMS_duration_OBS(**kwargs):
    """Per-pixel exposure in seconds for IMAGE-mode cubes (None otherwise).

    IR_EXPOSURE / VIS_EXPOSURE are stored in milliseconds in the index,
    hence the /1000. Negative exposures are logged and treated as missing.
    """
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    inst_mod = index_row['INSTRUMENT_MODE_ID']
    if inst_mod != 'IMAGE':
        return None
    phase_name = metadata['phase_name']
    ir_exp = import_util.safe_column(index_row, 'IR_EXPOSURE')
    vis_exp = import_util.safe_column(index_row, 'VIS_EXPOSURE')
    if phase_name == 'IR':
        if ir_exp is None:
            return None
        if ir_exp < 0:
            import_util.log_nonrepeating_warning(f'IR Exposure {ir_exp} is < 0')
            return None
        return ir_exp/1000
    if vis_exp is None:
        return None
    if vis_exp < 0:
        import_util.log_nonrepeating_warning(f'VIS Exposure {vis_exp} is < 0')
        return None
    return vis_exp/1000


def populate_obs_type_image_COVIMS_levels_OBS(**kwargs):
    # 4096 = 2**12: VIMS data numbers are 12-bit.
    return 4096


def populate_obs_type_image_COVIMS_lesser_pixel_size_OBS(**kwargs):
    """Smaller of swath width/length in pixels (IMAGE mode only)."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    inst_mod = index_row['INSTRUMENT_MODE_ID']
    if inst_mod != 'IMAGE':
        return None
    width = import_util.safe_column(index_row, 'SWATH_WIDTH')
    length = import_util.safe_column(index_row, 'SWATH_LENGTH')
    return min(width, length)


def populate_obs_type_image_COVIMS_greater_pixel_size_OBS(**kwargs):
    """Larger of swath width/length in pixels (IMAGE mode only)."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    inst_mod = index_row['INSTRUMENT_MODE_ID']
    if inst_mod != 'IMAGE':
        return None
    width = import_util.safe_column(index_row, 'SWATH_WIDTH')
    length = import_util.safe_column(index_row, 'SWATH_LENGTH')
    return max(width, length)
### OBS_WAVELENGTH TABLE ###
# The wavelength limits and resolutions below are hard-coded per-channel
# constants in microns -- presumably taken from the VIMS instrument
# documentation; confirm against the instrument paper before changing.
def populate_obs_wavelength_COVIMS_wavelength1_OBS(**kwargs):
    """Minimum wavelength (micron) for the current channel."""
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name']
    if phase_name == 'IR':
        return 0.8842
    return 0.35054


def populate_obs_wavelength_COVIMS_wavelength2_OBS(**kwargs):
    """Maximum wavelength (micron) for the current channel."""
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name']
    if phase_name == 'IR':
        return 5.1225
    return 1.04598


def populate_obs_wavelength_COVIMS_wave_res1_OBS(**kwargs):
    """Spectral resolution (micron) -- constant per channel."""
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name']
    if phase_name == 'IR':
        return 0.01662
    return 0.0073204


def populate_obs_wavelength_COVIMS_wave_res2_OBS(**kwargs):
    """Same constant as wave_res1 (resolution does not vary across band)."""
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name']
    if phase_name == 'IR':
        return 0.01662
    return 0.0073204


# Wavenumbers invert the wavelength order: the largest wavelength gives
# the smallest wavenumber, so wave_no1 uses wavelength2 and vice versa.
def populate_obs_wavelength_COVIMS_wave_no1_OBS(**kwargs):
    metadata = kwargs['metadata']
    wavelength_row = metadata['obs_wavelength_row']
    return 10000 / wavelength_row['wavelength2'] # cm^-1


def populate_obs_wavelength_COVIMS_wave_no2_OBS(**kwargs):
    metadata = kwargs['metadata']
    wavelength_row = metadata['obs_wavelength_row']
    return 10000 / wavelength_row['wavelength1'] # cm^-1


def populate_obs_wavelength_COVIMS_wave_no_res1_OBS(**kwargs):
    """Wavenumber resolution at the long-wavelength end: d(1/wl) = dwl/wl^2."""
    metadata = kwargs['metadata']
    wl_row = metadata['obs_wavelength_row']
    wave_res2 = wl_row['wave_res2']
    wl2 = wl_row['wavelength2']
    if wave_res2 is None or wl2 is None:
        return None
    return wave_res2 * 10000. / (wl2*wl2)


def populate_obs_wavelength_COVIMS_wave_no_res2_OBS(**kwargs):
    """Wavenumber resolution at the short-wavelength end."""
    metadata = kwargs['metadata']
    wl_row = metadata['obs_wavelength_row']
    wave_res1 = wl_row['wave_res1']
    wl1 = wl_row['wavelength1']
    if wave_res1 is None or wl1 is None:
        return None
    return wave_res1 * 10000. / (wl1*wl1)


def populate_obs_wavelength_COVIMS_spec_flag_OBS(**kwargs):
    # Every VIMS observation is spectral.
    return 'Y'


def populate_obs_wavelength_COVIMS_spec_size_OBS(**kwargs):
    """Number of spectral bands: 256 for IR, 96 for VIS."""
    metadata = kwargs['metadata']
    phase_name = metadata['phase_name']
    if phase_name == 'IR':
        return 256
    return 96


def populate_obs_wavelength_COVIMS_polarization_type_OBS(**kwargs):
    return 'NONE'
### populate_obs_occultation TABLE ###
# This import path does not populate occultation metadata for COVIMS, so
# every obs_occultation field is N/A.
def populate_obs_occultation_COVIMS_occ_type_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_occ_dir_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_body_occ_flag_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_optical_depth_min_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_optical_depth_max_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_temporal_sampling_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_quality_score_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_wl_band_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_source_OBS(**kwargs):
    return None


def populate_obs_occultation_COVIMS_host_OBS(**kwargs):
    return None
################################################################################
# THESE NEED TO BE IMPLEMENTED FOR EVERY CASSINI INSTRUMENT
################################################################################
def populate_obs_mission_cassini_COVIMS_ert1_OBS(**kwargs):
    # Earth-received times are not available in the VIMS index.
    return None


def populate_obs_mission_cassini_COVIMS_ert2_OBS(**kwargs):
    return None


def populate_obs_mission_cassini_COVIMS_spacecraft_clock_count1_OBS(**kwargs):
    """Parse SPACECRAFT_CLOCK_START_COUNT into a numeric SCLK value.

    The '1/' partition prefix is prepended because the index omits it.
    NOTE(review): ``opus_support`` is not imported by name in this file --
    presumably it arrives via one of the star imports above; confirm.
    """
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    count = index_row['SPACECRAFT_CLOCK_START_COUNT']
    sc = '1/' + count
    sc = helper_fix_cassini_sclk(sc)
    try:
        sc_cvt = opus_support.parse_cassini_sclk(sc)
    except Exception as e:
        import_util.log_nonrepeating_warning(
            f'Unable to parse Cassini SCLK "{sc}": {e}')
        return None
    return sc_cvt


def populate_obs_mission_cassini_COVIMS_spacecraft_clock_count2_OBS(**kwargs):
    """Parse SPACECRAFT_CLOCK_STOP_COUNT; clamped so count2 >= count1.

    Relies on count1 having already been populated into
    metadata['obs_mission_cassini_row'] (see the ordering note at the top
    of the file).
    """
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    count = index_row['SPACECRAFT_CLOCK_STOP_COUNT']
    sc = '1/' + count
    sc = helper_fix_cassini_sclk(sc)
    try:
        sc_cvt = opus_support.parse_cassini_sclk(sc)
    except Exception as e:
        import_util.log_nonrepeating_warning(
            f'Unable to parse Cassini SCLK "{sc}": {e}')
        return None
    cassini_row = metadata['obs_mission_cassini_row']
    sc1 = cassini_row['spacecraft_clock_count1']
    if sc1 is not None and sc_cvt < sc1:
        import_util.log_warning(
            f'spacecraft_clock_count1 ({sc1}) and spacecraft_clock_count2 ({sc_cvt}) '
            +'are in the wrong order - setting to count1')
        sc_cvt = sc1
    return sc_cvt


def populate_obs_mission_cassini_COVIMS_mission_phase_name_OBS(**kwargs):
    return helper_cassini_mission_phase_name(**kwargs)


def populate_obs_mission_cassini_COVIMS_sequence_id_OBS(**kwargs):
    """Sequence ID straight from the index row."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    seqid = index_row['SEQ_ID']
    return seqid
################################################################################
# THESE ARE SPECIFIC TO OBS_INSTRUMENT_COVIMS
################################################################################
def populate_obs_instrument_covims_channel_OBS(**kwargs):
    """Return the channel as a (value, display-value) pair -- the phase
    name ('IR' or 'VIS') serves as both."""
    phase = kwargs['metadata']['phase_name']
    return (phase, phase)
def populate_obs_instrument_covims_vis_exposure_OBS(**kwargs):
    """VIS channel exposure in seconds (index stores milliseconds)."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    vis_exp = import_util.safe_column(index_row, 'VIS_EXPOSURE')
    if vis_exp is None:
        return None
    return vis_exp / 1000.


def populate_obs_instrument_covims_ir_exposure_OBS(**kwargs):
    """IR channel exposure in seconds (index stores milliseconds)."""
    metadata = kwargs['metadata']
    index_row = metadata['index_row']
    ir_exp = import_util.safe_column(index_row, 'IR_EXPOSURE')
    if ir_exp is None:
        return None
    return ir_exp / 1000.
| StarcoderdataPython |
3229463 | import hashlib
import re
from collections import Counter
from typing import Dict, List, Optional, Union
import pandas as pd
from joblib import Parallel, delayed
from ..utils import BaseProcessor
class PreDeduplicationProcessor(BaseProcessor):
    """This class is used to process data to format expected by code clones detection tool SourcererCC.

    Args:
        project_id: An id required in SourcererCC, we use it to denote different dataset parts (train/val/test).
        data_format: In which format mined data is saved.
        chunksize: Number of examples to process at once (data is read in chunks). Optional, default value is 1000.
        n_workers: Maximum number of concurrently running jobs. Optional, default value is 1 (sequential execution).
        logger_name: Name of logger for this class. Optional, default value is None.
    """

    def __init__(
        self,
        project_id: int,
        data_format: str,
        chunksize: Optional[int] = None,
        n_workers: Optional[int] = None,
        logger_name: Optional[str] = None,
    ):
        super().__init__(chunksize=chunksize, n_workers=n_workers, data_format=data_format, logger_name=logger_name)
        # Characters examples are tokenized on (punctuation + whitespace).
        self._separators = r'[;.\[\]\(\)\~!\-\_\+\&\*/%<>\^\|\?\{\}=\#,"\\\:\$\'`@ +\n\r\t]'
        self._project_id = project_id
        # NOTE(review): n_workers is also passed to BaseProcessor above;
        # this local copy is used directly in process() -- confirm the two
        # are meant to stay in sync.
        self._n_workers = n_workers

    def _get_diff_from_mods(self, mods: List[Dict[str, str]]) -> str:
        """Constructs single diff from all file modifications in one commit.

        We don't want to consider filenames when running duplicates search on diffs,
        so `old_path`/`new_path`/`change_type` fields are ignored.
        """
        return " ".join(mod["diff"] for mod in mods)

    def _hash_string(self, x: str) -> str:
        """Obtains MD5 hash (hex digest) of given string."""
        hash = hashlib.md5()
        hash.update(x.encode("utf-8"))
        return hash.hexdigest()

    def _split_by_several_separators(self, x: str) -> List[str]:
        """Splits given string by punctuation and whitespaces, dropping empties."""
        return [y.strip() for y in re.split(self._separators, x) if y]

    def _process_single_example(self, cur_id: int, cur_example: Union[str, List[Dict[str, str]]], data_col: str) -> str:
        """Converts a single example into format required by SourcererCC.

        It includes the following steps:

        * Preprocess example (different for diffs and messages)
        * Calculate total # tokens and unique # tokens
        * Obtain required string representation:
          'project_id,sample_id,total_n_tokens,unique_n_tokens,token_hash@#@token1@@::@@frequency,...'
        """
        if not isinstance(cur_id, int):
            try:
                cur_id = int(cur_id)
            except ValueError:
                self.logger.error(f"`id` is expected to be `int`, got {cur_id} of `{type(cur_id)} instead")
                # Return an empty record so the pipeline can continue.
                return ""
        # diff preprocessing
        if data_col != "message":
            processed_example = self._preprocess_mods(cur_id, cur_example)
        # message preprocessing
        else:
            processed_example = self._preprocess_msg(cur_id, cur_example)
        # Token frequency table drives both the counts and the encoding.
        c = Counter(self._split_by_several_separators(processed_example))
        tokens_enc = (
            self._hash_string(processed_example) + "@#@" + ",".join(f"{token}@@::@@{freq}" for token, freq in c.items())
        )
        total_n_tokens = sum(c.values())
        unique_n_tokens = len(c)
        return f"{self._project_id},{cur_id},{total_n_tokens},{unique_n_tokens},{tokens_enc}\n"

    def _preprocess_mods(self, cur_id: int, cur_example: List[Dict[str, str]]) -> str:
        """Preprocesses modifications from single commit, which currently includes the following:

        * unite modifications into single diff string
        * remove '@@ xxx yyy @@' git stuff via regular expression
        """
        try:
            processed_example = self._get_diff_from_mods(cur_example)
            processed_example = re.sub("@@.*?@@\n", "", processed_example)
        except TypeError as e:
            # Malformed mods: fall back to the raw string representation.
            self.logger.error(f"[diff] {cur_id} produced TypeError {e}")
            processed_example = str(cur_example)
        return processed_example

    def _preprocess_msg(self, cur_id: int, cur_example: str) -> str:
        """Preprocesses a single commit message, which currently includes the following:

        * cast to lowercase
        """
        try:
            processed_example = cur_example.lower()
        except AttributeError as e:
            # Non-string message (e.g. NaN from pandas): stringify it.
            self.logger.error(f"[message] {cur_id} produced AttributeError {e}")
            processed_example = str(cur_example)
        return processed_example

    def process(self, chunk: pd.DataFrame, data_col: str, **kwargs) -> List[str]:
        """Processes each example in a chunk into format required by SourcererCC.

        Args:
            chunk: Small subset of original dataset.
            data_col: Should be `message` to process messages or `mods` to process diffs.
        """
        with Parallel(self._n_workers) as pool:
            res = pool(
                delayed(self._process_single_example)(cur_id=item["id"], cur_example=item[data_col], data_col=data_col)
                for _, item in chunk[["id", data_col]].iterrows()
            )
        return res
| StarcoderdataPython |
1900683 | <filename>Bolivian_Lowlands/Specific_loads_lowlands/HI_Household.py
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 10:32:11 2021
@author: Clau
"""
'''
Paper: Energy sufficiency, lowlands.
User: High Income Household
'''
from core import User, np
User_list = []

# Defining users
H2 = User("high income", 1)
User_list.append(H2)

# Appliances
# NOTE(review): positional arguments below are assumed to follow the RAMP
# Appliance signature (user, number, power [W], num_windows,
# func_time [min/day], time_fraction_random_variability, func_cycle [min])
# and .windows(window_1, window_2, random_var_w, [window_3]) -- confirm
# against the RAMP documentation / core.py.
H2_indoor_bulb = H2.Appliance(H2,4,7,2,120,0.2,10)
H2_indoor_bulb.windows([1082,1440],[0,30],0.35)

H2_outdoor_bulb = H2.Appliance(H2,2,13,2,600,0.2,10)
H2_outdoor_bulb.windows([0,330],[1082,1440],0.35)

H2_TV = H2.Appliance(H2,2,60,2,120,0.1,5)
H2_TV.windows([1082,1440],[0,60],0.35)

H2_DVD = H2.Appliance(H2,1,8,2,40,0.1,5)
H2_DVD.windows([1082,1440],[0,60],0.35)

H2_Antenna = H2.Appliance(H2,1,8,2,80,0.1,5)
H2_Antenna.windows([1082,1440],[0,60],0.35)

H2_Radio = H2.Appliance(H2,1,36,2,60,0.1,5)
H2_Radio.windows([390,450],[1082,1260],0.35)

H2_Phone_charger = H2.Appliance(H2,4,2,2,300,0.2,5)
H2_Phone_charger.windows([1110,1440],[0,30],0.35)

# Freezer: always-on duty-cycled appliance with two specific cycles.
H2_Freezer = H2.Appliance(H2,1,200,1,1440,0,30, 'yes',2)
H2_Freezer.windows([0,1440],[0,0])
H2_Freezer.specific_cycle_1(5,15,200,15)
H2_Freezer.specific_cycle_2(200,10,5,20)
H2_Freezer.cycle_behaviour([480,1200],[0,0],[0,479],[1201,1440])

# Mixer is only used occasionally (1 day in 3 on average).
H2_Mixer = H2.Appliance(H2,1,50,3,30,0.1,1, occasional_use = 0.33)
H2_Mixer.windows([420,450],[660,750],0.35,[1020,1170])

H2_Fan = H2.Appliance(H2,1,171,1,220,0.27,60)
H2_Fan.windows([720,1080],[0,0])

H2_Laptop = H2.Appliance(H2,1,70,1,90,0.3,30)
H2_Laptop.windows([960,1200],[0,0]) | StarcoderdataPython |
8191824 | # ------------------------------------------------------------------------------
# CMSC 291 Lecture 10: Default Application configuration for Jupyter Lab
# ------------------------------------------------------------------------------
# `c` is the traitlets configuration object injected by Jupyter when this
# config file is loaded; it is not defined in this file.
## The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = "notebooks"
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh. But not necessary here.
c.NotebookApp.password_required = False
## The port the notebook server will listen on (env: JUPYTER_PORT).
c.NotebookApp.port = 8888
| StarcoderdataPython |
1670882 | <gh_stars>1000+
from .tensor import *
from .sparse import *
| StarcoderdataPython |
3588848 | <reponame>Davidxswang/leetcode<filename>medium/230-Kth Smallest Element in a BST.py
"""
https://leetcode.com/problems/kth-smallest-element-in-a-bst/
Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
Example 1:
Input: root = [3,1,4,null,2], k = 1
3
/ \
1 4
\
2
Output: 1
Example 2:
Input: root = [5,3,6,2,4,null,null,1], k = 3
5
/ \
3 6
/ \
2 4
/
1
Output: 3
Follow up:
What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
Constraints:
The number of elements of the BST is between 1 to 10^4.
You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
"""
# time complexity: O(n), space complexity: O(the height of the tree)
# the follow up question can be solved, according to the solution page of the problem, by using a double linked list to speed up the search.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def kthSmallest(self, root: "TreeNode", k: int) -> int:
        """Return the k-th smallest value (1-indexed) in the BST at *root*.

        Iterative in-order traversal that stops as soon as the k-th node
        is visited, touching only O(h + k) nodes (h = tree height).

        Fixes over the original recursive version:
        * the ``TreeNode`` annotation is quoted so the module imports even
          when TreeNode is only provided by the judge at call time;
        * traversal terminates at the k-th node instead of visiting the
          whole tree (the original's early ``return`` only unwound one
          recursion level).
        """
        stack = []
        node = root
        while stack or node is not None:
            # Descend to the leftmost unvisited node.
            while node is not None:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            node = node.right
        # Unreachable under the problem constraints (1 <= k <= tree size).
        return None
| StarcoderdataPython |
1657118 | <reponame>OctavianLee/Cytisas
"""
Registry for task queue.
"""
import cPickle as pickle
from cytisas.tqueue.task import Task
class Registry(object):
    """A registry for task queue.

    Maps 'module.function' strings to registered Task instances and
    (de)serializes task payloads for transport through redis.
    NOTE(review): this module imports cPickle, so it targets Python 2.
    """

    def __init__(self):
        # task-name string -> Task instance
        self._registry = {}

    def get_task_string(self, task):
        """Generate a string of a task.

        :params: task: an instance of task.
        """
        # NOTE(review): reads task.module_name / task.func_name while
        # unregister() below assigns task._module_name / task._func_name --
        # presumably Task exposes properties over those fields; confirm.
        return '{}.{}'.format(task.module_name, task.func_name)

    def register(self, task):
        """Register a task.

        :params: task: an instance of task.
        :returns: a dictionary for redis (or None if already registered).
        """
        task_name = self.get_task_string(task)
        if task_name not in self._registry:
            redis_dict = {}
            data = pickle.dumps(task.func_data)
            self._registry[task_name] = task
            redis_dict['func_data'] = data
            # this feature is not implemented now.
            redis_dict['timeout'] = 10
            return redis_dict

    def unregister(self, redis_data):
        """Unregister a task.

        :params: redis_data: the data get from redis, a (func_data, timeout)
            pair where func_data is a pickled
            (module_name, func_name, args, kwargs) tuple.
        :returns: the unregistered task, or None if it cannot be resolved.
        """
        task = Task()
        func_data = redis_data[0]
        # NOTE(review): the timeout element is read but never used.
        time_out = redis_data[1]
        if not func_data:
            return None
        try:
            (task._module_name,
             task._func_name,
             task._args,
             task._kwargs) = pickle.loads(func_data)
        except Exception as exc:
            raise exc
        task_name = self.get_task_string(task)
        if task_name not in self._registry:
            return None
        del self._registry[task_name]
        return task


# Module-level singleton used by the rest of the task queue.
registry = Registry()
| StarcoderdataPython |
4881522 | """This module implements simple, dynamic argument parsing for Python scripts.
Use this when argparse is too verbose.
"""
from collections import OrderedDict
import sys
from typing import Text, Optional, Dict, List
def _parse(args: List[Text]) -> Dict:
"""Simple key value arg parser which doesn't need to know ahead of time what the argument names will be.
Args should be a raw list of the arguments passed to a script, as in:
import sys
_parse(sys.argv[1:])
"""
eq = '='
out = OrderedDict()
k = None
err = ValueError('Got unparsable arguments: {}'.format(args))
for arg in args:
if eq in arg:
first, second = arg.split(eq, maxsplit=1)
if not first.startswith('-'):
raise err
first = first.replace('-', '')
out[first] = second
k = None
else:
if k is None:
if arg.startswith('-'):
k = arg.replace('-', '')
else:
raise err
else:
if arg.startswith('-'):
out[k] = None
k = arg.replace('-', '')
else:
out[k] = arg
k = None
if k is not None:
out[k] = None
return out
def _convert_values_to_types(v: Optional[Text]):
"""Converts strings to ints and floats where possible."""
if v is None:
return v
try:
return int(v)
except ValueError:
pass
try:
return float(v)
except ValueError:
pass
if isinstance(v, Text):
# remove quotes
v = _remove_bounding_text(v, '"')
v = _remove_bounding_text(v, "'")
return v
def _remove_bounding_text(string: Text, remove: Text):
if string[0] == string[-1] == remove:
string = string[1:-1]
return string
def _convert_numeric_values(d: Dict):
    """Return a copy of *d* with every value run through _convert_values_to_types."""
    return OrderedDict([(k, _convert_values_to_types(v)) for k, v in d.items()])


def _remove_quotes(d: Dict):
    # NOTE(review): byte-identical to _convert_numeric_values and unused
    # below -- looks like a copy-paste leftover; it does not only remove
    # quotes, it also converts numerics.
    return OrderedDict([(k, _convert_values_to_types(v)) for k, v in d.items()])


def args_2_dict(args=None):
    """Parse *args* (defaults to sys.argv[1:]) into an ordered dict with
    int/float conversion and quote-stripping applied to the values."""
    if args is None:
        args = sys.argv[1:]
    parsed = _parse(args)
    return _convert_numeric_values(parsed)
if __name__ == '__main__':
    # Self-demo: exercise every supported argument shape and print the
    # parsed and type-converted results side by side.
    args = [
        '--eq1=1',
        '--split', 'value',
        '--eq2.0=2.0',
        '--single',
        '--single2',
        '--split2', 'value2',
        '--quotes="handle quotes"'
    ]
    print()
    print("Parsed:")
    parsed = _parse(args)
    for k, v in parsed.items():
        print("{:>8}: {}".format(k, v))
    print()
    print("Converted to numerics:")
    converted = _convert_numeric_values(parsed)
    for k, v in converted.items():
        print("{:>8}: {:<20} {:<20}".format(k, str(v), str(type(v))))
| StarcoderdataPython |
9790268 | <filename>oschown/chown_neutron.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oschown import base
from oschown import exception
class NeutronProject(base.ChownableProject):
    """oschown plugin for Neutron.

    Neutron resources cannot change ownership, so any resource lookup is
    rejected outright with ProjectCheckFailed.
    """

    @property
    def name(self):
        # Plugin identifier used by the oschown framework.
        return 'neutron'

    def collect_resource_by_id(self, context, resource_id):
        """Always fail: networks must be detached rather than transferred."""
        # NOTE(review): 'detatch' typo in the user-facing message below;
        # left untouched here, fix as a behavior change if desired.
        raise exception.ProjectCheckFailed(
            'Neutron resources cannot be transferred. '
            'Please detatch from all networks.')
| StarcoderdataPython |
6622019 | """This script will run the agent in a 1 D environment and plot its learning
progress
Author: <NAME>, 22.12.2021
"""
import numpy as np
import matplotlib.pyplot as plt
from environment import DiscreteEnvironment
from agent import AgentBase
from analysis import AnalyzerEpisode, AnalyzerRun, animate_episodes
import seaborn as sns
def generate_course_1():
    """Build the 11x3 course used in the first experiment.

    Returns:
        (course, win_value, death_value): the course as an (11, 3) int
        array of cell rewards plus the scalar win/death reward codes.
    """
    win_value, death_value = 5, -10
    course_rows = [
        [0, 0, 0, 1, 0, win_value, 0, death_value, 1, 0, 0],
        [death_value, 0, 0, 4, 0, 0, 0, 0, 5, 0, 0],
        [death_value, 0, 0, 4, 0, 0, 0, 0, 5, 0, win_value],
    ]
    return np.asarray(course_rows).T, win_value, death_value
def generate_square():
    """3x3 grid: the goal in one corner, a trap in the center."""
    win_value, death_value = 5, -10
    grid = np.array([
        [0, 0, 0],
        [0, death_value, 0],
        [0, 0, win_value],
    ])
    return grid.T, win_value, death_value
def generate_square_w_wall():
    """5x5 grid with an L-shaped wall of death cells shielding the goal."""
    win_value, death_value = 5, -15
    grid = np.array([
        [0, 0, 0, 0, 0],
        [0, death_value, death_value, death_value, 0],
        [0, death_value, 0, 0, 0],
        [0, death_value, 0, 0, 0],
        [0, 0, 0, 0, win_value],
    ])
    return grid.T, win_value, death_value
def main():
    """Train a Q-learning agent on a small grid world, plot its learning
    statistics, then run a few greedy episodes and save them as GIFs."""
    #game_map, win_values, death_values = generate_course_1()
    #game_map, win_values, death_values = generate_square()
    game_map, win_values, death_values = generate_square_w_wall()
    # Flattened cell indices of the terminal win/death states.
    wins = np.arange(game_map.size)[game_map.flatten() == win_values]
    deaths = np.arange(game_map.size)[game_map.flatten() == death_values]
    environment = DiscreteEnvironment(
        game_map=game_map,
        init_mode="zero",
        #init_mode="random",
        reward_mode="cumulative",
        wins=wins,
        deaths=deaths
    )
    agent = AgentBase(
        environment=environment,
        discount_factor=0.8,
        learning_rate=0.2,
        epsilon_greedyness=0.5
    )

    #--- do training ---
    n_episodes_train = 500
    agent.run(n_episodes=n_episodes_train)
    #---

    #--- plot analysis plots ---
    analysis = AnalyzerRun(run_statistics=agent._run_statistics)
    plt.figure(figsize=(6, 10))
    n = 4
    plt.subplot(n, 1, 1)
    analysis.plot_steps()
    plt.subplot(n, 1, 2)
    analysis.plot_reward()
    plt.subplot(n, 1, 3)
    analysis.plot_actions()
    plt.subplot(n, 1, 4)
    analysis.plot_average_final_state()
    plt.tight_layout()
    plt.show()
    #---

    #--- do additional exploitation runs to record gifs ---
    # epsilon = 1 makes the agent fully greedy (pure exploitation).
    agent._epsilon = 1
    n_episodes_exploit = 3
    agent.run(n_episodes=n_episodes_exploit)
    n_episdes_tot = n_episodes_train + n_episodes_exploit
    # Animate the very first episode plus the two final greedy ones.
    animate_episodes(
        agent,
        episodes=[0, n_episdes_tot-2, n_episdes_tot-1],
        show=True,
        save_path="animations",
        wins=wins,
        deaths=deaths
    )
    #---
if __name__ == '__main__':
main() | StarcoderdataPython |
273663 | import os
import sqlite3
import pandas as pd
class DBLoader(object):
    """Loads raw CSV data into the project's SQLite database."""

    def __init__(self, db_path=None, raw_path=None):
        """
        Initialize common parameters used for the database and inputs.
        If none given, they are defaulted to their expected locations
        within the repository (relative to the parent of the CWD).

        Default Database Location: /data/processed/atn_db.sqlite
        Default Raw Data Folder: /data/raw/

        Parameters
        ----------
        db_path: string
            Path to the database
        raw_path: string
            Path to folder containing raw data
        """
        root_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
        if db_path:
            self.db_path = db_path
        else:
            self.db_path = os.path.join(root_dir, 'data', 'processed', 'atn_db.sqlite')
        if raw_path:
            self.raw_path = raw_path
        else:
            self.raw_path = os.path.join(root_dir, 'data', 'raw', '')

    def db_query(self, query):
        """
        Connect to the database, execute a single SQL statement, commit,
        and close the database.

        Parameters
        ----------
        query: string
            A SQL statement to execute

        Returns
        ----------
        None
        """
        conn = sqlite3.connect(self.db_path)
        try:
            conn.execute(query)
            conn.commit()
        finally:
            # BUG FIX: the original leaked the connection when execute()
            # or commit() raised; always close it.
            conn.close()

    def df_to_db(self, table_name, df):
        """
        Connect to the database and append a pandas DataFrame to a table.

        Parameters
        ----------
        table_name: string
            Name of the destination table
        df: DataFrame
            Data to append

        Returns
        ----------
        None
        """
        conn = sqlite3.connect(self.db_path)
        try:
            # Equivalent to the original pd.DataFrame.to_sql(self=df, ...)
            # call, written as the conventional bound-method invocation.
            df.to_sql(name=table_name, con=conn, if_exists='append',
                      index=False, chunksize=100000)
        finally:
            # BUG FIX: close the connection even if to_sql() raises.
            conn.close()

    def csv_loader(self, load_file_path, import_cols, col_dict, table_name):
        """
        Load a comma-delimited file into a database table.

        Parameters
        ----------
        load_file_path: string
            Path to the file to open
        import_cols: list
            A list of columns to import from the file
        col_dict: dictionary
            Column names to remap before appending to the table
        table_name: string
            Name of the table to load the data to
        """
        load_df = pd.read_csv(load_file_path, usecols=import_cols)
        load_df.rename(columns=col_dict, inplace=True)
        self.df_to_db(table_name, load_df)
class DBQueries(object):
    """Read-only query helpers for the project's SQLite database."""

    def __init__(self, db_path=None, raw_path=None):
        """
        Initialize common parameters used for the database and inputs.
        If none given, they are defaulted to their expected locations
        within the repository.

        Default Database Location: /data/processed/atn_db.sqlite
        Default Raw Data Folder:   /data/raw/

        Parameters
        ----------
        db_path: string
            Path to the database
        raw_path: string
            Path to folder containing raw data
        """
        # Repository root is assumed to be the parent of the working dir.
        root_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
        if db_path:
            self.db_path = db_path
        else:
            self.db_path = os.path.join(root_dir, 'data', 'processed',
                                        'atn_db.sqlite')
        if raw_path:
            self.raw_path = raw_path
        else:
            self.raw_path = os.path.join(root_dir, 'data', 'raw', '')

    def query_to_df(self, query, params=None):
        '''
        Executes a query and returns the data as a pandas dataframe.

        Parameters
        ----------
        query: string
            SQL query to execute
        params: string or list-like
            Passed to pandas' read_sql `params` for SQL parameterization

        Returns
        -------
        Pandas DataFrame of the query results
        '''
        conn = sqlite3.connect(self.db_path)
        try:
            db_df = pd.read_sql(query, conn, params=params)
        finally:
            conn.close()
        return db_df

    def query_timeframe(self, start_date, end_date):
        '''
        Retrieve all rows of atn_performance strictly inside a time frame.

        Parameters
        ----------
        start_date: string
            In the format of 'YYYY-MM-DD' (exclusive lower bound)
        end_date: string
            In the format of 'YYYY-MM-DD' (exclusive upper bound)

        Returns
        -------
        Pandas DataFrame of the query results
        '''
        sql = '''
        SELECT * FROM atn_performance
        WHERE Flight_Date > :start
        AND Flight_Date < :end
        '''
        # Bug fix: the original referenced an undefined `conn`; delegate to
        # query_to_df, which opens and closes its own connection.
        return self.query_to_df(sql, params={'start': start_date,
                                             'end': end_date})
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model adding an author flag and a premium-subscription
    expiry date.

    Verbose names and help texts are in Persian (they feed the admin UI).
    """
    # Whether this user is allowed to publish posts.
    is_author = models.BooleanField(default=False, verbose_name='نویسنده', help_text='نشان میدهد که آیا این کاربر میتواند مطلب ارسال کند یا خیر.')
    # Moment the premium subscription expires; defaults to "now",
    # i.e. a fresh user has no active premium time.
    premium_date = models.DateTimeField(default=timezone.now, verbose_name='پایان اشتراک')
    class Meta:
        verbose_name = 'کاربر'
        verbose_name_plural = 'کاربران'
    def __str__(self):
        return self.username
    def premium_days_remaining(self):
        # Whole days left on the subscription; negative once expired.
        return (self.premium_date - timezone.now()).days
    # Column label for the admin changelist.
    premium_days_remaining.short_description = 'مدت زمان باقیمانده از اشتراک ویژه'
    def has_premium(self):
        # True while the expiry date is still in the future.
        return self.premium_date > timezone.now()
    # Render as a boolean icon in the admin.
    has_premium.boolean = True
    has_premium.short_description = 'اشتراک ویژه'
class Database:
    """Thin wrapper around a SQLite database file, exposing its tables as
    Table objects."""

    def __init__(self, filename):
        """Store the database filename.

        Raises TypeError when filename is not a string.
        """
        if isinstance(filename, str):
            self.filename = filename
        else:
            raise TypeError('Filename should be a string!')

    def get_connection(self):
        import sqlite3
        # Return a fresh connection to the database
        return sqlite3.connect(self.filename)

    @property
    def tables(self):
        """List of Table objects, one per table currently in the database."""
        with self.get_connection() as connection:
            # Fetch the name of each table
            cursor = connection.execute(
                "SELECT name FROM sqlite_master WHERE type='table';")
            from table import Table
            return [Table(self, row[0]) for row in cursor]

    def create_table(self, name, columns):
        """Create a table and return it.

        `columns` is a sequence of (name, type[, default]) tuples.
        Raises DuplicateTableError when the table already exists.
        """
        column_string = str()
        for column in columns:
            if len(column_string) > 0:
                column_string += ','
            column_name = column[0]
            column_data_type = column[1]
            column_string += '{} {}'.format(column_name, column_data_type)
            if len(column) > 2:
                # Quote string defaults; leave other literals untouched.
                if isinstance(column[2], str):
                    default_value = '"{}"'.format(column[2])
                else:
                    default_value = column[2]
                column_string += ' DEFAULT {}'.format(default_value)
        # NOTE: identifiers are interpolated directly into the DDL (SQLite
        # DDL cannot be parameterized) -- only pass trusted names here.
        query_string = 'CREATE TABLE {} ({});'.format(name, column_string)
        with self.get_connection() as connection:
            from sqlite3 import OperationalError
            try:
                connection.execute(query_string)
            except OperationalError as exception:
                duplicate_table_message = 'table {} already exists'.format(name)
                if exception.args[0] == duplicate_table_message:
                    from exceptions import DuplicateTableError
                    raise DuplicateTableError(duplicate_table_message) from exception
                # Bug fix: any other OperationalError used to be silently
                # swallowed; propagate it instead.
                raise
        return self[name]

    def __contains__(self, table):
        from table import Table
        # Only Table instances bound to this very database can be contained.
        if table is None or not isinstance(table, Table) or table.database != self:
            return False
        with self.get_connection() as connection:
            cursor = connection.execute(
                "SELECT EXISTS(SELECT name FROM sqlite_master "
                "WHERE type='table' AND name=?);", [table.name])
            return cursor.fetchone()[0]

    def __getitem__(self, key):
        from table import Table
        return Table(self, key)

    def __iter__(self):
        # Iterate over the Table objects of this database.
        return iter(self.tables)

    def __eq__(self, other):
        # isinstance(self, other.__class__) also handles other=None safely.
        if isinstance(self, other.__class__):
            return self.filename == other.filename
        return False

    def __ne__(self, other):
        return not self.__eq__(other)
from setuptools import setup, find_packages
from Cython.Build import cythonize
# Long description is taken verbatim from the README (shown on PyPI).
with open("README.md") as f:
    readme = f.read()
with open("LICENSE") as f:
    license = f.read()
# Optional dependency groups: `pip install optbeam[dev]` / `optbeam[docs]`.
extras = {
    'dev': ['bump2version'],
    'docs': open('docs/requirements.txt').read().splitlines(),
}
setup(
    name="optbeam",
    version="2.1.2",
    description=("Simulation of reflection and refraction of polarized "
                 "opticial beams at plane and curved dielectric interfaces"),
    long_description=readme,
    long_description_content_type='text/markdown',
    url="https://github.com/DanielKotik/Optical-beams-MEEP",
    author="<NAME> et al.",
    author_email="<EMAIL>",
    license=license,
    packages=find_packages(exclude=("scripts")),
    # Compile the beam/helper modules with Cython (Python 3 semantics)
    # for speed; the glob patterns cover every sub-package.
    ext_modules=cythonize(["optbeam/**/beams.py",
                           "optbeam/**/helpers.py"],
                          compiler_directives={'language_level': 3}),
    zip_safe=False,
    include_package_data=True,
    install_requires=["scipy", "cython"],
    extras_require=extras,
)
from .fill_layer_parser import FillLayerParser
from ...ekmap_common import *
from ...ekmap_converter import eKConverter
CURRENT_PATH = str(os.path.dirname(__file__))
class SimpleFillParser(FillLayerParser):
    """Converts a QGIS simple-fill symbol layer into eKMap style layers.

    Emits one fill style (solid colour or image pattern) and, unless the
    outline is disabled, one line style for the border. The fill style is
    appended before the line style, so the border is drawn on top.
    """
    def __init__(self, simpleFillLayer, exporter):
        super().__init__(simpleFillLayer)
        fillStyle = self.properties.get('style')
        fillColor = simpleFillLayer.color().name()
        fillConfig = {}
        if fillStyle == 'solid':
            fillConfig['fill-color'] = fillColor
            # Alpha channel 0..255 mapped to opacity 0..1.
            fillConfig['fill-opacity'] = simpleFillLayer.color().alpha() / 255
        else: # Fill with pattern
            # Need to replace because '_' is splitter
            patternName = fillStyle.replace('_','') \
                + "_C" + fillColor
            # Copy the bundled pattern image to the temp dir so the
            # exporter can ship it alongside the style.
            dstPath = TEMP_LOCATION + '/' + patternName + '.png'
            shutil.copy2(CURRENT_PATH + '/img/' + fillStyle + '.png', dstPath)
            exporter.externalGraphics.append(dstPath)
            fillConfig['fill-pattern'] = patternName
            fillConfig['fill-opacity'] = simpleFillLayer.color().alpha() / 255
        fillStyleLayer = self.exportFillLayerFormat(fillConfig)
        self.styles.append(fillStyleLayer)
        # An outline_style of 'no' means the border is disabled entirely.
        if self.properties.get('outline_style') != 'no':
            # NOTE(review): this aliases DEFAULT_LINE_CONFIG (no copy), so
            # the mutations below persist on the shared dict across
            # instances -- confirm this is intended.
            lineConfig = self.DEFAULT_LINE_CONFIG
            lineConfig['line-width'] = self.outlineWidth
            lineConfig['line-join'] = self.properties.get('joinstyle')
            lineConfig['line-color'] = simpleFillLayer.strokeColor().name()
            lineConfig['line-opacity'] = simpleFillLayer.strokeColor().alpha() / 255
            outlineStyle = self.properties.get("outline_style")
            # The dash pattern depends on both stroke type and width.
            lineConfig['line-dasharray'] = eKConverter.convertStrokeTypeToDashArray(outlineStyle, self.outlineWidth)
            lineConfig['visibility'] = 'visible'
            lineStyleLayer = self.exportLineLayerFormat(lineConfig)
            self.styles.append(lineStyleLayer)
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Bytecode manipulation for coverage.py"""
import types
class CodeObjects(object):
    """Depth-first iteration over a code object and every code object
    nested inside it (found through co_consts)."""

    def __init__(self, code):
        # LIFO stack of code objects still to be visited.
        self.stack = [code]

    def __iter__(self):
        stack = self.stack
        while stack:
            current = stack.pop()
            # Queue the children for later, then hand back the current one.
            stack.extend(const for const in current.co_consts
                         if isinstance(const, types.CodeType))
            yield current
import sys
import urllib2
import csv
import json
from datetime import datetime, timedelta, date
# Python 2 script: downloads the daily Serendipia COVID-19 CSV reports
# (suspected and confirmed cases) for every day in a date range given as
# two YYYY-MM-DD command-line arguments, and converts each report to a
# JSON file under json/.
if len(sys.argv) == 3 and sys.argv[1] and sys.argv[2]:
    try:
        start_date=datetime.strptime(sys.argv[1], '%Y-%m-%d')
        end_date=datetime.strptime(sys.argv[2], '%Y-%m-%d')
    except:
        raise NameError('Error: Invalid format for start date and/or end date. Format should be YYYY-MM-DD')
    if start_date <= end_date:
        # One iteration per day in the inclusive range.
        for n in range(int ((end_date - start_date).days)+1):
            single_date = start_date + timedelta(n)
            single_date_day = single_date.day
            single_date_month = single_date.month
            single_date_year = single_date.year
            # Two reports per day: suspected ('s') and confirmed ('c').
            reports = [{
                'type': 'sospechosos',
                'id': 's',
                'url': 'https://serendipia.digital/wp-content/uploads/%s/%02d/Tabla_casos_sospechosos_COVID-19_%s.%02d.%02d-Table-1.csv' % (single_date_year, single_date_month, single_date_year, single_date_month, single_date_day),
                'header': ('caso','estado','sexo','edad','sintomas','rt-pcr','procedencia','llegada')},
                {'type': 'confirmados',
                'id': 'c',
                'url': 'https://serendipia.digital/wp-content/uploads/%s/%02d/Tabla_casos_positivos_COVID-19_resultado_InDRE_%s.%02d.%02d-Table-1.csv' % (single_date_year, single_date_month, single_date_year, single_date_month, single_date_day),
                'header':('caso','estado','sexo','edad','sintomas','rt-pcr','procedencia','llegada')}]
            for report in reports:
                sys.stdout.write('Downloading CSV from: %s \n' % report['url'])
                try:
                    response = urllib2.urlopen(report['url'])
                except urllib2.URLError as e:
                    # Report missing for this day: log and move on.
                    sys.stdout.write(str(e.reason))
                    sys.stdout.write('\n\n')
                    continue
                csv_content = response.read()
                response.close()
                # Drop the first two lines (title + extra header) of the csv
                csv_content = csv_content.split('\n',2)[2]
                # Remove the last 2 lines of the content
                #csv_content = csv_content[:csv_content[:csv_content.rfind('\n')].rfind('\n')]
                # Save temp file
                file = open('/tmp/covid-temp.txt', 'w')
                file.write(csv_content)
                file.close()
                filename = 'json/%s-%02d-%02d-%s.json' % (single_date_year, single_date_month, single_date_day, report['id'])
                json_file = open(filename, 'w')
                json_file.write('{"datos":[')
                sys.stdout.write('Processing')
                first_line = True
                with open('/tmp/covid-temp.txt', 'r') as csv_file:
                    reader = csv.DictReader(csv_file, report['header'])
                    for row in reader:
                        # Data rows carry a numeric case id; anything else
                        # is a footer line and marks the end of the table.
                        if unicode(row['caso'], 'utf-8').isnumeric():
                            if first_line is not True:
                                json_file.write(',\n')
                            # Clean Strings
                            row['estado'] = row['estado'].replace('*', '')
                            row['estado'] = row['estado'].replace('\n', '')
                            row['procedencia'] = row['procedencia'].replace('*', '')
                            row['procedencia'] = row['procedencia'].replace('\n', '')
                            sys.stdout.write('.')
                            json.dump(row, json_file, ensure_ascii=False, indent=3)
                            first_line = False
                        else:
                            sys.stdout.write(' [DONE]\n\n')
                csv_file.close()
                json_file.write(']}')
                json_file.close()
    else:
        print 'Error: start date should be less or equal than end date'
else:
    print 'Error: start date and end date required.'
import os
# Compile the Observable module + its test with RapydScript, then run the
# resulting JavaScript under node.
os.system('rapydscript -p --screw-ie8 rapyd/Observable.pyj tests/test_Observable.pyj -o tests/js/test_Observable.js' )
os.system('node tests/js/test_Observable.js')
# The block below is intentionally disabled (kept as a bare string
# literal): it would collect every .pyj source under rapyd/ and tests/
# and compile them into a single Cylinder_test.js bundle.
"""
script_list = list()
for root, dirs, files in os.walk("rapyd"):
    path = root.split(os.sep)
    path.pop(0)
    for file in files:
        if file.lower().endswith(".pyj"):
            infile = os.path.abspath( os.path.join(root, file) )
            script_list.append(infile)
for file in os.listdir("tests"):
    path = os.path.join("tests/", file)
    if os.path.isfile(path):
        print file
        script_list.append( path )
cmd = 'rapydscript -p --screw-ie8 "%s" -o tests/js/Cylinder_test.js' % ('" "'.join(script_list))
print cmd
os.system(cmd)
""" | StarcoderdataPython |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
##
## This file is part of Tango Control System
##
## http://www.tango-controls.org/
##
## Author: <NAME>
##
## This is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
"""
.. panic.py: python API for a PyAlarm based alarms system
:mod:`panic` -- Package for Alarms and Notification of Incidences from Controls
===============================================================================
.. This package is great.
.. raw:: html
<hr/>
.. rubric:: Usage
And here some usage examples.
.. raw:: html
<hr/>
"""
import traceback,re,time,os,sys
import fandango
import fandango as fn
from fandango import first,searchCl,matchCl,clmatch,clsearch,\
isString,isSequence,isNumber,isFalse,isCallable,isMapping,\
xor,now,str2time,time2str,END_OF_TIME,Cached
from fandango.dicts import defaultdict
from fandango.tango import CachedAttributeProxy, AttrDataFormat, retango
from fandango.tango import PyTango,get_tango_host, check_device_cached
from fandango.tango import parse_tango_model
from fandango.log import tracer,shortstr
from .properties import *
get_tango = fandango.tango.get_database
_proxies = fandango.ProxiesDict()
GetProxy = _proxies.get
SetProxy = _proxies.__setitem__
"""
The _proxies object allows to retrieve DeviceProxy or DeviceServer objects.
* GetProxy(a/dev/name) will return a DeviceProxy by default.
* SetProxy('a/dev/name',object) allows to set a different object
to be returned (e.g. a device running in the same process)
"""
## Methods for matching device/attribute/alarm names
def intersect(a, b):
    """True if either value's lowercase string form contains the other."""
    left, right = str(a).lower(), str(b).lower()
    return left in right or right in left
def anyendswith(a, b):
    """True when one name is a trailing '/'-component match of the other.

    A leading '/' is prepended when missing so that 'name' matches
    'a/dev/name' but not 'a/dev/myname'.
    """
    left, right = str(a).lower(), str(b).lower()
    if '/' not in left:
        left = '/' + left
    if '/' not in right:
        right = '/' + right
    return left.endswith(right) or right.endswith(left)
def unicode2str(obj):
    """Recursively convert unicode strings inside obj into plain str.

    Mappings become dicts, sequences become lists; any other object is
    returned untouched (Python 2 compatibility helper).
    """
    if isMapping(obj, strict=True):
        return dict(unicode2str(item) for item in obj.items())
    if isSequence(obj):
        return list(unicode2str(item) for item in obj)
    if isString(obj):
        return str(obj)
    return obj
def substitute(message, substitutions=[[], {}], depth=1):
"""
Substitute `{%x%}` items values provided by substitutions
:param message: message to be substituted
:param substitutions: list of list and dictionary.
List is used for {%number%} substitutions and dictionary for
{%name%} substitutions
:param depth: defines number of pass
:return: substituted message
"""
if not isinstance(message, str):
raise Exception('StringExpected!')
if depth <1 :
return message
new_message = message
# substitute numbered substitutions
i = 0
for value in substitutions[0]:
new_message = new_message.replace("{%%%d%%}" % i, value)
i += 1
# processing named substitutions
for (k, value) in substitutions[1].items():
new_message = new_message.replace("{%%%s%%}" % k, value)
return substitute(new_message, substitutions, depth-1)
###############################################################################
#@todo: Tango access methods
def getAttrValue(obj,default=Exception):
    """
    Extracts rvalue in tango/taurus3/4 compatible way
    If default = True, obj is returned
    If default = Exception, only exception objects are returned (else None)
    """
    rm = getattr(obj,'read',None)
    if isCallable(rm):
        # Attribute-proxy-like object: read it, keeping the exception
        # object itself when the read fails.
        try:
            obj = rm()
        except Exception,e:
            obj = e
    if default is Exception:
        if isinstance(obj,(PyTango.DevError,Exception)):
            return obj
        default = None
    # Taurus4 exposes .rvalue; Taurus3/PyTango expose .value.
    r,v,d = getattr(obj,'rvalue',None),None,None
    if r is None:
        v = getattr(obj,'value',None)
        if v is None:
            d = obj if default is True else default
    #print('getAttrValue(%s)'%fd.shortstr(obj)
        #+': %s,%s,%s'%(r,v,d))
    r = r or v or d
    # An empty SPECTRUM attribute should read as [] rather than the
    # falsy None/default chosen above.
    if r is None and \
        getattr(obj,'data_format',None) == AttrDataFormat.SPECTRUM \
            and obj.is_empty:
        r = []
    return r
def getPanicProperty(prop):
    """
    Method to obtain global properties
    It manages compatibility with PANIC <= 6 using PyAlarm properties
    BUT!!! It triggers exceptions in device servers if called at Init()

    :param prop: a property name or a sequence of names
    :return: a list for a single name, the raw mapping for a sequence
    """
    print('getPanicProperty(%s)' % prop)
    k = [prop] if not fn.isSequence(prop) else prop
    r = get_tango().get_property('PANIC',k)
    # Backwards compatibility: fall back to the PyAlarm class property
    # when the PANIC free property holds no value at all.
    if not any(r.values()):
        r = get_tango().get_class_property('PyAlarm',k)
    # Mirror the argument shape: mapping for sequences, list for a scalar.
    r = r if fn.isSequence(prop) else r[prop]
    return list(r) if fn.isSequence(r) else r

def setPanicProperty(prop, value):
    """
    Method to write global properties
    It manages compatibility with PANIC <= 6 using PyAlarm properties
    Returns the name of the object written ('PyAlarm' or 'PANIC').
    """
    print('setPanicProperty(%s, %s)' % (prop, value))
    r = get_tango().get_property('PANIC',[prop])[prop]
    o = get_tango().get_class_property('PyAlarm',[prop])[prop]
    # Keep writing to PyAlarm only while the value lives there exclusively.
    if o and not r:
        get_tango().put_class_property('PyAlarm',{prop:value})
        return 'PyAlarm'
    else:
        get_tango().put_property('PANIC',{prop:value})
        return 'PANIC'

def getAlarmDeviceProperties(device):
    """ Method used in all panic classes; returns the device's
    alarm-table properties as a mapping. """
    props = get_tango().get_device_property(device,ALARM_TABLES.keys())
    #Updating old property names for backwards compatibility
    if not props['AlarmList']:
        props['AlarmList'] = get_tango().get_device_property(
            device,['AlarmsList'])['AlarmsList']
        if props['AlarmList']:
            print('%s: AlarmsList property renamed to AlarmList'%device)
            # Migrate: copy into AlarmList and empty the legacy property.
            get_tango().put_device_property(
                device,{'AlarmList':props['AlarmList'],'AlarmsList':[]})
    return props

def getAlarmDeviceProperty(device, prop):
    """ Gets the value of pointed property from the device """
    return get_tango().get_device_property(device,[prop])[prop]

def setAlarmDeviceProperty(device, prop, value):
    """ Sets property of the device (value stored as a 1-element list) """
    get_tango().put_device_property(device,{prop:[value]})
###############################################################################
# Alarm object used by API
class Alarm(object):
"""
Alarm object used by API's to keep the state of alarms
It maintains 3 time variables:
self.updated : last time that set_active was called (active or not)
self.active : time at which the alarm was activated (only if active)
self._time : last time that the alarm state changed
"""
    def __init__(self,tag,device='',formula='',description='',receivers='',
                 config='', severity='',api=None):
        """Create an Alarm and reset its runtime flags.

        :param tag: unique alarm name
        :param device: name of the PyAlarm device evaluating this alarm
        :param api: back-reference to the API object that owns this alarm
        """
        #Info from the database
        self.api = api
        self.setup(tag,device,formula,description,
                   receivers,config,severity,write=False)
        self.clear()
def setup(self,tag=None,device=None,formula=None,description=None,
receivers=None,config=None, severity=None,write=False):
""" Assigns values to Alarm struct """
notNone = lambda v,default: default
setVar = lambda k,v: setattr(self,k,v if v is not None
else getattr(self,k,''))
[setVar(k,v) for k,v in (('tag',tag),('device',device),
('formula',formula),('description',description),
('receivers',receivers),('config',config),
('severity',severity or DEFAULT_SEVERITY))]
self.name = self.tag
if write: self.write()
    def trace(self,msg):
        """Print a timestamped debug message tagged with this alarm."""
        print('%s: Alarm(%s): %s'%(fn.time2str(),self.tag,msg))

    def clear(self):
        """ This method just initializes Flags updated from PyAlarm devices,
        it doesn't reset alarm in devices """
        self._state = None
        self._time = None
        self.counter = 0 #N cycles being active
        self.active = 0 #Last timestamp it was activated
        self.updated = 0 #Last value check
        self.recovered = 0 #Last time it was recovered
        self.acknowledged = 0 #If active no more reminders will be sent
        self.disabled = 0 #If disabled the alarm is not evaluated
        self.sent = 0 #Messages sent
        self.last_sent = 0 #Time when last message was sent
        self.last_error = '' #Last exception
@staticmethod
def parse_formula(formula):
""" Gets "TAG:formula#comment" and returns (tag,formula) """
try:
tag,formula = formula.split('#')[0].split(':',1)
except:
tag,formula = None,None
return tag,formula
    def parse_severity(self):
        """ Replaces $TAG and $SEVERITY in Alarm severities """
        # NOTE(review): '$SEVERITY' is substituted with self.device here,
        # not with the severity value -- looks like a copy/paste slip
        # mirroring parse_description(); confirm the intended behaviour.
        return self.severity.replace('$TAG',self.tag).replace(
            '$SEVERITY',self.device)

    def parse_description(self):
        """ Replaces $TAG and $NAME in Alarm descriptions
        ($NAME expands to the alarm device name) """
        return self.description.replace('$TAG',self.tag).replace(
            '$NAME',self.device)
    def get_attribute(self,full=False):
        """ Gets the boolean attribute associated to this alarm
        (with full=True the device name is prefixed) """
        # Spaces and slashes are not valid in Tango attribute names.
        m = (self.device+'/' if full else '')
        m += self.tag.replace(' ','_').replace('/','_')
        return m

    def get_model(self):
        """Full attribute model name, prefixed with the tango host when
        the attribute name does not carry one already."""
        model = self.get_attribute(full=True)
        if ':' not in model: model = self.api.tango_host + '/' + model
        return model

    def get_ds(self):
        """ Gets and AlarmDS object related to this alarm
        (a fresh one when the device is not cached by the api) """
        try: return self.api.devices[self.device]
        except: return AlarmDS(self.device,api=self.api)

    def get_engine(self):
        """@TODO this method should return the DevImpl
        PyAlarm instance or a DeviceProxy to it"""
        return self.get_ds().get_proxy()
    def set_active(self,value,count=1,t=None):
        """
        BE CAREFUL, IT MAY INTERFERE WITH COUNTER MANAGEMENT WITHIN PYALARM
        Will increment/decrement counter and set active and time flags
        if the count value (e.g. 1) has been reached

        :param value: >0 raised (a value>1 is taken as the activation
            timestamp), 0 not raised, <0 error
        :param count: number of consecutive raised cycles required before
            the alarm is actually marked active
        :param t: explicit timestamp to store instead of now()
        :return: the resulting self.active value
        """
        self.updated = now()
        # Counter goes up while raised (until active) and down otherwise.
        if value:
            if self.active == 0:
                self.counter+=1
        elif self.counter>0:
            self.counter-=1
        if value and value>0 and self.counter>=count and not self.active:
            #ACTIVE
            tracer('%s.set_active(%s,%s)'%(self.tag,value,self.counter))
            self.last_sent = self.updated
            self.active = value
            # value>1 means value itself is the activation timestamp.
            self.set_time(t or (value>1 and value))
        if value and value<0:
            #ERROR
            if self.active>=0: self.set_time(t)
            self.active = -1
        if not value and not self.counter:
            #NORM
            self.active = 0
            self.set_time(t)
            if not self.recovered:
                #print('%s => %s'%(self.tag,0))
                #self.recovered = self.get_time()
                pass
        return self.active
    def get_active(self):
        """ This method connects to the Device to get the value and timestamp
        of the alarm attribute (self.active becomes None on failure) """
        try:
            self.active = self.get_time(attr_value = True)
        except:
            self.active = None
        return self.active

    def is_active(self,cache=True):
        """Returns 1 if active, 0 if normal, -1 if disabled or in error."""
        v = self.active if cache else self.get_active()
        # NOTE(review): when get_active() failed, v is None; `None < 0` is
        # True under Python 2 (this module's target) but raises on Python 3.
        if self.disabled or v < 0: return -1
        elif v > 0: return 1
        else: return 0

    # DO NOT CREATE PROPERTY DESCRIPTOR FOR SET/GET_ACTIVE!!!
def set_state(self,state=None):
"""
without arguments, this method will update the state from flags
with an state as argument, it will just update the value and flags
accordingly.
with an alarm summary as argument, it will update everything from it
with an activealarms row; it will update Active/Norm/Error states only
"""
t0 = now()
o,a,tag,stamp = self._state,state,self.tag,0
#tracer('%s._state was %s since %s(%s)'
#%(tag,o,self._time,time2str(self._time,us=True)))
if state is None:
#UPDATE THE STATE USING PREVIOUS FLAG VALUES
return self.get_state(True)
elif isinstance(state,(Exception,PyTango.DevError)):
self.last_error = shortstr(state)
state = 'state=ERROR;desc=%s'%shortstr(state)
elif state in AlarmStates.values():
state = AlarmStates.get_key(state)
elif isNumber(state):
# equals to set_active(timestamp)
state = 'state=UNACK;time=%s'%state
elif not str(state).strip():
state = 'NORM'
#print(1,1e3*(now()-t0))
#######################################################################
# Up to this point, state should be string
if ':' in state and state.split(':')[0]==tag:
#Parsing an ActiveAlarms row (Panic <6)
state,stamp = 'UNACK',0 # To be read from DS cache
try: state = 'ACKED' if self.get_acknowledged() else 'UNACK'
except: traceback.print_exc()
elif '=' in state:
#Dictionary-like (Panic >6.1, Elettra)
dct = dict(t.split('=',1) for t in state.split(';'))
stamp = dct.pop('time',stamp)
description = dct.get('description',self.description)
state = dct.get('state','NORM')
severity = dct.get('severity',self.severity)
tag = dct.get(tag,self.tag)
#locals().update(dct)
#Default sorting order (@DEPRECATED?)
elif ';' in state:
err = '%s.state=%s DEPRECATED!'%(self.tag,state)
state,self.last_error = 'ERROR',err
tracer(err)
#tag,state,severity,stamp,desc = state.split(';')
# Get Timestamp
if stamp:
try: stamp = float(stamp)
except: stamp = str2time(stamp)
elif state in ACTIVE_STATES and self.active:
stamp = self.active
elif state in ('ERROR','OOSRV'):
stamp = -1
else:
# reading activealarms array from ds
stamp = self.get_time(attr_value=True)
if state not in AlarmStates and stamp<=0:
tracer('%s: Setting state from timestamp: %s'%(self.tag,stamp))
if self.disabled:
state = 'DSUPR'
elif stamp:
# Negative stamp and unknown state?
tracer('%s.state=%s (%s)?, setting to ERROR'%(state,stamp))
state = 'ERROR'
elif check_device_cached(self.device):
# Device available and no stamp?
state = 'ERROR'
else:
# Device is Off
state = 'OOSRV'
self._state = AlarmStates[state]
#tracer('%s(%s).set_state(%s): %s,%s'
#%(self.tag,self._time,a,state,stamp))
#######################################################################
# UPDATE FLAGS (set_time() done at set_active()
if o != self._state or self._time is None:
tracer('%s state changed!: %s -> %s -> %s'
%(self.tag,o,state,self._state))
if state in ('NORM'):
self.recovered = stamp if self.active else 0
self.set_active(0,stamp)
self.acknowledged = 0
if state in ('ACTIVE','UNACK'):
self.set_active(stamp or t0,t=stamp)
self.recovered = 0
self.acknowledged = 0
if state in ('ACKED'):
self.acknowledged = stamp
self.set_active(self.active or stamp or t0,t=stamp)
if state in ('RTNUN'):
self.recovered = stamp
self.set_active(self.active or stamp or t0,t=stamp)
self.counter = 0
if state in ('DSUPR,SHLVD'):
self.disabled = stamp or -1
self.set_active(0,t=stamp)
elif state in ('OOSRV'):
self.disabled = -1
self.set_active(-1,t=stamp)
else:
self.disabled = 0
if state in ('ERROR'):
self.set_active(-1,t=stamp)
self.last_error = self.last_error or time2str(stamp)
else:
self.last_error = ''
#print(2,1e3*(now()-t0))
return self._state,self.get_time()
    def get_state(self,force=False):
        """
        This method will return the state of the alarm
        If force is True or _state is None, then the state will be
        initialized from the flags values.
        Thus, flags must be updated prior to calling this method
        """
        if force or self._state is None:
            # Precedence: disabled flags first, then error, then active.
            if self.disabled:
                #NOTE: disabled<time() will be ignored
                if self.disabled<0:
                    self._state = AlarmStates.OOSRV
                elif time.time() < self.disabled:
                    self._state = AlarmStates.SHLVD
                elif self.disabled in (1,True):
                    self._state = AlarmStates.DSUPR
            elif self.active in (None,-1):
                self._state = AlarmStates.ERROR
            elif self.active>0:
                # Active: distinguish recovered / acknowledged / plain.
                if self.recovered > self.active:
                    self._state = AlarmStates.RTNUN
                elif self.acknowledged:
                    self._state = AlarmStates.ACKED
                else:
                    self._state = AlarmStates.UNACK
            else: # no active
                self._state = AlarmStates.NORM
            self.updated = time.time()
        return AlarmStates.get_key(self._state)

    state = property(fget=get_state,fset=set_state)
    def set_time(self,t=None):
        """Store the last state-change timestamp (now() when t is unset
        or not a positive number)."""
        self._time = t if t and t>0 else time.time()
        return self._time

    def get_time(self,attr_value=None):
        """
        This method extracts alarm activation timestamp
        from the ActiveAlarms array.
        It returns 0 if the alarm is not active.

        :param attr_value: None uses the cached time (or forces a device
            read), True forces a device read, a sequence is parsed as the
            ActiveAlarms attribute value, anything else is taken as this
            alarm's own timestamp.
        """
        if attr_value is None and self._time is not None:
            return self._time
        ## Parsing ActiveAlarms attribute
        if attr_value in (None,True): #Force attribute cache reading
            actives = self.get_ds().get_active_alarms()
        elif isSequence(attr_value): #Pass the attribute value
            actives = self.get_ds().get_active_alarms(attr_value)
        else:
            actives = {self.tag:[attr_value]}
        if isinstance(actives,(type(None),Exception)):
            # Device not readable: report error (-1).
            return -1
        return actives.get(self.tag,0)

    time = property(get_time,set_time)
    def get_quality(self):
        """ it just translates severity to the equivalent Tango quality,
        but does NOT get actual attribute quality (which may be INVALID) """
        qualities = {'DEBUG':'VALID',
            'INFO':'VALID',
            'WARNING':
            'WARNING',
            'ALARM':'ALARM',
            'ERROR':'ALARM'
            }
        # Unknown severities default to the WARNING quality.
        quality = PyTango.AttrQuality.names['ATTR_%s'
            %qualities.get(self.severity,'WARNING')]
        return quality

    # Methods for PANIC 6 <> 7 compatibility
    # (priority/message/annunciators alias severity/description/receivers)
    def get_priority(self): return self.severity
    def set_priority(self,val): self.severity = val
    priority = property(fget=get_priority,fset=set_priority)

    def get_message(self): return self.description
    def set_message(self,val): self.description = val
    message = property(fget=get_message,fset=set_message)

    def get_annunciators(self): return self.receivers
    def set_annunciators(self,val): self.receivers = val
    annunciators = property(fget=get_annunciators,fset=set_annunciators)

    # Condition is delegated to the alarm's device server.
    def get_condition(self): return self.get_ds().condition
    condition = property(get_condition)
def parse_config(self):
""" Checks the Alarm config related to this alarm """
config = self.config
if isinstance(dict,config):
return config
elif config:
config = str(config)
try:
if '=' in config and ':' not in config:
config = config.replace('=',':')
if ';' not in config and config.count(':')>1:
config = config.replace(',',';')
config = dict(s.split(':') for s in config.split(';'))
except:
print('Alarm(%s): Unable to parse config(%s):\n%s'
%(tag,config,traceback.format_exc()))
config = {}
else:
config = {}
return config
    def get_wiki_link(self):
        """Returns string with link to Wiki for this alarm or empty string"""
        try:
            # AlarmWikiLink is a PyAlarm class property; the '{%ALARM%}'
            # placeholder inside it is substituted with this alarm's tag.
            wiki_prop = get_tango().get_class_property(
                'PyAlarm','AlarmWikiLink').get('AlarmWikiLink',['',]) + ['',]
            wiki_link = wiki_prop[0]
            # change the property
            wiki_link = substitute(wiki_link, [['',], {"ALARM": self.tag}])
            return wiki_link
        except:
            traceback.print_exc()
            return ''

    def get_enabled(self,force=True):
        """True if enabled, False if disabled, None when the flag could
        not be read from the device."""
        if force:
            try:
                self.disabled = self.get_ds().get_disabled(self.tag)
            except: return None
        return not self.disabled

    def enable(self,comment=''):
        """
        Enables alarm evaluation
        comment argument needed by API (ignored by device)
        """
        return self.get_ds().get().Enable(self.tag)

    def disable(self, comment='',timeout=''):
        """ Disables evaluation of Alarm in its PyAlarm device
        (an optional timeout restores evaluation afterwards) """
        args = [self.tag, comment]
        if timeout: args.append(str(timeout))
        result = self.get_ds().get().Disable(args)
        return result

    def get_acknowledged(self,force=False):
        """Cached acknowledged flag; force=True re-reads from the device."""
        if force:
            self.acknowledged = self.get_ds().get_acknowledged(self.tag)
        return self.acknowledged

    def reset(self, comment):
        """ Resets the Alarm in its PyAlarm device """
        result = self.get_ds().reset(self.tag, comment)
        # Local flags are cleared; they will be refreshed from the device.
        self.clear()
        return result

    def acknowledge(self,comment):
        """ Acknowledges the Alarm in its PyAlarm device """
        result = self.get_ds().acknowledge(self.tag, comment)
        return self.get_acknowledged(force=True)

    def renounce(self,comment=''):
        """
        Renounces (un-acknowledges) the Alarm in its PyAlarm device
        Comment is ignored by device
        """
        result = self.get_engine().Renounce(self.tag)
        return not self.get_acknowledged(force=True)
    def write(self,device='',exclude='',update=True):
        """
        Updates the Alarm config for the given device in the database
        :param device: target device name; defaults to self.device
        :param exclude: regexp of property lines to drop while rewriting
            (used by rename/set_severity to remove the old row)
        :param: update controls whether the device.init() will be called
        or not, if not the alarm will not be applied yet
        """
        self.device = device or self.device
        if not self.device:
            raise Exception('DeviceNameRequired')
        props = self.get_ds().get_alarm_properties()
        # rewrites one 'TAG:value' table: replaces this tag's row, keeps
        # other rows (minus excluded ones), appends the row if missing
        def update_lines(lines,new_line,exclude):
            new_lines,added = [],False
            #A copy of the array is needed due to a bug in PyTango!
            tag = new_line.split(':',1)[0]
            for l in lines:
                if l.startswith(tag+':') and l!=new_line:
                    print('In Alarm(%s).write(): line updated: %s'
                          % (tag,new_line))
                    new_lines.append(new_line)
                    added = True
                elif (not (exclude and re.match(exclude, l))
                        and l not in new_lines):
                    new_lines.append(l)
            if not added and new_line not in new_lines:
                print('In Alarm(%s).write(): line added: %s'%(tag,new_line))
                new_lines.append(new_line)
            return new_lines
        # each alarm table is rewritten with this alarm's current values
        new_props = {
          'AlarmList': update_lines(props['AlarmList'],
                                    self.tag+':'+self.formula,exclude),
          'AlarmReceivers': update_lines(props['AlarmReceivers'],
                                         self.tag+':'+self.receivers,exclude),
          'AlarmDescriptions': update_lines(props['AlarmDescriptions'],
                                    self.tag+':'+self.description,exclude),
          'AlarmSeverities': update_lines(props['AlarmSeverities'],
                                          self.tag+':'+self.severity,exclude)
          }
        #print 'New properties of %s are: \n%s' % (self.device,new_props)
        self.api.put_db_properties(self.device,new_props)
        if update: AlarmDS(self.device,api=self.api).init()
def set_severity(self,new_severity):
""" Sets the severity of Alarm and writes in DB """
allowed = panic.properties.SEVERITIES
new_severity = new_severity or DEFAULT_SEVERITY
if new_severity not in allowed:
raise Exception('Severity not allowed!')
else:
old = self.severity
self.severity = new_severity
self.write(exclude=old)
def rename(self,name):
""" Renames the Alarm and writes in DB """
old = self.tag
self.tag = name
self.write(exclude=old)
def add_receiver(self,receiver,write=True):
""" Adds a new receiver """
self.receivers = ','.join([r for r in self.receivers.split(',')
if r.strip()!=receiver]+[receiver])
if write: self.write()
def remove_receiver(self,receiver,write=True):
""" Removes a receiver """
self.receivers = ','.join([r for r in self.receivers.split(',')
if r.strip()!=receiver])
if write: self.write()
def replace_receiver(self,old,new,write=True):
""" Replaces a receiver """
self.remove_receiver(old,False)
self.add_receiver(new,write)
    def get_any(self,k):
        """
        this method will return public or private members calling
        a getter when necessary
        @TODO: this method as well as to_dict can be added to fd.Object
        """
        #state/time as returned by getters
        if k.startswith('get_'):
            # explicit getter name: call it
            v = getattr(self,k)()
        elif k not in self.__dict__ and hasattr(self,'get_%s'%k):
            # no plain attribute, but a get_<k> getter exists: use it
            v = getattr(self,'get_'+k)()
        # _time/_state will not be returned unless explicitly said
        else:
            # plain instance attribute (or None if absent)
            v = self.__dict__.get(k,None)
        return v
def to_dict(self,keys=None):
dct = {}
isprivate = lambda k: k.startswith('_') or k=='api'
for k in (keys or self.__dict__):
if keys or not isprivate(k):
dct[k] = self.get_any(k)
return dct
def to_str(self,keys=None):
d = self.to_dict(keys)
keys = keys or d.keys()
s = ';'.join('%s=%s'%(k,d[k]) for k in keys)
return s
def __repr__(self):
return 'Alarm(%s:%s:%s)' % (self.tag,self.device,self.active)
class AlarmDS(object):
    """
    This Class allows to manage the PyAlarm devices from the AlarmAPI.
    It wraps a single alarm device, caching its configuration, proxies
    and the per-alarm rows read from the Tango database.
    """
    def __init__(self,name,api=None):
        # :param name: Tango device name of the alarm device
        # :param api: parent AlarmAPI instance used for DB access
        self.name = name
        self.api = api
        self.alarms = {}
        self.DeviceClass = ''
        # cached attribute proxies, created lazily on first use
        self._actives = None
        self._acknowledged = None
        self._disabled = None
        self.proxy = None
        self.version = None
        self.get_config(True)
    def init(self):
        """ forces the device to reload its configuration"""
        try:
            self.read()
            if self.DeviceClass == 'AlarmHandler':
                # AlarmHandler reloads its config through the Load() command
                print('Reloading %s' % self.name)
                self.get().Load()
            else:
                print('Init %s device' % self.name)
                self.get().init()
            self.config = None
        except:
            print('Device %s is not running' % self.name)
    def read(self,filters='*'):
        """
        Updates from the database the Alarms related to this device
        Only alarms which AlarmList row matches filters will be loaded
        """
        props = self.get_alarm_properties()
        self.alarms = {}
        for line in props['AlarmList']:
            # strip trailing comments; a row looks like 'TAG:FORMULA'
            line = line.split('#',1)[0].strip()
            if not line or not searchCl(filters,line):
                continue
            try:
                if ':' in line:
                    tag,formula = map(str.strip,line.split(':',1))
                else:
                    tag,formula = line.strip(),'None'
                self.alarms[tag] = {'formula':formula}
                # receivers/description/severity are looked up in their
                # own tables; each falls back independently on error
                try:
                    local_receivers = [r for r in props['AlarmReceivers']
                                       if r.startswith(tag+':')]
                    local_receivers = first(local_receivers or [''])
                    local_receivers = local_receivers.split(':',1)[-1].strip()
                    self.alarms[tag]['receivers'] = local_receivers
                except:
                    traceback.print_exc()
                    self.alarms[tag]['receivers'] = ''
                try:
                    d = first(r for r in props['AlarmDescriptions']
                              if r.startswith(tag+':')).split(':',1)[-1]
                    self.alarms[tag]['description'] = d.strip()
                except:
                    self.alarms[tag]['description'] = ''
                try:
                    s = first(r for r in props['AlarmSeverities']
                              if r.startswith(tag+':')).split(':',1)[-1]
                    self.alarms[tag]['severity'] = s.upper().strip()
                except:
                    self.alarms[tag]['severity'] = DEFAULT_SEVERITY
            except:
                print('Unparsable Alarm!: %s' % line)
        return self.alarms
    def get(self,alarm=None):
        """ Returns alarm object or device proxy
        (for backwards compatibility) """
        return self.alarms.get(alarm) if alarm else self.get_proxy()
    def get_config(self,update=False):
        """ Returns (and caches) the device configuration properties """
        if not getattr(self,'config',None) or update:
            if not self.DeviceClass:
                self.DeviceClass = fn.tango.get_device_class(self.name)
            props = self.api.get_db_properties(self.name,ALARM_CONFIG)
            for p,v in props.items():
                if v in (False,True):
                    props[p] = v
                elif v and v[0] not in ('',None):
                    props[p] = v[0]
                else: #Using default property value
                    try:
                        props[p] = (PyAlarmDefaultProperties[p][-1] or [''])[0]
                    except:
                        print(traceback.format_exc())
            self.config = props
        return self.config
    def get_property(self,prop):
        """ Reads one or several device properties from the DB """
        if isSequence(prop): return self.api.get_db_properties(self.name,prop)
        else: return self.api.get_db_property(self.name,prop)
    def put_property(self,prop,value):
        """ Writes a device property into the DB """
        return self.api.put_db_property(self.name,prop,value)
    def get_alarm_properties(self):
        """ Method used in all panic classes """
        props = self.api.get_db_properties(self.name,ALARM_TABLES.keys())
        #Updating old property names
        if not props['AlarmList']:
            props['AlarmList'] = \
                self.api.get_db_property(self.name,'AlarmsList')
            if props['AlarmList']:
                print('%s:AlarmsList property renamed to AlarmList'%self.name)
                self.api.put_db_properties(self.name,
                    {'AlarmList':props['AlarmList'],'AlarmsList':[]})
        return props
    ##########################################################################
    # Device Proxy Methods
    def get_proxy(self):
        """ Returns a device proxy """
        if self.proxy is None:
            self.proxy = self.api.get_ds_proxy(self.name)
        return self.proxy
    def get_version(self):
        """ Returns the VersionNumber for this device """
        if self.version is None:
            try:
                assert check_device_cached(self.name)
                v = self.get_proxy().read_attribute('VersionNumber').value
            except:
                tracer('AlarmDS(%s).get_version(): device not running'
                       %self.name)
                # fall back to the value stored in the DB properties
                v = self.config.get('VersionNumber',None)
            self.version = fandango.objects.ReleaseNumber(v)
        return self.version
    def get_condition(self):
        # Operation Mode (Enabled formula)
        return self.config['Enabled']
    condition = property(get_condition)
    def get_model(self):
        """
        Returns the proper alarm summary attribute to subscribe.
        It depends on the PyAlarm version
        """
        v = self.get_version()
        if v >= '6.2.0':
            model = self.name+'/alarmsummary'
        else:
            model = self.name+'/activealarms'
        print('%s.get_model(%s): %s'%(self.name,v,model))
        return model.lower()
    def ping(self):
        """ Returns ping time, or None if the device is not running """
        try:
            return self.get().ping()
        except:
            return None
    def state(self):
        """ Returns device state """
        try: return self.get().State()
        except: return None
    def status(self):
        """ Returns device status """
        return self.get().Status()
    def enable(self,tag=None,comment=None):
        """
        If Tag is None, this method will enable the whole device,
        but individual disables will be kept
        Comment is ignored by device
        """
        if tag is None:
            self.api.put_db_property(self.name,'Enabled',True)
            self.init()
            print('%s: Enabled!' %self.name)
        else:
            tags = [a for a in self.alarms if matchCl(tag,a)]
            print('%s: Enabling %d alarms: %s' % (self.name,len(tags),tags))
            # BUGFIX: Enable was called with 'a' (the leaked comprehension
            # variable) instead of each matched tag 't'
            [self.get().Enable([str(t)]) for t in tags]
    def disable(self,tag=None,comment=None,timeout=None):
        """ If Tag is None this method disables the whole device
        """
        if tag is None:
            self.api.put_db_property(self.name,'Enabled',False)
            self.init()
            print('%s: Disabled!' %self.name)
        else:
            tags = [a for a in self.alarms if matchCl(tag,a)]
            print('%s: Disabling %d alarms: %s'%(self.name,len(tags),tags))
            [self.get().Disable(
                [str(a) for a in (t,comment,timeout) if a is not None])
                for t in tags]
    def get_active_alarms(self, value = None):
        """ Returns the list of currently active alarms """
        if self._actives is None:
            self._actives = CachedAttributeProxy(self.name+'/ActiveAlarms',
                                                 keeptime=3000.)
        if value is None:
            try:
                value = getAttrValue(self._actives.read(),None)
            except Exception as e:
                # callers may check isinstance(result,Exception)
                return e
        if not value: return {}
        #Parsing ActiveAlarms: TAG:DATE[:Formula]
        r = defaultdict(float)
        for line in value:
            splitter = ';' if ';' in line else ':'
            tag,line = str(line).split(splitter,1)
            if not line:
                r[tag] = 0
                continue
            date = ':'.join(line.split(':')[:3])
            try:
                #assumes ctime format = '%a %b %d %H:%M:%S %Y'
                r[tag] = time.mktime(time.strptime(date))
            except:
                try:
                    r[tag] = str2time(date)
                except:
                    # Format not compatible (e.g. old PyAlarm)
                    r[tag] = r[tag] or END_OF_TIME
        return r
    def get_acknowledged(self,alarm=None):
        """ Returns acknowledged tags list, or membership test for alarm """
        if self._acknowledged is None:
            self._acknowledged = CachedAttributeProxy(
                self.name+'/AcknowledgedAlarms',keeptime=3000.)
        value = getAttrValue(self._acknowledged)
        if alarm is not None:
            return getattr(alarm,'tag',alarm) in value
        else:
            return value
    def get_disabled(self,alarm=None):
        """ Returns disabled tags list, or membership test for alarm """
        if self._disabled is None:
            self._disabled = CachedAttributeProxy(
                self.name+'/DisabledAlarms',keeptime=3000.)
        # the tag/date separator changed to ':' in PyAlarm 6.3.0
        sep = ':' if self.get_version()>='6.3.0' else ' '
        value = [t.split(sep)[0] for t in getAttrValue(self._disabled)]
        if alarm is not None:
            return getattr(alarm,'tag',alarm) in value
        else:
            return value
    def reset(self,alarm,comment):
        """
        Reset of an active Alarm
        Returns True if there's no more active alarms, else returns False
        """
        args = [str(alarm),str(comment)]
        try:
            return (False if self.get().ResetAlarm(args) else True)
        except:
            print('Device %s is not running' % self.name)
            print(traceback.format_exc())
            return None
    def acknowledge(self,alarm,comment):
        """
        Acknowledge of an active Alarm
        Returns True if there's no more active alarms, else returns False
        """
        args = [str(alarm),str(comment)]
        try:
            return (False if self.get().Acknowledge(args) else True)
        except:
            print('Device %s is not running' % self.name)
            print(traceback.format_exc())
            return None
    def __repr__(self):
        return 'AlarmDS(%s, %d alarms)' % (self.name,len(self.alarms))
class AlarmAPI(fandango.SingletonMap):
"""
Panic API is a dictionary-like object
It will load alarms matching the given filters from the given tango_host
Filters will apply only to device names by default.
Will be searched also in formulas if extended = True
(or None and an initial search returnd no results).
"""
CURRENT = None
_phonebooks = {}
    def __init__(self,filters = '*',tango_host = None,
                 extended = None,
                 logger = fandango.log.WARNING):
        # :param filters: device/server name filter (regexp or list)
        # :param tango_host: defaults to the current TANGO_HOST
        # :param extended: also match filters against formulas; None means
        #   retry with extended=True if the first load finds nothing
        # :param logger: log level or a callable used for all log methods
        self.__init_logger(logger)
        self.warning('In AlarmAPI(%s)'%filters)
        self.alarms = {}
        self.devices = fandango.CaselessDict()
        self.filters = filters
        self.tango_host = tango_host or get_tango_host()
        # (receivers list, timestamp) cache for GlobalReceivers property
        self._global_receivers = [],0
        # expose dict-like behavior by forwarding to self.alarms
        for method in ['__getitem__','__setitem__','keys',
                       'values','__iter__','items','__len__']:
            setattr(self,method,getattr(self.alarms,method))
        self._eval = fandango.TangoEval(cache=2*3,use_tau=False,timeout=10000)
        self.macros = [
            ('GROUP(%s)',self.GROUP_EXP,self.group_macro)
            ]
        [self._eval.add_macro(*m) for m in self.macros]
        try: self.servers = fandango.servers.ServersDict(tango_host=tango_host)
        except: self.servers = fandango.servers.ServersDict()
        self.db = self.servers.db
        self.load(self.filters,extended=extended)
        # retry with extended search if nothing was found
        if extended is None and not len(self.keys()):
            self.load(self.filters, extended = True)
    def __init_logger(self,logger):
        # a callable is used directly for every log level; a level name or
        # number creates a fandango Logger and binds its level methods
        if fandango.isCallable(logger):
            self.log = self.debug = self.info = \
                self.warning = self.error = logger
        elif fandango.isNumber(logger) or fandango.isString(logger):
            self._logger = fandango.log.Logger('PANIC',level=logger)
            for l in fandango.log.LogLevels:
                setattr(self,l.lower(),getattr(self._logger,l.lower()))
            self.log = self.debug
    ## Dictionary-like methods
    def __get_tag(self,k):
        # normalizes keys: Alarm objects become their tag; tango model
        # names (device/attribute) are reduced to the attribute part
        if isinstance(k,Alarm):
            return self.__get_tag(k.tag)
        elif clmatch(retango,k):
            if ':' in k:
                self.warning('[%s]: AlarmAPI does not support multi-host!'%k)
            else:
                k = k.split('/')[-1]
        return k
    def __getitem__(self,k): #*a,**k):
        return self.alarms.__getitem__(self.__get_tag(k)) #*a,**k)
    def __setitem__(self,k,v):
        return self.alarms.__setitem__(self.__get_tag(k),v)
    def __contains__(self,k):
        # case-independent membership (see has_tag)
        return self.has_tag(self.__get_tag(k),False)
    def __len__(self): return self.alarms.__len__()
    def __iter__(self): return self.alarms.__iter__()
    def keys(self): return self.alarms.keys()
    def values(self): return self.alarms.values()
    def items(self): return self.alarms.items()
    def load(self,filters=None,exported=False,extended=False):
        """
        Reloads all alarm properties from the database
        Alarms will be loaded if filters match the device or server name
        If exported, only running devices will be checked.
        If extended, other alarms will be loaded if the AlarmList row
        matches the filter
        """
        #Loading alarm devices list
        filters = filters or self.filters or '*'
        if isSequence(filters): filters = '|'.join(filters)
        filters = filters.lower()
        all_alarms = {}
        self.log('Loading Alarm devices matching %s'%(filters))
        t0 = tdevs = time.time()
        dbd = fandango.tango.get_database_device(db=self.db)
        all_devices = []
        all_servers = []
        # any of these Tango classes may host alarms
        for cl in ('PyAlarm','PanicEngineDS','PanicViewDS','AlarmHandler'):
            all_devices.extend(map(str.lower,dbd.DbGetDeviceList(['*',cl])))
            all_servers.extend(map(str.lower,dbd.DbGetServerList(cl+'/*')))
        if exported:
            dev_exported = fandango.get_all_devices(
                exported=True,host=self.tango_host)
            all_devices = [d for d in all_devices if d in dev_exported]
        #If filter is the exact name of a device, only this will be loaded
        if filters in all_devices:
            all_devices = matched = [filters]
        elif (filters!='*' and '/' in filters
                and any(matchCl(filters,s) for s in all_servers)):
            self.servers.load_by_name(filters)
            matched = [d.lower() for d in self.servers.get_all_devices()
                       if d.lower() in all_devices]
            #If filter is the exact name of a server, only this will be loaded
            if filters in self.servers:
                all_devices = matched
        else:
            # If no server is matched, all devices are checked
            matched = []
        tdevs = time.time() - tdevs
        tprops = time.time()
        all_devices = [d.lower().strip() for d in all_devices]
        for d in all_devices:
            self.log('Loading device: %s'%d)
            ad = AlarmDS(d,api=self)
            if filters=='*' or d in matched or matchCl(filters,d):
                self.devices[d],all_alarms[d] = ad,ad.read()
            elif extended:
                #Parsing also if the filters are referenced in the formula
                #This kind of extended filter exceeds the domain concept
                alarms = ad.read(filters=filters)
                if alarms:
                    self.devices[d],all_alarms[d] = ad,alarms
        # drop devices that disappeared from the database
        removed = [d for d in self.devices.keys()
                   if d.lower().strip() not in all_devices]
        for r in removed:
            self.devices.pop(r)
            print('>>> Removed: %s'%r)
        tprops=(time.time()-tprops)
        self.log('\t%d Alarm devices loaded, %d alarms'%(
            len(self.devices),sum(len(v) for v in all_alarms.values())))
        ######################################################################
        tcheck = time.time()
        #Loading phonebook
        #self.get_phonebook(load=True)
        #Verifying that previously loaded alarms still exist
        for k,v in self.alarms.items()[:]:
            found = False
            for d,vals in all_alarms.items():
                if d.lower() == v.device.lower():
                    if k in vals:
                        found = True
                    else:
                        self.warning('%s not in %s: %s'%(k,d,vals))
            if not found:
                self.warning('AlarmAPI.load(): WARNING!: Alarm %s has been '
                    'removed from device %s' % (k,v.device))
                self.alarms.pop(k)
        #Updating alarms dictionary
        for d,vals in sorted(all_alarms.items()):
            for k,v in vals.items():
                self.log('Loading alarm %s.%s (new=%s): %s'%(
                    d,k,k not in self.alarms,v))
                if k in self.alarms: #Updating
                    if self.alarms[k].device.lower()!=d.lower():
                        self.warning('AlarmAPI.load(): WARNING!: Alarm %s '
                            'duplicated in devices %s and %s' %
                            (k,self.alarms[k].device,d))
                    #ALARM State is not changed here, if the formula changed
                    # something it will be managed by the
                    # AutoReset/Reminder/Recovered cycle
                    self.alarms[k].setup(k,device=d,formula=v['formula'],
                        description=v['description'],receivers=v['receivers'],
                        severity=v['severity'])
                else: #Creating a new alarm
                    self.alarms[k] = Alarm(k,api=self,device=d,
                        formula=v['formula'],description=v['description'],
                        receivers=v['receivers'],severity=v['severity'])
        tcheck = time.time()-tcheck
        self.log('AlarmAPI.load(%s): %d alarms loaded'%(
            filters,len(self.alarms)))
        AlarmAPI.CURRENT = self
        self.info('%ss dedicated to,\n load devices %s\n load properties %s\n'
            'other checks %s'% (time.time()-t0,tdevs,tprops,tcheck))
        return
    def load_from_csv(self,filename,device=None,write=True):
        """
        Imports alarms from a .csv file; only entries that differ from
        the already loaded ones are added/modified.
        :param device: forces a target device for every row
        :param write: if False, only returns the parsed rows
        """
        #fun.tango.add_new_device('PyAlarm/RF','PyAlarm','SR/RF/ALARMS')
        #DEVICE='sr/rf/alarms'
        #f = '/data/Archiving/Alarms/RF_Alarms_jocampo_20120601.csv')
        alarms = {}
        csv = fandango.CSVArray(filename,header=0,comment='#',offset=1)
        for i in range(len(csv)):
            line = fandango.CaselessDict(csv.getd(i))
            line['tag'] = line.get('tag',line.get('alarm_name'))
            # device defaults to SYSTEM/SUBSYSTEM/ALARMS from the row
            line['device'] = str(device
                or line.get('device')
                or '%s/%s/ALARMS'%(line.get('system'),line.get('subsystem')
                    or 'CT')).lower()
            alarms[line['tag']] = dict([('load',False)]
                +[(k,line.get(k)) for k in CSV_FIELDS] )
        loaded = alarms.keys()[:]
        for i,tag in enumerate(loaded):
            new,old = alarms[tag],self.alarms.get(tag,None)
            # unchanged alarms are dropped from the import set
            if old and all(new.get(k)==getattr(old,k)
                           for k in CSV_FIELDS):
                alarms.pop(tag)
            elif write:
                print('%d/%d: Loading %s from %s: %s'%(
                    i,len(loaded),tag,filename,new))
        if write:
            devs = set(v['device'] for v in alarms.values())
            for d in devs:
                if d not in self.devices:
                    raise Exception('Alarm device %s does not exist!'%d)
            for i,(tag,v) in enumerate(alarms.items()):
                if tag not in self:
                    self.add(**v)
                else:
                    self.modify(**v)
            [self.devices[d].init() for d in devs]
        self.load()
        return alarms
def export_to_csv(self,filename,regexp=None,
alarms=None,config=False,states=False):
"""
Saves the alarms currently loaded to a .csv file
"""
csv = fandango.CSVArray(header=0,comment='#',offset=1)
alarms = self.filter_alarms(regexp,alarms=alarms)
columns = CSV_FIELDS + (['ACTIVE'] if states else [])
csv.resize(1+len(alarms),len(CSV_FIELDS))
csv.setRow(0,map(str.upper,CSV_FIELDS))
for i,(d,alarm) in enumerate(sorted((a.device,a) for a in alarms)):
row = [getattr(alarm,k) for k in CSV_FIELDS]
if states: row += alarm.get_active()
csv.setRow(i+1,row)
csv.save(filename)
return
    def export_to_dict(self,regexp=None,alarms=None,config=True,states=False):
        """
        If config is True, the returned dictionary contains a double key:
        - data['alarms'][TAG] = {alarm config}
        - data['devices'] = {PyAlarm properties}
        With states=True each alarm also carries 'active'/'date' fields.
        """
        alarms = self.filter_alarms(regexp,alarms=alarms)
        data = dict((a.tag,a.to_dict()) for a in alarms)
        if states:
            for a,s in data.items():
                s['active'] = self[a].get_active()
                s['date'] = time2str(s['active'])
        if config:
            # nest alarms and add every device's configuration properties
            data = {'alarms':data}
            data['devices'] = dict((d,t.get_config())
                                   for d,t in self.devices.items())
        return data
    def load_configurations(self,filename,regexp=None):
        """
        Updates devices properties values from a .csv file
        (the format written by export_configurations); only rows matching
        the current tango_host and regexp are applied.
        """
        csv = fandango.CSVArray(filename,header=0,comment='#',offset=1)
        print('Loading %s file'%filename)
        for i in range(csv.size()[0]):
            l = csv.getd(i)
            if not matchCl(l['Host'],self.tango_host):
                continue
            d = l['Device']
            if (not d or d not in self.devices
                    or regexp and not matchCl(regexp,d)):
                continue
            # only properties whose value differs are written back
            diff = [k for k,v in self.devices[d].get_config().items()
                    if str(v).lower()!=str(l[k]).lower()]
            if diff:
                print('Updating %s properties: %s'%(d,diff))
                self.put_db_properties(d,dict((k,[l[k]]) for k in diff))
                self.devices[d].init()
        return
def export_configurations(self,filename,regexp=None):
"""
Save devices property values to a .csv file
"""
lines = [['Host','Device']+ALARM_CONFIG]
for d,v in self.devices.items():
if regexp and not matchCl(regexp,d): continue
c = v.get_config()
lines.append([self.tango_host,d]+[str(c[k]) for k in ALARM_CONFIG])
open(filename,'w').write('\n'.join('\t'.join(l) for l in lines))
print('%s devices exported to %s'%(len(lines),filename))
def has_tag(self,tag,raise_=False):
""" check for tags is case independent """
nt = first((k for k in self.keys() if k.lower()==tag.lower()),None)
if raise_ and nt is None:
raise Exception('TagDoesntExist:%s'%tag)
return nt
def save_tag(self,tag):
""" Shortcut to force alarm update in database """
self[self.has_tag(tag,True)].write()
def check_tag(self,tag,raise_=False):
""" Checks if tag is a valid tag """
if clmatch('^[a-zA-Z_][a-zA-Z_0-9]*$',tag):
return True
elif raise_:
raise Exception("TagContainsInvalidCharacters")
else:
return False
    def get_device(self,key,full=False):
        """ Given a device or alarm name returns an AlarmDS object """
        if key in self.alarms:
            # key is an alarm tag: return its hosting device
            return self.devices[self.alarms[key].device]
        if not full and ':' in key:
            # strip the host:port prefix from a full tango model name
            key = parse_tango_model(key).devicename
            #key = key.split(':',1)[-1].split('/',1)[1]
        if key in self.devices:
            return self.devices[key]
        return None
def get_ds_proxy(self,dev):
try:
return self.servers.proxies[dev]
except:
# If failed, convert into a local tango name
return self.get_device(dev).get_proxy()
def get_db_properties(self,ref,props):
if '/' not in ref:
return self.db.get_property(ref,props)
elif ref.count('/')>=2:
return self.db.get_device_property(ref,props)
else:
raise Exception,'Unknown %s'%ref
def get_db_property(self,ref,prop):
return list(self.get_db_properties(ref,[prop])[prop])
def put_db_properties(self,ref,props):
"""
Inserts multiple properties into database as a dict {keys:values}
"""
if '/' not in ref:
self.db.put_property(ref,props)
elif ref.count('/')>=2:
self.db.put_device_property(ref,props)
else:
raise Exception,'Unknown %s'%ref
def put_db_property(self,ref,prop,value):
"""
Insert a single property into Tango Database
Value is converted into list
"""
if not isSequence(value):
value = [value]
self.put_db_properties(ref,{prop:value})
def get_class_property(self,klass,prop):
#return list(self.servers.db.get_class_property(klass,[prop])[prop])
return list(getPanicProperty(prop))
def put_class_property(self,klass,prop,value):
if not isSequence(value): value = [value]
#self.servers.db.put_class_property(klass,{prop:value})
setPanicProperty(prop,value)
    @staticmethod
    def get_phonebook(host='', load=False):
        """
        gets the phonebook for the selected host
        Entries are 'NAME:receivers' lines from the PANIC Phonebook
        property; results are cached per tango_host unless load=True.
        """
        tango_host = getattr(host,'tango_host',host) or get_tango_host()
        if load or not AlarmAPI._phonebooks.get(tango_host,None):
            print('%s: AlarmAPI.get_phonebook(%s, True)' %
                  (fn.time2str(), tango_host))
            ph,prop = {}, getPanicProperty('Phonebook')
            for line in prop:
                # '#' starts a comment; the rest is NAME:value
                line = line.split('#',1)[0]
                if line:
                    ph[line.split(':',1)[0]] = line.split(':',1)[-1]
            #Replacing nested keys
            # an entry value may reference other entries; expand one level
            for k,v in ph.items():
                for s in v.split(','):
                    for x,w in ph.items():
                        if s==x:
                            ph[k] = v.replace(s,w)
            AlarmAPI._phonebooks[tango_host] = ph
        return AlarmAPI._phonebooks[tango_host]
    def parse_phonebook(self,receivers):
        """
        Replaces phonebook entries in a receivers list
        The behavior of phonebook parsing is dependent
        on using '%' to mark phonebook entries.
        """
        ph = self.get_phonebook()
        result,receivers = [],[s.strip() for s in receivers.split(',')]
        for r in receivers:
            if r in ph:
                # whole receiver is a phonebook entry
                r = ph[r]
            elif '%' in r:
                # entry embedded inside an action (SMS/MAIL/...) string
                for p in ph:
                    #re.split used to discard partial matches
                    if p in re.split('[,:;/\)\(]',r):
                        r = r.replace(p,ph[p])
            result.append(r)
        return ','.join(result)
def remove_phonebook(self, tag):
""" Removes a person from the phonebook """
prop = getPanicProperty('Phonebook')
if tag not in str(prop): raise Exception('NotFound:%s'%tag)
self.save_phonebook([p for p in prop if not p.split(':',1)[0]==tag])
self.on_phonebook_changed(tag)
    def edit_phonebook(self, tag, value, section='',notify=True):
        """ Adds a person to the phonebook
        :param section: '#Section' comment line to insert the entry after
        :param notify: reinitialize devices using this receiver
        """
        prop = getPanicProperty('Phonebook')
        name = tag.upper()
        value = '%s:%s'%(name,value)
        lines = [line.strip().split(':',1)[0].upper() for line in prop]
        if name in lines: #Replacing
            index = lines.index(name)
            print('AlarmAPI.edit_phonebook(%s,%s,%s), replacing at [%d]'%(
                tag,value,section,index))
            prop = prop[:index]+[value]+prop[index+1:]
        else: #Adding
            if section and '#' not in section: section = '#%s'%section
            # insert right after the section header, or append at the end
            index = len(lines) if not section or section not in lines \
                else lines.index(section)+1
            print('AlarmAPI.edit_phonebook(%s,%s,%s), adding at [%d]'%(
                tag,value,section,index))
            prop = prop[:index]+[value]+prop[index:]
        self.save_phonebook(prop)
        if notify: self.on_phonebook_changed(name)
def on_phonebook_changed(self,tag):
devs = set()
for a in self.alarms.values():
recs = a.receivers
if not isSequence(recs):
recs = [r.strip() for r in recs.split(',')]
if tag in recs:
devs.add(a.device.lower())
print('on_phonebook_changed(%s): updating %s'%(tag,devs))
[self.devices[d].init() for d in devs]
def save_phonebook(self, new_prop):
""" Saves a new phonebook in the database """
setPanicProperty('Phonebook',new_prop)
AlarmAPI._phonebooks[self.tango_host] = None #Force to reload
return new_prop
    @Cached(expire=10.)
    def get_user_filters(self):
        """
        returns a name:filter dictionary
        Filters are stored JSON-encoded in the PANIC/UserFilters free
        property as 'name:json' lines; results are cached for 10s.
        """
        import json
        prop = self.get_db_property('PANIC','UserFilters')
        prop = [t.split(':',1) for t in prop]
        return dict((t[0],unicode2str(json.loads(t[1]))) for t in prop)
def set_user_filters(self,filters,overwrite=True):
"""
filters should be a name:filter dictionary
"""
import json
assert isMapping(filters),'Should be a dictionary!'
if not overwrite:
prevs = self.get_user_filters()
prevs.update(filters)
filters = prevs
value = []
for name,f in filters.items():
value.append('%s:%s'%(name,f if isString(f) else json.dumps(f)))
self.put_db_property('PANIC','UserFilters',value)
    def get_global_receivers(self,tag='',renew=False):
        """
        Returns receivers from the GlobalReceivers class property.
        Lines are 'mask:receivers'; masks are comma-separated tag
        patterns, '!' negates a pattern. The property is cached 1h.
        """
        try:
            if (renew or self._global_receivers[-1]<time.time()-3600):
                prop = getPanicProperty('GlobalReceivers')
                self._global_receivers = (prop,time.time())
            else:
                prop = self._global_receivers[0]
            if not tag:
                return prop
            else:
                prop = [p.split(':',1) for p in prop]
                rows = []
                for line in prop:
                    # a line without ':' applies to every tag ('*')
                    mask = (line[0] if len(line)>1 else '*').split(',')
                    neg = [m[1:] for m in mask if m.startswith('!')]
                    if neg and any(matchCl(m,tag) for m in neg):
                        continue
                    pos = [m for m in mask if not m.startswith('!')]
                    if not pos or any(matchCl(m,tag) for m in pos):
                        rows.append(line[-1])
                return ','.join(rows)
        except:
            print('>>> Exception at get_global_receivers(%s)'%tag)
            traceback.print_exc()
            return ''
    # regexp used by TangoEval to recognize GROUP(...) macros in formulas
    GROUP_EXP = fandango.tango.TangoEval.FIND_EXP.replace('FIND','GROUP')
    def group_macro(self,match):
        """
        Expands a GROUP(patterns[;condition]) macro into an any([...])
        expression over the matching alarm attributes.
        For usage details see:
        https://github.com/tango-controls/PANIC/
        blob/documentation/doc/recipes/AlarmsHierarchy.rst
        """
        match,cond = match.split(';',1) if ';' in match else (match,'')
        #if '/' not in match and self._eval._locals.get('DEVICE',None):
            #match = self._eval._locals['DEVICE']+'/'+match
        exps = match.split(',')
        attrs = []
        for e in exps:
            if '/' in e:
                # device/attr pattern: match against devices and alarms
                attrs.extend(d+'/'+a
                    for dev,attr in [e.rsplit('/',1)]
                    for d,dd in self.devices.items()
                    for a in dd.alarms
                    if matchCl(dev,d) and matchCl(attr,a))
            else:
                # bare tag pattern: match against known alarm tags
                attrs.extend(self[a].get_attribute(full=True)
                    for a in self if matchCl(e,a))
        if not cond:
            # default condition: alarm just changed to active (delta > 0)
            attrs = [m+'.delta' for m in attrs]
            cond = 'x > 0'
        exp = 'any([%s for x in [ %s ]])'%(cond,' , '.join(attrs))
        return exp
    def split_formula(self,formula,keep_operators=False):
        """
        Splits a formula into its top-level 'and'/'or' operands,
        ignoring operators nested inside (), [] or {}.
        :param keep_operators: also emit the 'and'/'or' tokens
        """
        f = self[formula].formula if formula in self else formula
        # i: scan index, count: bracket depth, buff: current operand
        i,count,buff,final = 0,0,'',[]
        while i<len(f):
            s = f[i]
            if s in '([{': count+=1
            if s in ')]}': count-=1
            # only whitespace at depth 0 can precede a top-level operator
            if not count and s in ' \t':
                if f[i:i+4].strip().lower() == 'or':
                    nx = 'or'
                    i+=len(nx)+2
                elif f[i:i+5].strip().lower() == 'and':
                    nx = 'and'
                    i+=len(nx)+2
                else:
                    nx = ''
                if nx:
                    final.append(buff.strip())
                    if keep_operators:
                        final.append(nx)
                    buff = ''
                    continue
            buff+=s
            i+=1
        nx=''
        return final
def parse_alarms(self, formula):
"""
Searches for alarm tags used in the formula
"""
alnum = '(?:^|[^/a-zA-Z0-9-_])([a-zA-Z0-9-_]+)'#(?:$|[^/a-zA-Z0-9-_])'
#It's redundant to check for the terminal character, re already does it
var = re.findall(alnum,formula)
#print '\tparse_alarms(%s): %s'%(formula,var)
return [a for a in self.keys() if a in var]
    def replace_alarms(self, formula):
        """
        Replaces alarm tags by its equivalent device/alarm attributes
        Longest tags are replaced first to avoid partial overlaps.
        Returns the formula unchanged if anything fails.
        """
        try:
            var = self.parse_alarms(formula)
            #print 'replace_alarms(%s): %s'%(formula,var)
            if var:
                # reversed sort by length: longest tags first
                for l,a in reversed([(len(s),s) for s in var]):
                    x = '[^/a-zA-Z0-9-_\"\']'
                    x = '(?:^|%s)(%s)(?:$|%s)'%(x,a,x)
                    attr = self[a].device+'/'+a
                    m,new_formula = True,''
                    #print 'replacing %s by %s'%(a,attr)
                    while m:
                        m = re.search(x,formula)
                        if m:
                            # keep the delimiter chars around the tag
                            start,end = m.start(),m.end()
                            if not formula.startswith(a): start+=1
                            if not formula.endswith(a): end-=1
                            new_formula += formula[:start]+attr
                            formula = formula[end:]
                    formula = new_formula+formula
            return formula
        except:
            print('Exception in replace_alarms():%s'%traceback.format_exc())
            return formula
    def parse_attributes(self, formula, replace = True):
        """ Returns all tango attributes that appear in a formula
        :param replace: expand alarm tags to device/alarm attributes first
        """
        if formula in self.alarms: formula = self.alarms[formula].formula
        formula = getattr(formula,'formula',formula)
        attributes = self._eval.parse_variables(self.replace_alarms(formula)
                                                if replace else formula)
        # parse_variables yields (device,attribute,...) tuples
        return sorted('%s/%s'%(t[:2]) for t in attributes)
    def evaluate(self, formula, device=None,timeout=1000,_locals=None,
            _raise=True):
        """
        Evaluates formula either remotely (on the given alarm device via
        evaluateFormula) or locally using the TangoEval instance.
        Returns the evaluation result, or the raised Exception object.
        """
        #Returns the result of evaluation on formula
        #Both result and attribute values are kept!,
        #be careful to not generate memory leaks
        try:
            # bare operators are not evaluable
            if formula.strip().lower() in ('and','or'):
                return None
            if device and not check_device_cached(device):
                device = None
            if device and device in self.devices:
                d = self.devices[device].get()
                t = d.get_timeout_millis()
                d.set_timeout_millis(timeout)
                try:
                    r = d.evaluateFormula(formula)
                    return r
                except Exception,e:
                    raise e
                finally:
                    # always restore the proxy's original timeout
                    d.set_timeout_millis(t)
            else:
                self._eval.set_timeout(timeout)
                self._eval.update_locals({'PANIC':self})
                if _locals: self._eval.update_locals(_locals)
                formula = self.replace_alarms(formula)
                self.debug('AlarmAPI.evaluate(%s,%s)'%(formula,_locals))
                return self._eval.eval(formula,_raise=_raise)
        except Exception,e:
            return e
    def get(self,tag='',device='',attribute='',receiver='', severity='',
            alarms = None,limit=0,strict=False):
        """
        Gets alarms matching the given filters
        (tag,device,attribute,receiver,severity)
        :param limit: 0 returns all matches, 1 a single alarm object
        :param strict: require exact attribute equality instead of regexp
        """
        result=[]
        alarms = alarms or self.values()
        # a full tango model name is split into device + attribute(tag)
        m = fn.parse_tango_model(tag)
        if m:
            tag = m.attribute
            device = m.device
        if limit==1 and tag in self.alarms:
            found = [self[tag]]
        else:
            filters = {'tag':tag,'device':device,'attribute':attribute,
                       'receivers':receiver,'severity':severity}
            if strict:
                found = [a for a in alarms if
                    all([getattr(a,f)==v for f,v in filters.items() if v])]
            else:
                found = self.filter_alarms(filters,alarms)
        if not limit: return found
        elif limit==1: return found[0]
        else: return found[:limit]
        #if tag and not tag.endswith('$'): tag+='$'
        #if attribute and not attribute.endswith('$'): attribute+='$'
        #if device and not device.endswith('$'): device+='$'
        ##if receiver and not receiver.startswith('%'): receiver='%'+receiver
        #if severity and not severity.endswith('$'): severity+='$'
        #for alarm in (alarms or self.alarms.values()):
            #if ((not tag or searchCl(tag,alarm.tag)) and
                #(not device or searchCl(device,alarm.device)) and
                #(not attribute or searchCl(attribute,alarm.formula)) and
                #(not receiver or receiver in alarm.receivers) and
                #(not severity or searchCl(severity,alarm.severity))):
                #result.append(alarm)
        #return result
    def get_basic_alarms(self):
        """
        Children are those alarms that have no alarms below or have
        a higher alarm that depends from them.

        Returns the Alarm objects whose tags appear as dependencies of other
        alarms, or that depend on no other alarm at all.
        """
        self.log('Getting Alarm children ...')
        result=[]
        for a,v in self.items():
            # parse_alarms returns the alarm tags referenced by a formula.
            children = self.parse_alarms(v.formula)
            if children:
                result.extend(children)
            else:
                # Alarm with no alarm dependencies is "basic" by itself.
                result.append(a)
        result = set(result)
        return [v for a,v in self.items() if a in result]
    def filter_alarms(self, filters, alarms = None):
        """
        filters must be a dictionary: {filter:regexp}
        alarms must be a list of alarm objects
        regexps accept '!' to exclude a certain match
        Tries to apply all default filters:
            'tag','name',
            'device','active','severity','regexp','receivers'
            'formula','attribute','history','failed','hierarchy'

        Filters are applied sequentially; each pass narrows the candidate
        list, so the result matches ALL given conditions.
        """
        alarms = alarms or self.values()
        filters = filters or {}
        # A bare string is treated as a generic regexp filter.
        if isString(filters): filters = {'regexp':filters}
        exclude = []
        self.log('AlarmAPI.filter_alarms(%s)'%filters)
        for f,r in filters.items():
            if f in ('name','alarm'): f = 'tag'
            if not r: continue
            result = []
            for a in alarms:
                ok = False
                # Tags are resolved to Alarm objects on the fly.
                if isString(a): a = self[a]
                if f == 'regexp':
                    ## Regexp will be used to explicitly reject an alarm
                    regexp = r.split(',')
                    for e in regexp:
                        # A leading/embedded '!' negates the expression.
                        n,e = '!' in e,e.strip('!')
                        s = str(map(str,a.to_dict().values()))
                        m = searchCl(e,s)
                        if m and n: exclude.append(a.tag)
                        elif m and not n: ok = True
                        elif not m and n and len(regexp)==1: ok = True
                    if a.tag in exclude: continue
                if f == 'attribute':
                    attrs = self.parse_attributes(a.formula)
                    if any(searchCl(r,t,0,1) for t in attrs): ok = True
                elif f == 'hierarchy':
                    r = r.upper()
                    # Alarms referencing other alarms are TOP of the hierarchy.
                    is_top = self.parse_alarms(a.formula)
                    if not xor(is_top,r == 'TOP'): ok = True
                elif f == 'severity':
                    # Matches alarms at the given severity OR HIGHER.
                    r,s = r.upper().strip(),a.severity.upper().strip()
                    s = s or DEFAULT_SEVERITY
                    if SEVERITIES[s]>=SEVERITIES[r]: ok = True
                elif f == 'receivers':
                    v = self.parse_phonebook(a.receivers)
                    if searchCl(r,v,0,1): ok = True
                else:
                    # Generic attribute match; booleans compared by truthiness.
                    v = getattr(a,f,'')
                    if isString(v):
                        if v and searchCl(r,v,0,1): ok = True
                    else: ok = not xor(isFalse(r),isFalse(v))
                if ok: result.append(a)
            alarms = result
        return alarms
def filter_hierarchy(self, rel, alarms = None):
"""
TOP are those alarms which state is evaluated using other Alarms values.
BOTTOM are those alarms that have no alarms below or
have a TOP alarm that depends from them.
"""
return self.filter_alarms({'hierarchy':rel})
def filter_severity(self, sev, alarms = None):
return self.filter_alarms({'severity':sev})
    def get_states(self,tag='',device=''):
        """Return the active state of one alarm (by tag), of every alarm of
        one device, or of all known devices as a {tag: state} dict."""
        device = device.lower()
        if tag:
            if not tag in self.alarms: return None
            return self.alarms[tag].get_active()
        elif device:
            if device not in self.devices: return {}
            d = self.devices[device]
            try:
                dp = d.get()
                if dp.ping():
                    # Read every alarm attribute of the device in one call.
                    als = sorted(self.devices[device].alarms.keys())
                    ats = [self.alarms[a].get_attribute() for a in als]
                    vals = [v.value for v in dp.read_attributes(ats)]
                    return dict((a,t) for a,t in zip(als,vals))
                else:
                    raise Exception('')
            except Exception,e:
                # Device unreachable: mark all of its alarms as unknown (None).
                print 'device %s is not running'%device
                traceback.print_exc()
                [setattr(self.alarms[a],'active',None) for a in d.alarms]
                return dict((a,None) for a in d.alarms)
        else:
            # No filter given: aggregate the states of every device.
            vals = dict()
            [vals.update(self.get_states(device=d)) for d in self.devices]
            return vals
    def get_configs(self,tag='*'):
        """Return a {tag: config_dict} summary (device, severity, receiver
        flags and the device-level ALARM_CONFIG properties) for every alarm
        matching *tag*."""
        result = {}
        for alarm in self.get(tag):
            # Expand phonebook aliases so flags can be detected in the text.
            reks = self.parse_phonebook(alarm.receivers)
            result[alarm.tag] = {
                'Device':alarm.device,
                'Severity':alarm.severity,
                'Snap':'SNAP' in reks,
                'Email':'@' in reks,
                'Action':'ACTION' in reks,
                'SMS':'SMS' in reks,
                }
            # Merge in the PyAlarm device configuration properties.
            result[alarm.tag].update((k,v)
                for k,v in self.devices[alarm.device].get_config().items()
                if k in ALARM_CONFIG)
        return result
    def get_admins_for_alarm(self,alarm=''):
        """Return the PanicAdminUsers property values plus, if *alarm* is
        given, the user part of every e-mail receiver of that alarm."""
        users = filter(bool,
                    getPanicProperty('PanicAdminUsers'))
        if users:
            if alarm:
                # Add "user" from each "user@host" receiver of the alarm.
                users = users+[r.strip().split('@')[0] for r in
                    self.parse_phonebook(self[alarm].receivers).split(',')
                    if '@' in r]
        # NOTE(review): when the property is empty this returns None
        # implicitly, not an empty list -- confirm callers expect that.
        return users
    def add(self,tag,device,formula='',description='',receivers='',
            severity=DEFAULT_SEVERITY, load=True, config=None,overwrite=False):
        """ Adds a new Alarm to the database.

        Raises TagAlreadyExists unless *overwrite* is set, in which case the
        existing alarm is modified instead. Returns the (possibly matched)
        tag.
        """
        device,match = device.lower(),self.has_tag(tag)
        if match:
            tag = match
            if not overwrite:
                raise Exception('TagAlreadyExists:%s'%tag)
            else:
                self.modify(tag=tag,device=device,formula=formula,
                    description=description,receivers=receivers,
                    severity=severity,load=load,config=config)
        # NOTE(review): after modify() the code falls through and writes the
        # alarm again below -- confirm this double write is intended.
        #Creating a new alarm:
        self.check_tag(tag,raise_=True)
        if device not in self.devices:
            raise Exception('DeviceDoesntExist')
        alarm = Alarm(tag, api=self, device=device, formula=formula,
            description=description, receivers=receivers, severity=severity)
        if config is not None:
            self.set_alarm_configuration(tag,device,config)
        alarm.write()
        if load: self.load()
        return tag
    def modify(self,tag,device,formula='',description='',receivers='',
            severity=DEFAULT_SEVERITY, config=None, load=True):
        """ Modfies an Alarm in the database.

        The alarm is first updated in place (still on its old device) and
        then rename() moves it to *device* and persists the change.
        """
        device = device.lower()
        tag = self.has_tag(tag,raise_=True)
        if device not in self.devices:
            raise Exception('DeviceDescriptiondDoesntExist:%s'%device)
        alarm = self[tag]
        old_device,new_device = alarm.device,device
        # write=False: the move below (rename) performs the actual write.
        alarm.setup(tag=tag,device=old_device,formula=formula,
            description=description,receivers=receivers,
            severity=severity,write=False)
        if config is not None: self.set_alarm_configuration(tag,device,config)
        self.rename(tag,tag,new_device,load=True)
    def set_alarm_configuration(self,tag,device,config):
        """
        This method is not operative yet, in the future will be used to
        do customized setups for each alarm.
        """
        self.info('In panic.set_alarm_configuration(%s,%s)'%(device,tag))
        self.error('\tNotImplemented!')
        return
        # NOTE(review): everything below is dead code (unreachable after the
        # return above) and references an undefined name `alarm`; it is kept
        # only as a sketch of the intended implementation.
        props=self.devices[device].get_config(True)
        dictlist=[]
        for key, value in props.iteritems():
            temp = str(key)+'='+str(value[0] if isSequence(value) else value)
            print '%s.%s.%s'%(device,alarm,temp)
            dictlist.append(temp)
        l=';'.join(dictlist)
        l=str(tag)+':'+l
        old_props=self.get_db_property(device, 'AlarmConfigurations')
        new_props=str(old_props).strip("]'[")+l+';'
        #return new_props
        try: self.put_device_property(device, 'AlarmConfigurations', new_props)
        except: Exception('Cant append the database!')
    def purge(self,device,tag,load=False):
        """
        Removes any alarm from a device matching the given tag.
        Database must be reloaded afterwards to update the alarm list.
        """
        props = self.devices[device].get_alarm_properties()
        # Strip every "tag:..." entry from the four alarm property lists.
        self.put_db_properties(device,
            {'AlarmList':[p for p in props['AlarmList']
                        if not p.startswith(tag+':')],
            'AlarmReceivers':[p for p in props['AlarmReceivers']
                        if not p.startswith(tag+':')],
            'AlarmDescriptions':[p for p in props['AlarmDescriptions']
                        if not p.startswith(tag+':')],
            'AlarmSeverities':[p for p in props['AlarmSeverities']
                        if not p.startswith(tag+':')],})
        # Re-init the device so it drops the removed alarm.
        self.devices[device].init()
        if load: self.load()
        return
def remove(self,tag,load=True):
""" Removes an alarm from the system. """
tag = self.has_tag(tag,True)
val = self.alarms.pop(tag) #Order matters!
self.purge(val.device,tag)
if load: self.load()
return val
    def rename(self,tag,new_tag='',new_device='',load=True):
        """ Renames an existing tag, it also allows to move to a new device.

        Implemented as remove()+add(); empty new_tag/new_device keep the
        previous values.
        """
        new_device = new_device.lower()
        if new_device and new_device not in self.devices:
            raise Exception('DeviceDoesntExist:%s'%new_device)
        tag = self.has_tag(tag,raise_=True)
        self.check_tag(new_tag,raise_=True)
        alarm = self.remove(tag)
        new_device = new_device or alarm.device
        new_tag = new_tag or alarm.tag
        self.add(new_tag,new_device,alarm.formula,alarm.description,
                 alarm.receivers,alarm.severity,load=load)
        return
def update_servers(self,targets):
""" Forces PyAlarm devices to reload selected alarms """
devs = set((self[t].device if t in self.alarms else t)
for t in targets)
self.warning('re-Initializing devices: %s'%devs)
[self.devices[d].init() for d in devs]
    def start_servers(self,tag='',device='',host=''):
        """ Starts Alarm Servers matching the filters.

        Defaults to this API's tango host when *host* is not given.
        """
        host = host or self.tango_host
        # One start per distinct server hosting a matched alarm device.
        self.servers.start_servers(set(self.servers.get_device_server(a.device)
                            for a in self.get_alarms(tag,device)),host=host)
def stop_servers(self,tag='',device=''):
""" Stops Alarm Servers matching the filters """
host = host or self.tango_host
self.servers.stop_servers(set(self.servers.get_device_server(a.device)
for a in self.get_alarms(tag,device)))
def __repr__(self):
#return '\n'.join(sorted('%s: %s' % (
# a.tag,a.description) for a in self.values()))
return 'AlarmAPI(%s,%s,[%d])'%(self.filters,self.tango_host,len(self))
api = AlarmAPI # Backwards-compatible lowercase alias for the class.
def current():
    """Return the active AlarmAPI singleton, creating one if none exists."""
    existing = AlarmAPI.CURRENT
    if existing:
        return existing
    return AlarmAPI()
def main():
    """Command-line entry point; tries to regenerate autodocs (best effort)."""
    import sys,fandango as Fn
    try:
        from fandango.doc import get_fn_autodoc
        # NOTE(review): this assigns a *local* __doc__ only; the module
        # docstring is not actually updated -- confirm intent.
        __doc__ = get_fn_autodoc(__name__,vars())
    except:
        # Doc generation is optional; failures are deliberately ignored.
        #import traceback
        #traceback.print_exc()
        pass
| StarcoderdataPython |
1828630 | <filename>notebooks/exp_decay.py<gh_stars>0
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
import os
# os.environ['MKL_NUM_THREADS'] = '1'
# os.environ['OPENBLAS_NUM_THREADS'] = '1'
# %%
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import pymc3 as pm
from pymc3.gp.util import plot_gp_dist
import arviz as az
import matplotlib.pyplot as plt
import warnings
warnings.simplefilter('ignore')
# %%
# Synthetic two-slope energy decay curve (EDC): two exponential terms with
# decay times Ts and amplitudes As, plus 0.25 dB gaussian noise.
Ts = [1.5, 4.5]
As = [0.7, 0.3]
n_samples = 128
edc = np.zeros(n_samples)
times = np.linspace(0, 3, n_samples)
for T_i, A_i in zip(Ts, As):
    edc += A_i*np.exp(-13.8/T_i*times)
edc = 10**((10*np.log10(edc) + np.random.normal(0, 0.25, n_samples))/10)
# %%
plt.plot(times, 10*np.log10(edc))
# %% [markdown]
# ## Gaussian Process
# export CPATH=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include
# %%
# GP regression of the EDC in dB: linear mean + squared-exponential kernel.
rho_val = np.round(np.abs(times[0]-times[1])*10, decimals=2)
with pm.Model() as gp_edc_model:
    # Lengthscale
    rho = pm.HalfCauchy('rho', rho_val)
    eta = pm.HalfCauchy('eta', 25)
    M = pm.gp.mean.Linear(coeffs=1/Ts[0])
    K = (eta**2) * pm.gp.cov.ExpQuad(1, rho)
    sigma = pm.Normal('sigma', 1)
    recruit_gp = pm.gp.Marginal(mean_func=M, cov_func=K)
    recruit_gp.marginal_likelihood(
        'edc', X=times.reshape(-1, 1),
        y=10*np.log10(np.abs(edc)),
        noise=sigma)
# %%
# MCMC fit (NUTS, ADVI init).
with gp_edc_model:
    trace = pm.sample(1000, tune=1000, init='advi', cores=1)
# %%
az.plot_trace(trace, var_names=['rho', 'eta', 'sigma'])
# %%
# Alternative variational fit; overwrites the MCMC trace above.
with gp_edc_model:
    fit = pm.fit(20000)
    trace = fit.sample(draws=1000)
# %%
az.plot_trace(trace, var_names=['rho', 'eta', 'sigma'])
# %%
# Posterior predictive EDC on a 3x denser time grid.
times_pred = np.linspace(0, times[-1], 3*times.shape[-1])
dt = np.abs(np.diff(times_pred)[0])
with gp_edc_model:
    edc_pred = recruit_gp.conditional("edc_pred", times_pred.reshape(-1, 1))
    gp_edc_samples = pm.sample_posterior_predictive(
        trace, var_names=['edc_pred'], samples=500)
# %%
from pymc3.gp.util import plot_gp_dist
fig, ax = plt.subplots(figsize=(8, 6))
plot_gp_dist(ax, gp_edc_samples['edc_pred'], times_pred)
plt.plot(times, 10*np.log10(edc))
ax.plot(
    times_pred,
    np.mean(gp_edc_samples['edc_pred'], axis=0),
    label='mean', color='green', linestyle='--')
# salmon_data.plot.scatter(x='spawners', y='recruits', c='k', s=50, ax=ax)
# ax.set_ylim(0, 350);
# %%
fig, ax = plt.subplots(figsize=(8, 6))
plot_gp_dist(ax, gp_edc_samples['edc_pred'], times_pred)
ax.plot(
    times_pred,
    np.mean(gp_edc_samples['edc_pred'], axis=0),
    label='mean', color='green', linestyle='--')
# ax.set_ylim(0, 350);
# np.mean(gp_edc_samples['edc_pred'], axis=0) - 10*np.log10(np.abs(edc))
# %%
# fig, ax = plt.subplots(figsize=(8, 6))
# plt.plot(np.gradient(np.mean(gp_edc_samples['edc_pred'], axis=0), np.diff(times_pred)[0], edge_order=2))
# plt.plot(np.gradient(np.mean(gp_edc_samples['edc_pred'], axis=0)))
# %%
# Slope of the mean predicted EDC (dB/s).
grad = np.gradient(np.mean(gp_edc_samples['edc_pred'], axis=0), dt, edge_order=1)
# %%
plt.plot(times_pred, np.abs(1/grad*60))
ax = plt.gca()
ax.set_ylim(0, 5)
# %%
| StarcoderdataPython |
def histogram(s):
    """Return a dict mapping each character of s to its occurrence count."""
    counts = {}
    for ch in s:
        if ch in counts:
            counts[ch] += 1
        else:
            counts[ch] = 1
    return counts
if __name__ == '__main__':
    # Python 2 print statement; the module doubles as a small demo script.
    print histogram('supercalifrigilisticexpialidocious')
| StarcoderdataPython |
195923 | <gh_stars>1000+
# Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from caffe2.torch.fb.fx2trt.tests.test_utils import AccTestCase, InputTensorSpec
class TestBatchNormConverter(AccTestCase):
    """Conversion tests for ``torch.nn.BatchNorm2d`` via acc_ops.batch_norm."""

    def test_batchnorm(self):
        # Static-shape conversion of a single BatchNorm2d layer.
        class BatchNormModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.norm = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                return self.norm(x)

        sample_inputs = [torch.randn(1, 3, 224, 224)]
        self.run_test(
            BatchNormModule(), sample_inputs, expected_ops={acc_ops.batch_norm}
        )

    def test_batchnorm_with_dynamic_shape(self):
        # Dynamic batch and spatial dims; the channel dim stays fixed at 3.
        class BatchNormModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.norm = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                return self.norm(x)

        spec = InputTensorSpec(
            shape=(-1, 3, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 3, 1, 1), (1, 3, 5, 5), (2, 3, 10, 10))],
        )
        self.run_test_with_dynamic_shape(
            BatchNormModule(), [spec], expected_ops={acc_ops.batch_norm}
        )
| StarcoderdataPython |
8165367 | <reponame>thebigmunch/google-music-scripts<filename>src/google_music_scripts/cli.py
import argparse
import math
import re
import warnings
from pathlib import Path
from attr import attrib, attrs
from audio_metadata import AudioMetadataWarning
from loguru import logger
from tbm_utils import (
Namespace,
SubcommandHelpFormatter,
UsageHelpFormatter,
create_parser_dry_run,
create_parser_filter_dates,
create_parser_local,
create_parser_logging,
create_parser_meta,
create_parser_yes,
custom_path,
datetime_string_to_time_period,
get_defaults,
merge_defaults,
parse_args
)
from .__about__ import __title__, __version__
from .commands import (
do_delete,
do_download,
do_quota,
do_search,
do_upload,
)
from .config import configure_logging, read_config_file
# Maps each command name to its alias and each alias back to its command,
# so config-file sections written under either name resolve identically.
# Bug fix: 'down' previously mapped to 'delete' instead of 'download'.
COMMAND_ALIASES = {
	'del': 'delete',
	'delete': 'del',
	'down': 'download',
	'download': 'down',
	'up': 'upload',
	'upload': 'up'
}
# All recognised command names, including their short aliases.
COMMAND_KEYS = {
	'del',
	'delete',
	'down',
	'download',
	'quota',
	'search',
	'up',
	'upload',
}
# Matches one '[+-]field[pattern]' filter condition, e.g. '-artist[foo]'.
FILTER_RE = re.compile(r'(([+-]+)?(.*?)\[(.*?)\])', re.I)
@attrs(slots=True, frozen=True)
class FilterCondition:
	# Match operator: '+' (include) or '-' (exclude); '' defaults to '+'.
	oper = attrib(converter=lambda o: '+' if o == '' else o)
	# Metadata field name the condition applies to.
	field = attrib()
	# Pattern matched against the field value.
	pattern = attrib()
def parse_filter(value):
	"""Parse a filter string into a list of FilterCondition objects.

	Raises ValueError when the string contains no valid condition.
	"""
	matches = FILTER_RE.findall(value)
	if not matches:
		raise ValueError(f"'{value}' is not a valid filter.")
	# Group 0 is the whole condition; groups 1-3 are oper/field/pattern.
	return [FilterCondition(*match[1:]) for match in matches]
def split_album_art_paths(value):
	"""Turn a comma-separated string (or list) of art paths into Path objects.

	Falsy input (None, '', []) is returned unchanged.
	"""
	if not value:
		return value
	entries = value if isinstance(value, list) else value.split(',')
	return [custom_path(entry.strip()) for entry in entries]
########
# Meta #
########
# Shared --version/--help style options from tbm_utils.
meta = create_parser_meta(__title__, __version__)
##########
# Action #
##########
dry_run = create_parser_dry_run()
yes = create_parser_yes()
###########
# Logging #
###########
logging_ = create_parser_logging()
##################
# Identification #
##################
# Parent parsers below all use SUPPRESS so unset options are simply absent
# from the namespace (default_args() fills them in later).
ident = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
ident_options = ident.add_argument_group("Identification")
ident_options.add_argument(
	'-u', '--username',
	metavar='USER',
	help=(
		"Your Google username or e-mail address.\n"
		"Used to separate saved credentials."
	)
)
# Mobile Client
mc_ident = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
mc_ident_options = mc_ident.add_argument_group("Identification")
mc_ident_options.add_argument(
	'--device-id',
	metavar='ID',
	help="A mobile device id."
)
# Music Manager
mm_ident = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
mm_ident_options = mm_ident.add_argument_group("Identification")
mm_ident_options.add_argument(
	'--uploader-id',
	metavar='ID',
	help=(
		"A unique id given as a MAC address (e.g. '00:11:22:33:AA:BB').\n"
		"This should only be provided when the default does not work."
	)
)
#########
# Local #
#########
local = create_parser_local()
##########
# Filter #
##########
# Metadata
filter_metadata = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
metadata_options = filter_metadata.add_argument_group("Filter")
metadata_options.add_argument(
	'-f', '--filter',
	metavar='FILTER',
	action='append',
	dest='filters',
	type=parse_filter,
	help=(
		"Metadata filters.\n"
		"Can be specified multiple times."
	)
)
# Dates
filter_dates = create_parser_filter_dates()
###############
# Upload Misc #
###############
upload_misc = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
upload_misc_options = upload_misc.add_argument_group("Misc")
upload_misc_options.add_argument(
	'--delete-on-success',
	action='store_true',
	help="Delete successfully uploaded local files."
)
upload_misc_options.add_argument(
	'--no-sample',
	action='store_true',
	help=(
		"Don't create audio sample with ffmpeg/avconv.\n"
		"Send empty audio sample."
	)
)
upload_misc_options.add_argument(
	'--album-art',
	metavar='ART_PATHS',
	type=split_album_art_paths,
	help=(
		"Comma-separated list of album art filepaths.\n"
		"Can be relative filenames and/or absolute filepaths."
	)
)
########
# Sync #
########
# Paired positive/negative flags; check_args() rejects using both at once.
sync = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
sync_options = sync.add_argument_group("Sync")
sync_options.add_argument(
	'--use-hash',
	action='store_true',
	help="Use audio hash to sync songs."
)
sync_options.add_argument(
	'--no-use-hash',
	action='store_true',
	help="Don't use audio hash to sync songs."
)
sync_options.add_argument(
	'--use-metadata',
	action='store_true',
	help="Use metadata to sync songs."
)
sync_options.add_argument(
	'--no-use-metadata',
	action='store_true',
	help="Don't use metadata to sync songs."
)
##########
# Output #
##########
output = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
output_options = output.add_argument_group("Output")
output_options.add_argument(
	'-o', '--output',
	metavar='TEMPLATE_PATH',
	type=lambda t: str(custom_path(t)),
	help="Output file or directory name which can include template patterns."
)
###########
# Include #
###########
include = argparse.ArgumentParser(
	argument_default=argparse.SUPPRESS,
	add_help=False
)
include_options = include.add_argument_group("Include")
include_options.add_argument(
	'include',
	metavar='PATH',
	type=lambda p: custom_path(p).resolve(),
	nargs='*',
	help="Local paths to include songs from."
)
#######
# gms #
#######
# Top-level parser; each subcommand composes the parent parsers above.
gms = argparse.ArgumentParser(
	prog='gms',
	description="A collection of scripts to interact with Google Music.",
	usage=argparse.SUPPRESS,
	parents=[meta],
	formatter_class=SubcommandHelpFormatter,
	add_help=False
)
subcommands = gms.add_subparsers(
	title="Commands",
	dest='_command',
	metavar="<command>"
)
##########
# Delete #
##########
delete_command = subcommands.add_parser(
	'delete',
	aliases=['del'],
	description="Delete song(s) from Google Music.",
	help="Delete song(s) from Google Music.",
	formatter_class=UsageHelpFormatter,
	usage="gms delete [OPTIONS]",
	parents=[
		meta,
		dry_run,
		yes,
		logging_,
		ident,
		mc_ident,
		filter_metadata,
		filter_dates,
	],
	add_help=False
)
delete_command.set_defaults(func=do_delete)
############
# Download #
############
download_command = subcommands.add_parser(
	'download',
	aliases=['down'],
	description="Download song(s) from Google Music.",
	help="Download song(s) from Google Music.",
	formatter_class=UsageHelpFormatter,
	usage="gms download [OPTIONS]",
	parents=[
		meta,
		dry_run,
		logging_,
		ident,
		mm_ident,
		mc_ident,
		local,
		filter_metadata,
		filter_dates,
		sync,
		output,
		include,
	],
	add_help=False
)
download_command.set_defaults(func=do_download)
#########
# Quota #
#########
quota_command = subcommands.add_parser(
	'quota',
	description="Get the uploaded song count and allowance.",
	help="Get the uploaded song count and allowance.",
	formatter_class=UsageHelpFormatter,
	usage="gms quota [OPTIONS]",
	parents=[
		meta,
		logging_,
		ident,
		mm_ident,
	],
	add_help=False
)
quota_command.set_defaults(func=do_quota)
##########
# Search #
##########
search_command = subcommands.add_parser(
	'search',
	description="Search a Google Music library for songs.",
	help="Search for Google Music library songs.",
	formatter_class=UsageHelpFormatter,
	usage="gms search [OPTIONS]",
	parents=[
		meta,
		yes,
		logging_,
		mc_ident,
		filter_metadata,
	],
	add_help=False
)
search_command.set_defaults(func=do_search)
##########
# Upload #
##########
upload_command = subcommands.add_parser(
	'upload',
	aliases=['up'],
	description="Upload song(s) to Google Music.",
	help="Upload song(s) to Google Music.",
	formatter_class=UsageHelpFormatter,
	usage="gms upload [OPTIONS] [INCLUDE_PATH]...",
	parents=[
		meta,
		dry_run,
		logging_,
		ident,
		mm_ident,
		mc_ident,
		local,
		filter_metadata,
		filter_dates,
		upload_misc,
		sync,
		include,
	],
	add_help=False
)
upload_command.set_defaults(func=do_upload)
def check_args(args):
	"""Validate mutually exclusive command-line options.

	:param args: parsed arguments; must support ``in`` membership tests.
	:raises ValueError: if both a flag and its negation were given.

	Bug fix: both error messages contained a stray apostrophe
	("...--no-use-hash', not both.").
	"""
	if all(
		option in args
		for option in ['use_hash', 'no_use_hash']
	):
		raise ValueError(
			"Use one of --use-hash/--no-use-hash, not both."
		)
	if all(
		option in args
		for option in ['use_metadata', 'no_use_metadata']
	):
		raise ValueError(
			"Use one of --use-metadata/--no-use-metadata, not both."
		)
def default_args(args):
	"""Build the default option namespace for the parsed command, merging
	in per-user config-file values (converted to the proper types)."""
	defaults = Namespace()
	# Set defaults.
	defaults.verbose = 0
	defaults.quiet = 0
	defaults.debug = False
	defaults.dry_run = False
	defaults.username = ''
	defaults.filters = []
	# Positive/negative flag pairs are kept consistent with each other.
	if 'no_log_to_stdout' in args:
		defaults.log_to_stdout = False
		defaults.no_log_to_stdout = True
	else:
		defaults.log_to_stdout = True
		defaults.no_log_to_stdout = False
	if 'log_to_file' in args:
		defaults.log_to_file = True
		defaults.no_log_to_file = False
	else:
		defaults.log_to_file = False
		defaults.no_log_to_file = True
	# Which client ids apply depends on the command being run.
	if args._command in ['down', 'download', 'up', 'upload']:
		defaults.uploader_id = None
		defaults.device_id = None
	elif args._command in ['quota']:
		defaults.uploader_id = None
	else:
		defaults.device_id = None
	if args._command in ['down', 'download', 'up', 'upload']:
		defaults.no_recursion = False
		defaults.max_depth = math.inf
		defaults.exclude_paths = []
		defaults.exclude_regexes = []
		defaults.exclude_globs = []
		if 'no_use_hash' in args:
			defaults.use_hash = False
			defaults.no_use_hash = True
		else:
			defaults.use_hash = True
			defaults.no_use_hash = False
		if 'no_use_metadata' in args:
			defaults.use_metadata = False
			defaults.no_use_metadata = True
		else:
			defaults.use_metadata = True
			defaults.no_use_metadata = False
		if args._command in ['down', 'download']:
			defaults.output = str(Path('.').resolve())
			defaults.include = []
		elif args._command in ['up', 'upload']:
			defaults.include = [custom_path('.').resolve()]
			defaults.delete_on_success = False
			defaults.no_sample = False
			defaults.album_art = None
	if args._command in ['del', 'delete', 'search']:
		defaults.yes = False
	# Config-file values for this command (resolved through aliases).
	config_defaults = get_defaults(
		args._command,
		read_config_file(
			username=args.get('username')
		),
		command_keys=COMMAND_KEYS,
		command_aliases=COMMAND_ALIASES
	)
	# Convert raw config strings into the same types the CLI produces.
	for k, v in config_defaults.items():
		if k == 'album_art':
			defaults.album_art = split_album_art_paths(v)
		elif k == 'filters':
			defaults.filters = [
				parse_filter(filter_)
				for filter_ in v
			]
		elif k == 'max_depth':
			defaults.max_depth = int(v)
		elif k == 'output':
			defaults.output = str(custom_path(v))
		elif k == 'include':
			defaults.include = [
				custom_path(val)
				for val in v
			]
		elif k in [
			'log_to_stdout',
			'log_to_file',
			'use_hash',
			'use_metadata',
		]:
			# Keep the negated twin flag in sync.
			defaults[k] = v
			defaults[f"no_{k}"] = not v
		elif k in [
			'no_log_to_stdout',
			'no_log_to_file',
			'no_use_hash',
			'no_use_metadata',
		]:
			defaults[k] = v
			defaults[k.replace('no_', '')] = not v
		elif k.startswith(('created', 'modified')):
			# Date filters become time periods; the suffix selects the mode.
			if k.endswith('in'):
				defaults[k] = datetime_string_to_time_period(v, in_=True)
			elif k.endswith('on'):
				defaults[k] = datetime_string_to_time_period(v, on=True)
			elif k.endswith('before'):
				defaults[k] = datetime_string_to_time_period(v, before=True)
			elif k.endswith('after'):
				defaults[k] = datetime_string_to_time_period(v, after=True)
		else:
			defaults[k] = v
	return defaults
def run():
	"""Program entry point: parse args, merge defaults, configure logging
	and dispatch to the selected command function."""
	# audio_metadata warnings are noise for end users; silence them.
	warnings.simplefilter(
		'ignore',
		category=AudioMetadataWarning,
	)
	try:
		parsed = parse_args(gms)
		if parsed._command is None:
			# No subcommand: show help (this exits).
			gms.parse_args(['-h'])
		check_args(parsed)
		defaults = default_args(parsed)
		args = merge_defaults(defaults, parsed)
		if args.get('no_recursion'):
			args.max_depth = 0
		configure_logging(
			args.verbose - args.quiet,
			username=args.username,
			debug=args.debug,
			log_to_stdout=args.log_to_stdout,
			log_to_file=args.log_to_file
		)
		# Dispatch to the do_* function selected via set_defaults(func=...).
		args.func(args)
		logger.log('NORMAL', "All done!")
	except KeyboardInterrupt:
		gms.exit(130, "\nInterrupted by user")
| StarcoderdataPython |
1615217 | import warnings
import pytest
from django.test import TestCase
# Mark every Django TestCase with django_db (transactional, sequences reset)
# so they run correctly under pytest-django.
TestCase.pytestmark = pytest.mark.django_db(transaction=True, reset_sequences=True)
@pytest.fixture(autouse=True)
def suppress_warnings():
    """Escalate all warnings to errors, except one known-noisy message."""
    warnings.simplefilter("error", Warning)
    noisy = "name used for saved screenshot does not match file type"
    warnings.filterwarnings(
        "ignore",
        message=noisy,
        category=UserWarning)
| StarcoderdataPython |
6493546 | <gh_stars>0
from django.contrib.syndication.views import Feed
from academicPhylogeny.models import PhD
class PhDFeed(Feed):
    """RSS feed listing every PhD record in the database."""
    title = "PhD feed"
    link = "/feed/"
    description = "RSS listing of bioanth PhDs"

    def items(self):
        # Every PhD is a feed entry.
        return PhD.objects.all()

    def item_title(self, item):
        return item

    def item_description(self, item):
        return "%s received a PhD from %s in %s." % (item, item.school, item.year)

    def item_link(self, item):
        # Bug fix: the original line had dataset garbage (" | ...") fused
        # onto its end, which made the module a syntax error.
        # NOTE(review): domain is hard-coded; consider django.urls.reverse().
        return "https://www.bioanthtree.org/detail/%s" % (item.URL_for_detail,)
from pyspark.sql import SparkSession
from pyspark.sql.functions import lit
from datetime import datetime

# Local Spark session used to round-trip a CSV through an Oracle table.
spark = SparkSession.builder.master("local").appName("SparkandOracledbTest").getOrCreate()

print("Start Reading Data from CSV")
df = spark.read.csv("test.csv", header=True, inferSchema=True)
print("Printing the Data from CSV .... ")
df.show()
print("Printing Schema of Data")
df.printSchema()

print("Adding TimeStamp")
# Bug fix: the timestamp column must be added to `df`; `df1` did not exist
# at this point (NameError in the original).
df1 = df.withColumn("Now", lit(str(datetime.now().strftime("%d-%m-%Y"))))
df1.show()

print("Amending the Oracle Table ... ")  # typo "Oralce" fixed
# NOTE(review): "<PASSWORD>" is a redaction placeholder; supply real
# credentials via configuration, not source code.
df1.write.format("jdbc").option("driver", "oracle.jdbc.driver.OracleDriver")\
    .option("url", "jdbc:oracle:thin:@127.0.0.1:1521/XE")\
    .option("user", "simran")\
    .option("password", "<PASSWORD>").mode("append").option("dbtable", "TEMP_DEP").save()

print("Reading again .... ")
df2 = spark.read.format("jdbc").option("url", "jdbc:oracle:thin:@127.0.0.1:1521/XE")\
    .option("dbtable", "TEMP_DEP")\
    .option("user", "simran")\
    .option("password", "<PASSWORD>")\
    .option("driver", "oracle.jdbc.driver.OracleDriver")\
    .load()
df2.show()
1833784 | <filename>integ_identification_deep/train.py
import importlib
import pre_processing
# Re-importing then reloading picks up edits to pre_processing when this
# script is re-run inside an interactive session.
# Bug fix: the original `importlib.reload(pre_processing)` always raised
# NameError (the module was never imported) and was silently swallowed by
# a bare except, so the reload never happened.
importlib.reload(pre_processing)
from pre_processing import pre_process
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
import time

# OPTION ROBOTIQUE 2021-2022 PROJET INTEG -IDENTIFICATION- N.FRAPPEREAU & J.DELACOUX
# In this file you'll find our training algorithm
# The data has already been "collected" and extracted
# All that's left to do is to choose a model architecture and fit it to
# our datasets.

start_time = time.time()

# ~ Preprocess the existing data
# ~ It could be interesting to save this pre-processed data somewhere
# ~ and load it when this script is executed
data = pre_process()
print(f'Size of the dataset : {data.shape}')

# ~ Inputs: the first 8 columns are (theta1, omega1, effort1, acceleration1,
# ~ theta2, omega2, effort2, acceleration2).
X = data[:,:8]

# ~ Desired outputs: each physical parameter is estimated independently in
# ~ this first approach (see original notes on observability caveats).
Y_I1 = data[:,8]
Y_I2 = data[:,9]
Y_m1 = data[:,10]
Y_m2 = data[:,11]
Y_com1 = data[:,12]
Y_com2 = data[:,13]


def _fit_and_report(inputs, targets, label):
    """Train a default MLPRegressor on a fixed split and print its R².

    The split uses random_state=16 like the original per-parameter code,
    so results are unchanged; the helper just removes six copies of the
    same train/fit/score boilerplate.
    """
    x_train, x_test, y_train, y_test = train_test_split(
        inputs, targets, random_state=16)
    reg = MLPRegressor()
    reg.fit(x_train, y_train)
    print(f'R² of the {label} estimator : {reg.score(x_test, y_test)}')
    return reg


inertia1_reg = _fit_and_report(X, Y_I1, 'Inertia1')
inertia2_reg = _fit_and_report(X, Y_I2, 'Inertia2')
mass1_reg = _fit_and_report(X, Y_m1, 'mass1')
mass2_reg = _fit_and_report(X, Y_m2, 'mass2')
com1_reg = _fit_and_report(X, Y_com1, 'center_of_mass1')
# Bug fix: the original printed "center_of_mass1" for the com2 estimator.
com2_reg = _fit_and_report(X, Y_com2, 'center_of_mass2')

print("--- Global training and evaluation time : %s seconds ---" % (time.time() - start_time))
| StarcoderdataPython |
4829217 | <gh_stars>1-10
import unittest
import solution
class TestQ(unittest.TestCase):
    # Exercises solution.nimbleGame with one first-player-win and one
    # second-player-win pile configuration.
    def test_case_0(self):
        self.assertEqual(solution.nimbleGame([0, 2, 3, 0, 6]), 'First')
        self.assertEqual(solution.nimbleGame([0, 0, 0, 0]), 'Second')
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
8103010 | <reponame>suprajasridhara/scion
# Copyright 2014 ETH Zurich
# Copyright 2018 ETH Zurich, Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`supervisor` --- SCION topology supervisor generator
=============================================
"""
# Stdlib
import configparser
import os
from io import StringIO
# SCION
from python.lib.util import write_file
from python.topology.common import (
ArgsTopoDicts,
BR_CONFIG_NAME,
COMMON_DIR,
CS_CONFIG_NAME,
DISP_CONFIG_NAME,
SD_CONFIG_NAME,
)
SUPERVISOR_CONF = 'supervisord.conf'
class SupervisorGenArgs(ArgsTopoDicts):
    """Arguments for SupervisorGenerator; adds nothing beyond ArgsTopoDicts."""
    pass
class SupervisorGenerator(object):
    """Generates supervisord configuration files for a SCION topology:
    one config per element (border router, control service, dispatcher)
    plus one aggregate config per AS."""

    def __init__(self, args):
        """
        :param SupervisorGenArgs args: Contains the passed command line arguments and topo dicts.
        """
        self.args = args
    def generate(self):
        """Write the dispatcher config plus one supervisord config per AS."""
        self._write_dispatcher_conf()
        for topo_id, topo in self.args.topo_dicts.items():
            base = topo_id.base_dir(self.args.output_dir)
            entries = self._as_conf(topo, base)
            self._write_as_conf(topo_id, entries)
    def _as_conf(self, topo, base):
        # Collect (name, command) entries for every process of one AS.
        entries = []
        entries.extend(self._br_entries(topo, "bin/border", base))
        entries.extend(self._control_service_entries(topo, base))
        return entries
    def _br_entries(self, topo, cmd, base):
        # One entry per border router defined in the topology dict.
        entries = []
        for k, v in topo.get("border_routers", {}).items():
            conf = os.path.join(base, k, BR_CONFIG_NAME)
            entries.append((k, [cmd, "--config", conf]))
        return entries
    def _control_service_entries(self, topo, base):
        entries = []
        for k, v in topo.get("control_service", {}).items():
            # only a single control service instance per AS is currently supported
            if k.endswith("-1"):
                conf = os.path.join(base, k, CS_CONFIG_NAME)
                entries.append((k, ["bin/cs", "--config", conf]))
        return entries
    def _sciond_entry(self, name, conf_dir):
        # Supervisord program entry for the per-AS sciond daemon.
        return self._common_entry(
            name, ["bin/sciond", "--config", os.path.join(conf_dir, SD_CONFIG_NAME)])
    def _write_as_conf(self, topo_id, entries):
        """Write the aggregate supervisord config of one AS: a [program:*]
        section per element plus a [group:*] section tying them together."""
        config = configparser.ConfigParser(interpolation=None)
        names = []
        base = topo_id.base_dir(self.args.output_dir)
        # Sort by element name for a deterministic config layout.
        for elem, entry in sorted(entries, key=lambda x: x[0]):
            names.append(elem)
            elem_dir = os.path.join(base, elem)
            self._write_elem_conf(elem, entry, elem_dir, topo_id)
        sd_name = "sd%s" % topo_id.file_fmt()
        names.append(sd_name)
        conf_dir = os.path.join(base, COMMON_DIR)
        config["program:%s" % sd_name] = self._sciond_entry(sd_name, conf_dir)
        config["group:as%s" % topo_id.file_fmt()] = {
            "programs": ",".join(names)}
        text = StringIO()
        config.write(text)
        conf_path = os.path.join(topo_id.base_dir(
            self.args.output_dir), SUPERVISOR_CONF)
        write_file(conf_path, text.getvalue())
    def _write_elem_conf(self, elem, entry, elem_dir, topo_id=None):
        # Write the standalone supervisord config for a single element.
        config = configparser.ConfigParser(interpolation=None)
        prog = self._common_entry(elem, entry, elem_dir)
        if elem.startswith("br"):
            # Border routers need cgo pointer checking disabled.
            prog['environment'] += ',GODEBUG="cgocheck=0"'
        config["program:%s" % elem] = prog
        text = StringIO()
        config.write(text)
        write_file(os.path.join(elem_dir, SUPERVISOR_CONF), text.getvalue())
    def _write_dispatcher_conf(self):
        # The dispatcher is shared by all ASes and lives at the top level.
        elem = "dispatcher"
        elem_dir = os.path.join(self.args.output_dir, elem)
        config_file_path = os.path.join(elem_dir, DISP_CONFIG_NAME)
        self._write_elem_conf(
            elem, ["bin/dispatcher", "--config", config_file_path], elem_dir)
    def _common_entry(self, name, cmd_args, elem_dir=None):
        """Return the supervisord option dict shared by all programs; the
        dispatcher gets a shorter startup grace period and higher priority
        (lower number) so it is up before the other elements."""
        entry = {
            'autostart': 'false',
            'autorestart': 'false',
            'environment': 'TZ=UTC',
            'stdout_logfile': "NONE",
            'stderr_logfile': "NONE",
            'startretries': 0,
            'startsecs': 5,
            'priority': 100,
            'command': self._mk_cmd(name, cmd_args),
        }
        if name == "dispatcher":
            entry['startsecs'] = 1
            entry['priority'] = 50
        return entry
    def _mk_cmd(self, name, cmd_args):
        # Redirect both stdout and stderr into logs/<name>.log via bash.
        return "bash -c 'exec %s &>logs/%s.log'" % (
            " ".join(['"%s"' % arg for arg in cmd_args]), name)
| StarcoderdataPython |
11330102 | <filename>work_division.py
#Copyright (C) 2013, <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from time import strftime, clock
import os
import sys
import glob
import math
import random
import copy
import multiprocessing
def write_logs(logs, log_file):
    """Append every entry of *logs* (newline-terminated strings) to *log_file*."""
    # 'with' guarantees the handle is closed even if writelines raises;
    # replaces the Python-2-only file() builtin.
    with open(log_file, 'a') as file_out:
        file_out.writelines(logs)
def add_log_time_now(message, start_time):
    """Append a CSV line '<timestamp>,<message>,<elapsed seconds>' to the
    module-global ``logs`` list; elapsed time is measured from *start_time*
    (a time.time() value)."""
    global logs
    logs.append(str(strftime("%a %b %d %H:%M:%S %Y")) + ',' + message + ',' + str(time.time() - start_time) + '\n' )
def read_movielens_items(input_path):
    """Return the distinct MovieLens item ids found in movies.dat and
    tags.dat (in that order), preserving first-seen order.

    movies.dat lines look like ``item::title::genres`` (item id in column 0)
    while tags.dat lines look like ``user::item::tag::ts`` (item id in
    column 1).
    """
    item_indexes = list()
    item_files_ordered = ("movies.dat", "tags.dat")
    items_seen = set()  # O(1) membership test keeps item_indexes unique
    for item_file in item_files_ordered:
        # 'with' + open() replaces the Python-2-only file() builtin and
        # guarantees the handle is closed on error.
        with open(os.path.join(input_path, item_file)) as file_in:
            lines = file_in.readlines()
        begin_time = time.time()
        for line in lines:
            line_arr = line.split('::')
            # The item id lives in a different column per file format.
            if item_file == 'movies.dat':
                item = int(line_arr[0])
            else:
                item = int(line_arr[1])
            if item not in items_seen:
                item_indexes.append(item)
                items_seen.add(item)
        add_log_time_now('work_division,serial,-1', begin_time)
    return item_indexes
def get_item_skips(item, part_of_users, ratings_reverse, ratings_loads):
    """Return the number of rating lines, among *part_of_users*, belonging to
    users who rated *item* (i.e. work an iteration over *item* can skip).

    Returns 0 when not a single user has rated the item.  The original used
    a bare ``except:`` to detect the missing key, which also swallowed any
    unrelated error; an explicit lookup is used instead.
    """
    raters = ratings_reverse.get(item)
    if raters is None:
        # Not a single user has rated this item.
        return 0
    return sum(ratings_loads[user] for user in part_of_users & raters)
def read_ratings(ratings_file):
    """Parse a MovieLens ``uid::item::rating::timestamp`` ratings file.

    Returns three dicts:
      * ratings_lines   -- user -> list of raw rating lines
      * ratings_loads   -- user -> number of ratings by that user
      * ratings_reverse -- item -> set of users who rated it
    """
    global logs
    # open() in a 'with' block replaces the Python-2-only file() builtin.
    with open(ratings_file) as file_in:
        lines = file_in.readlines()
    begin_time = time.time()
    # var init
    ratings_lines = dict()
    ratings_loads = dict()
    ratings_reverse = dict()
    # parse the rating lines
    for line in lines:
        # line => uid::itemid::rating::timestamp
        line_arr = line.split('::')
        user = int(line_arr[0])
        item = int(line_arr[1])
        # Explicit membership test replaces the original bare 'except:',
        # which would also have hidden real errors.
        if user in ratings_lines:
            ratings_lines[user].append(line)
            ratings_loads[user] += 1
        else:
            ratings_lines[user] = [line]
            ratings_loads[user] = 1
        # reverse ratings: which users rated this item
        ratings_reverse.setdefault(item, set()).add(user)
    add_log_time_now('work_division,serial,-1', begin_time)
    return ratings_lines, ratings_loads, ratings_reverse
def get_minimum_node(indexes_loads):
    """Return the node (dict key) carrying the smallest load; ties go to the
    node encountered first in iteration order.  Returns -1 for an empty
    mapping."""
    if not indexes_loads:
        return -1
    # min() with a key returns the first minimal key, matching the original
    # strict-less-than scan (including its zero-load shortcut).
    return min(indexes_loads, key=indexes_loads.get)
def get_max_min_node(indexes_loads):
    """Return ``(richest, poorest)``: the keys with the largest and smallest
    loads.  Ties resolve to the first key in iteration order; an empty
    mapping yields ``(-1, -1)``."""
    if not indexes_loads:
        return -1, -1
    load_of = indexes_loads.get
    # Built-in max()/min() with a key both return the first extreme key,
    # matching the original strict comparisons.
    return max(indexes_loads, key=load_of), min(indexes_loads, key=load_of)
def random_split(the_indexes, number_of_parts):
    """Distribute *the_indexes* over *number_of_parts* buckets uniformly at
    random (the input list is shuffled in place as a side effect).

    Returns a dict bucket-number -> list of items; buckets may be empty.
    """
    random.shuffle(the_indexes)
    indexes = {node: [] for node in range(number_of_parts)}
    # Each item independently lands in a uniformly random bucket.
    for item in the_indexes:
        indexes[random.randint(0, number_of_parts - 1)].append(item)
    return indexes
def robin_hood_split(item_indexes, item_load, number_of_parts, diff_goal=500, max_iterations=500000):
    """Split *item_indexes* into *number_of_parts* buckets, balancing the
    per-bucket sum of *item_load*.

    Phase 1 greedily assigns items (heaviest first) to the least-loaded
    bucket.  Phase 2 ("robin hood") randomly moves items from the richest
    bucket to the poorest until the max/min load difference drops to
    *diff_goal* or *max_iterations* is reached.

    NOTE(review): storing the best split found during phase 2 is disabled
    (see the commented-out deepcopy below), so the function returns the
    phase-1 greedy split unchanged -- confirm whether that is intentional.
    """
    # Sort items by descending load so heavy items are placed first.
    item_indexes_sorted = sorted(
        ((item_load[item], item) for item in item_indexes), reverse=True)
    # initialize vars
    iterations = 0
    minimum_indexes = dict()
    indexes = dict()
    indexes_loads = dict()
    for node in range(number_of_parts):
        indexes[node] = list()
        indexes_loads[node] = 0
    # Phase 1: divide the items over the poor (greedy assignment).
    for the_load, the_item in item_indexes_sorted:
        node = get_minimum_node(indexes_loads)
        indexes[node].append(the_item)
        indexes_loads[node] += the_load
    # Snapshot once after the greedy pass.  The original deep-copied inside
    # the loop on every iteration -- O(n^2) work for the same final value.
    if item_indexes_sorted:
        minimum_indexes = copy.deepcopy(indexes)
    # calculate the difference of this situation
    vals = indexes_loads.values()
    diff = max(vals) - min(vals)
    min_diff = diff
    # Phase 2: redistribute work in 'robin hood mode'.
    while diff > diff_goal and iterations < max_iterations:
        # select the richest and poorest node
        rich, poor = get_max_min_node(indexes_loads)
        # pick something from the rich ... and give it to the poor
        something = indexes[rich].pop(random.randint(0, len(indexes[rich]) - 1))
        indexes[poor].append(something)
        # correct the load counters
        some_load = item_load[something]
        indexes_loads[rich] -= some_load
        indexes_loads[poor] += some_load
        vals = indexes_loads.values()
        diff = max(vals) - min(vals)
        if diff <= min_diff:
            # store minimum results
            min_diff = diff
            # minimum_indexes = copy.deepcopy(indexes)
        iterations += 1
    return minimum_indexes
def calc_item_load(item_indexes, part_of_users, ratings_reverse, ratings_loads):
    """For every item, return the amount of rating work remaining for the
    users in *part_of_users* after skipping those who already rated it."""
    num_ratings = sum(ratings_loads[user] for user in part_of_users)
    return {
        item: num_ratings - get_item_skips(item, part_of_users,
                                           ratings_reverse, ratings_loads)
        for item in item_indexes
    }
def write_user_division(user_division, ratings_lines, output_path, base_file_name):
    """Write one ratings file per chunk, named
    '<output_path>/<base_file_name>_<chunk>', holding the raw rating lines
    of every user assigned to that chunk."""
    for chunk, users in user_division.items():
        lines = list()
        for user in users:
            lines += ratings_lines[user]
        # open() + 'with' replaces the Python-2-only file() builtin and
        # guarantees the handle is closed on error.
        with open(output_path + '/' + base_file_name + '_' + str(chunk), 'w') as file_out:
            file_out.writelines(lines)
def write_item_division(item_division, output_path, base_file_name, userjob):
    """Write one item-index file per chunk, named
    '<output_path>/<base_file_name>_<userjob>_<chunk>', one item id per line."""
    for chunk, items in item_division.items():
        lines = [str(item) + '\n' for item in items]
        # open() + 'with' replaces the Python-2-only file() builtin.
        with open(output_path + '/' + base_file_name + '_' + str(userjob) + '_' + str(chunk), 'w') as file_out:
            file_out.writelines(lines)
def make_all_as_one(ratings_loads):
    """Overwrite every user's load with 1, in place, and return the same
    dict (switches balancing from per-rating to per-user weighting)."""
    ratings_loads.update(dict.fromkeys(ratings_loads, 1))
    return ratings_loads
def parallel_item_division(userjob):
    """Split the item set for one user-job into ``number_of_item_jobs``
    chunks and write them to disk.

    Relies on module globals set in the __main__ block: item_indexes,
    user_division, ratings_reverse, ratings_loads, number_of_item_jobs,
    recommender_split_item_data, robin_hood_min_diff,
    robin_hood_max_iterations, output_path.  NOTE(review): an unknown
    recommender_split_item_data value leaves item_division unbound and
    raises NameError at the write below.
    """
    if recommender_split_item_data == 'byiteration':
        # Balance by the actual iteration work left per item for this job.
        item_load = calc_item_load(item_indexes, set(user_division[userjob]), ratings_reverse, ratings_loads)
        item_division = robin_hood_split(item_indexes, item_load, number_of_item_jobs, robin_hood_min_diff, robin_hood_max_iterations)
    elif recommender_split_item_data == 'byitem':
        # Balance by item count only: every item weighs 1.
        item_load = dict()
        for item in item_indexes:
            item_load[item] = 1
        random.shuffle(item_indexes)
        item_division = robin_hood_split(item_indexes, item_load, number_of_item_jobs, 2, 1000000)
    elif recommender_split_item_data == 'random':
        item_division = random_split(item_indexes, number_of_item_jobs)
    write_item_division(item_division, output_path, 'item_indexes.dat', userjob)
if __name__ == "__main__":
    # CLI: input_path output_path n_user_jobs n_item_jobs n_cores log_file
    #      [robin_hood_min_diff robin_hood_max_iterations]
    input_path = str(sys.argv[1])
    output_path = str(sys.argv[2])
    number_of_user_jobs = int(sys.argv[3])
    number_of_item_jobs = int(sys.argv[4])
    number_of_cores = int(sys.argv[5])
    log_file = str(sys.argv[6])
    recommender_split_user_data = 'byrating' #can also be 'byuser' or 'random'
    recommender_split_item_data = 'byiteration' #can also be 'byitem' or 'random'
    if len(sys.argv) > 8:
        robin_hood_min_diff = int(sys.argv[7])
        robin_hood_max_iterations = int(sys.argv[8])
    else: #default values for backwards compatibility
        robin_hood_min_diff = 5
        robin_hood_max_iterations = 10000000
    #list containing log messages
    logs = list()
    #----------------------
    # User ratings division
    #----------------------
    #logging inside this function (to split out network activity time)
    ratings_lines, ratings_loads, ratings_reverse = read_ratings(input_path + '/ratings.dat')
    begin_time = time.time()
    # NOTE(review): this script is Python-2 era -- random.shuffle on a
    # dict .keys() view would fail on Python 3; confirm target interpreter.
    if recommender_split_user_data == 'byrating':
        user_division = robin_hood_split(ratings_lines.keys(), ratings_loads, number_of_user_jobs, 1, 500000)
    elif recommender_split_user_data == 'byuser':
        ratings_loads = make_all_as_one(ratings_loads)
        user_keys = ratings_lines.keys()
        random.shuffle(user_keys)
        user_division = robin_hood_split(user_keys, ratings_loads, number_of_user_jobs, 1, 500000)
    elif recommender_split_user_data == 'random':
        user_division = random_split(ratings_lines.keys(), number_of_user_jobs)
    add_log_time_now('work_division,serial,-1', begin_time)
    write_user_division(user_division, ratings_lines, output_path, 'ratings.dat')
    #cleanup no longer needed vars (maybe free memory?)
    ratings_lines = []
    #-------------------
    # Item data division
    #-------------------
    #logging inside this function (to split out network activity time)
    item_indexes = read_movielens_items(input_path)
    begin_time = time.time()
    #
    #parallel way
    #
    #pool = multiprocessing.Pool(number_of_cores)
    #pool.map(parallel_item_division, user_division, 1)
    #pool.close()
    #
    #serial way
    #
    for userjob in user_division:
        parallel_item_division(userjob)
    add_log_time_now('work_division,serial,-1', begin_time)
write_logs(logs, log_file) | StarcoderdataPython |
1790 | <filename>qstklearn/1knn.py<gh_stars>100-1000
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Feb 20, 2011
@author: <NAME>
@organization: Georgia Institute of Technology
@contact: <EMAIL>
@summary: This is an implementation of the 1-KNN algorithm for ranking features quickly.
It uses the knn implementation.
@status: oneKNN functions correctly, optimized to use n^2/2 algorithm.
'''
import matplotlib.pyplot as plt
from pylab import gca
import itertools
import string
import numpy as np
import math
import knn
from time import clock
'''
@summary: Query function for 1KNN, return value is a double between 0 and 1.
@param naData: A 2D numpy array. Each row is a data point with the final column containing the classification.
'''
def oneKnn(naData):
    """Rank a feature set by 1-nearest-neighbour agreement.

    *naData* is a 2D array whose final column holds the class label; all
    other columns are features.  Returns the fraction of points whose single
    nearest neighbour (Euclidean distance) carries the same label --
    computed with an O(n^2/2) pairwise scan that updates both endpoints of
    each pair.
    """
    if naData.ndim != 2:
        raise Exception("Data should have two dimensions")
    num_points = naData.shape[0]
    num_features = naData.shape[1] - 1  # last column is the classification
    best_dist = [1E300] * num_points    # start best distances as very large
    best_index = [-1] * num_points
    for i in range(num_points):
        row_i = naData[i]
        for j in range(i + 1, num_points):
            row_j = naData[j]
            sq = 0.0
            for k in range(num_features):
                sq += (row_i[k] - row_j[k]) ** 2
            dist = math.sqrt(sq)
            # One distance serves two checks: i's best and j's best.
            if dist < best_dist[i]:
                best_dist[i] = dist
                best_index[i] = j
            if dist < best_dist[j]:
                best_dist[j] = dist
                best_index[j] = i
    # Count points whose nearest neighbour shares their label.
    matches = 0
    for i in range(num_points):
        if naData[i][-1] == naData[best_index[i]][-1]:
            matches += 1
    return float(matches) / num_points
''' Test function to plot results '''
def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ):
    """Plot the two point distributions plus the 1-KNN and 5-KNN score
    series in a 3-row figure (matplotlib, blocking show())."""
    plt.clf()
    plt.subplot(311)
    plt.scatter( naDist1[:,0], naDist1[:,1] )
    plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
    #plt.ylabel( 'Feature 2' )
    #plt.xlabel( 'Feature 1' )
    #gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) )
    # Arrow marks the direction the red distribution is shifted each step.
    gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) )
    plt.title( 'Data Distribution' )
    plt.subplot(312)
    plt.plot( range( len(lfOneKnn) ), lfOneKnn )
    plt.ylabel( '1-KNN Value' )
    #plt.xlabel( 'Distribution Merge' )
    plt.title( '1-KNN Performance' )
    plt.subplot(313)
    plt.plot( range( len(lf5Knn) ), lf5Knn )
    plt.ylabel( '% Correct Classification' )
    #plt.xlabel( 'Distribution Merge' )
    plt.title( '5-KNN Performance' )
    plt.subplots_adjust()
    plt.show()
''' Function to plot 2 distributions '''
def _plotDist( naDist1, naDist2, i ):
    """Scatter-plot two 2D distributions (second in red) titled with the
    iteration number *i* (matplotlib, blocking show())."""
    plt.clf()
    plt.scatter( naDist1[:,0], naDist1[:,1] )
    plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
    plt.ylabel( 'Feature 2' )
    plt.xlabel( 'Feature 1' )
    plt.title( 'Iteration ' + str(i) )
    plt.show()
''' Function to test KNN performance '''
def _knnResult( naData ):
    """Train the project's knn learner on 70% of *naData* and return the
    classification accuracy (mode of 5 nearest neighbours) on the rest.

    NOTE(review): lSplit is a float used as a slice index -- accepted by
    the Python-2-era numpy this was written for, an error on modern numpy.
    """
    ''' Split up data into training/testing '''
    lSplit = naData.shape[0] * .7
    naTrain = naData[:lSplit, :]
    naTest = naData[lSplit:, :]
    knn.addEvidence( naTrain.astype(float), 1 );
    ''' Query with last column omitted and 5 nearest neighbors '''
    naResults = knn.query( naTest[:,:-1], 5, 'mode')
    ''' Count returns which are correct '''
    lCount = 0
    for i, dVal in enumerate(naResults):
        if dVal == naTest[i,-1]:
            lCount = lCount + 1
    dResult = float(lCount) / naResults.size
    return dResult
''' Tests performance of 1-KNN '''
def _test1():
    ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
    # Python 2 only (print statements).  For each scenario, the red cluster
    # is slid towards the blue one over 15 steps while both 1-KNN and 5-KNN
    # scores are recorded.
    for i in range(3):
        ''' Select one of three distributions '''
        if i == 0:
            # Two equal clusters, equal spread.
            naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
            naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
            naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )
            naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
        elif i == 1:
            # Second cluster tighter (smaller scale).
            naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
            naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
            naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )
            naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
        else:
            # Second cluster half the size (class imbalance).
            naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
            naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
            naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )
            naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )
        naOrig = np.vstack( (naTest1, naTest2) )
        naBoth = np.vstack( (naTest1, naTest2) )
        ''' Keep track of runtimes '''
        t = clock()
        cOneRuntime = t-t;
        cKnnRuntime = t-t;
        lfResults = []
        lfKnnResults = []
        # NOTE(review): this inner loop variable shadows the outer 'i'.
        for i in range( 15 ):
            #_plotDist( naTest1, naBoth[100:,:], i )
            t = clock()
            lfResults.append( oneKnn( naBoth ) )
            cOneRuntime = cOneRuntime + (clock() - t)
            t = clock()
            lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )
            cKnnRuntime = cKnnRuntime + (clock() - t)
            # Slide the second cluster 0.1 closer along feature 1.
            naBoth[500:,0] = naBoth[500:,0] - .1
        print 'Runtime OneKnn:', cOneRuntime
        print 'Runtime 5-KNN:', cKnnRuntime
        _plotResults( naTest1, naTest2, lfResults, lfKnnResults )
''' Tests performance of 1-KNN '''
def _test2():
    ''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
    # Scores every subset of 5 synthetic features against a target that
    # depends on features 1, 2, 3 and 5 (feature 4 is irrelevant), plotting
    # 1-KNN vs 5-KNN bars per subset size.
    np.random.seed( 12345 )
    ''' Create 5 distributions for each of the 5 attributes '''
    dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
    dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
    dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
    dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
    dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
    lDists = [ dist1, dist2, dist3, dist4, dist5 ]
    ''' All features used except for distribution 4 '''
    distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 )
    distY = distY.reshape( -1, 1 )
    # Binarize the target: 1 where the sum of sines is non-negative.
    for i, fVal in enumerate( distY ):
        if fVal >= 0:
            distY[i] = 1
        else:
            distY[i] = 0
    for i in range( 1, 6 ):
        lsNames = []
        lf1Vals = []
        lfVals = []
        for perm in itertools.combinations( '12345', i ):
            ''' set test distribution to first element '''
            naTest = lDists[ int(perm[0]) - 1 ]
            sPerm = perm[0]
            ''' stack other distributions on '''
            for j in range( 1, len(perm) ):
                sPerm = sPerm + str(perm[j])
                naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )
            ''' finally stack y values '''
            naTest = np.hstack( (naTest, distY) )
            lf1Vals.append( oneKnn( naTest ) )
            lfVals.append( _knnResult( np.random.permutation(naTest) ) )
            lsNames.append( sPerm )
        ''' Plot results '''
        plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )
        plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )
        plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )
        plt.ylabel('1-KNN Value/KNN Classification')
        plt.xlabel('Feature Set')
        plt.title('Combinations of ' + str(i) + ' Features')
        plt.ylim( (0,1) )
        if len(lf1Vals) < 2:
            plt.xlim( (-1,1) )
        gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )
        gca().xaxis.set_ticklabels( lsNames )
        plt.show()
if __name__ == '__main__':
    # Only the first demo runs by default; _test2 is kept for manual use.
    _test1()
    #_test2()
| StarcoderdataPython |
6455626 | import logging.config
import os
import platform
import pip
from subprocess import call
logging.config.fileConfig('logging.conf')
logr = logging.getLogger('pylog')
def main():
    """Entry point: log start/stop markers around the system-info dump and
    the pip upgrade run; any exception is logged, not propagated."""
    logr.info('start')
    try:
        print_sys_info()
        do_pip_update()
    except Exception:
        # Log the traceback but continue so the 'stop' marker is written.
        logr.exception('Exception')
    logr.info('stop')
def print_sys_info():
    """Log 'login|hostname|os|python' details for the current machine."""
    details = '|'.join([
        os.getlogin(),
        platform.node(),
        platform.system() + '-' + platform.release(),
        platform.python_version(),
    ])
    logr.info('login|hostname|os|python : {0}.'.format(details))
def do_pip_update():
    """Run 'pip install --upgrade' for every installed distribution."""
    for dist in pip.get_installed_distributions():
        # Pass an argument list with shell=False (the default) so project
        # names can never be interpreted by the shell.
        call(["pip", "install", "--upgrade", dist.project_name])
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
8020784 | <reponame>datalad/datalad-registry
import re
import time
from unittest.mock import patch
import pytest
from datalad_registry.tests.utils import create_and_register_repos, register_dataset
def test_overview_pager(client, tmp_path):
    """With 5 datasets registered, /overview/ shows no pager by default,
    and paginates into 3 pages when the page size is patched to 2."""
    create_and_register_repos(client, tmp_path, 5)
    r_overview = client.get("/overview/")
    # Everything fits on one page: no navigation links at all.
    assert b"previous" not in r_overview.data
    assert b"next" not in r_overview.data
    with patch("datalad_registry.overview._PAGE_NITEMS", 2):
        r_overview_pg1 = client.get("/overview/")
        assert b"previous" not in r_overview_pg1.data
        assert b"next" in r_overview_pg1.data
        # The default page is page 1.
        assert r_overview_pg1.data == client.get("/overview/?page=1").data
        r_overview_pg2 = client.get("/overview/?page=2")
        assert b"previous" in r_overview_pg2.data
        assert b"next" in r_overview_pg2.data
        r_overview_pg3 = client.get("/overview/?page=3")
        assert b"previous" in r_overview_pg3.data
        assert b"next" not in r_overview_pg3.data
@pytest.mark.slow
def test_overview_sort(client, tmp_path):
    """Create three datasets with staggered update times, key counts and
    URLs, then check every supported /overview/ sort order."""
    import datalad.api as dl
    from datalad_registry import tasks
    for name in ["ds1", "ds2", "ds3"]:
        ds = dl.Dataset(tmp_path / name).create()
        if name == "ds1":
            # ds1: tag + empty commit, no annexed content (0 keys).
            repo = ds.repo
            repo.tag("v1", message="Version 2")
            repo.call_git(["commit", "--allow-empty", "-mc1"])
        else:
            # ds2 gets two annexed files, ds3 gets one.
            (ds.pathobj / "foo").write_text("foo")
            if name == "ds2":
                (ds.pathobj / "bar").write_text("bar")
            ds.save()
        url = "file:///" + ds.path
        register_dataset(ds, url, client)
        tasks.collect_dataset_info()
        # Ensure distinct update timestamps for the time-based sorts.
        time.sleep(0.01)
    def assert_ds_order(order, output):
        match = re.finditer(b"/(ds[123])</td>", output)
        assert match, "regexp unexpectedly didn't match"
        assert [x.group(1) for x in match] == order
    # By default, most recently updated comes first.
    r_default = client.get("/overview/")
    assert_ds_order([b"ds3", b"ds2", b"ds1"], r_default.data)
    assert r_default.data == client.get("/overview/?sort=update-desc").data
    r_update_asc = client.get("/overview/?sort=update-asc")
    assert_ds_order([b"ds1", b"ds2", b"ds3"], r_update_asc.data)
    r_keys_asc = client.get("/overview/?sort=keys-asc")
    assert_ds_order([b"ds1", b"ds3", b"ds2"], r_keys_asc.data)
    r_keys_desc = client.get("/overview/?sort=keys-desc")
    assert_ds_order([b"ds2", b"ds3", b"ds1"], r_keys_desc.data)
    r_url_desc = client.get("/overview/?sort=url-desc")
    assert_ds_order([b"ds3", b"ds2", b"ds1"], r_url_desc.data)
    r_url_asc = client.get("/overview/?sort=url-asc")
    assert_ds_order([b"ds1", b"ds2", b"ds3"], r_url_asc.data)
    # Unknown falls back to default.
    assert r_default.data == client.get("/overview/?sort=unknown").data
@pytest.mark.slow
def test_overview_filter(client, tmp_path):
    """The /overview/?filter=... query restricts the listing to datasets
    whose name contains the substring."""
    import datalad.api as dl
    from datalad_registry import tasks
    for name in ["foo", "foobar", "baz"]:
        ds = dl.Dataset(tmp_path / name).create()
        url = "file:///" + ds.path
        register_dataset(ds, url, client)
    tasks.collect_dataset_info()
    # No filter: all three datasets listed.
    r_no_filter = client.get("/overview/")
    for name in [b"foo", b"foobar", b"baz"]:
        assert name in r_no_filter.data
    # Substring "ba" matches foobar and baz, but not the foo cell.
    r_ba_filter = client.get("/overview/?filter=ba")
    for name in [b"foobar", b"baz"]:
        assert name in r_ba_filter.data
    assert b"foo</td>" not in r_ba_filter.data
    r_foo_filter = client.get("/overview/?filter=foo")
    for name in [b"foo", b"foobar"]:
        assert name in r_foo_filter.data
    assert b"baz" not in r_foo_filter.data
| StarcoderdataPython |
1603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import atexit
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import grpc
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.runners.portability import local_job_service
from apache_beam.utils import subprocess_server
from apache_beam.version import __version__ as beam_version
class JobServer(object):
  """Abstract interface for starting and stopping a Beam job service."""

  def start(self):
    """Starts this JobServer, returning a grpc service to which to submit jobs.
    """
    raise NotImplementedError(type(self))

  def stop(self):
    """Stops this job server."""
    raise NotImplementedError(type(self))
class ExternalJobServer(JobServer):
  """A JobServer that connects to an already-running job service endpoint
  (host:port); its lifetime is not managed here, so stop() is a no-op."""

  def __init__(self, endpoint, timeout=None):
    # timeout: seconds to wait for the channel to be ready (None = forever).
    self._endpoint = endpoint
    self._timeout = timeout

  def start(self):
    channel = grpc.insecure_channel(self._endpoint)
    # Block until the channel is usable so callers get a working stub.
    grpc.channel_ready_future(channel).result(timeout=self._timeout)
    return beam_job_api_pb2_grpc.JobServiceStub(channel)

  def stop(self):
    pass
class EmbeddedJobServer(JobServer):
  """A JobServer running in-process via the local job service (no subprocess)."""

  def start(self):
    return local_job_service.LocalJobServicer()

  def stop(self):
    # The in-process servicer needs no explicit teardown here.
    pass
class StopOnExitJobServer(JobServer):
  """Wraps a JobServer such that its stop will automatically be called on exit
  (interpreter exit via atexit, or SIGINT).
  """

  def __init__(self, job_server):
    self._lock = threading.Lock()
    self._job_server = job_server
    self._started = False

  def start(self):
    with self._lock:
      if not self._started:
        self._endpoint = self._job_server.start()
        self._started = True
        atexit.register(self.stop)
        signal.signal(signal.SIGINT, self.stop)
    return self._endpoint

  def stop(self, *unused_args):
    # Accepts (and ignores) the (signum, frame) arguments the signal module
    # passes to handlers; the original zero-argument signature made the
    # SIGINT handler raise TypeError when actually invoked.
    with self._lock:
      if self._started:
        self._job_server.stop()
        self._started = False
class SubprocessJobServer(JobServer):
  """An abstract base class for JobServers run as an external process."""

  def __init__(self):
    self._local_temp_root = None
    self._server = None

  def subprocess_cmd_and_endpoint(self):
    """Subclasses return (command argv, 'host:port') for the job service."""
    raise NotImplementedError(type(self))

  def start(self):
    # Lazily launch the subprocess once; subsequent calls reuse it.
    if self._server is None:
      self._local_temp_root = tempfile.mkdtemp(prefix='beam-temp')
      cmd, endpoint = self.subprocess_cmd_and_endpoint()
      port = int(endpoint.split(':')[-1])
      self._server = subprocess_server.SubprocessServer(
          beam_job_api_pb2_grpc.JobServiceStub, cmd, port=port)
    return self._server.start()

  def stop(self):
    # NOTE(review): stop() before start() leaves self._server as None and
    # raises AttributeError -- confirm callers always start first.
    if self._local_temp_root:
      shutil.rmtree(self._local_temp_root)
      self._local_temp_root = None
    return self._server.stop()

  def local_temp_dir(self, **kwargs):
    # Temp dirs created here are removed together with the server's root.
    return tempfile.mkdtemp(dir=self._local_temp_root, **kwargs)
class JavaJarJobServer(SubprocessJobServer):
  """A SubprocessJobServer that launches ``java -jar <job server jar>``."""

  MAVEN_REPOSITORY = 'https://repo.maven.apache.org/maven2/org/apache/beam'
  JAR_CACHE = os.path.expanduser("~/.apache_beam/cache")

  def java_arguments(self, job_port, artifacts_dir):
    """Subclasses return the argv tail passed after the jar."""
    raise NotImplementedError(type(self))

  def path_to_jar(self):
    """Subclasses return the URL or path of the jar to run."""
    raise NotImplementedError(type(self))

  @staticmethod
  def path_to_beam_jar(gradle_target):
    return subprocess_server.JavaJarServer.path_to_beam_jar(gradle_target)

  @staticmethod
  def local_jar(url):
    # Downloads (and caches) remote jars; returns a local path.
    return subprocess_server.JavaJarServer.local_jar(url)

  def subprocess_cmd_and_endpoint(self):
    jar_path = self.local_jar(self.path_to_jar())
    artifacts_dir = self.local_temp_dir(prefix='artifacts')
    job_port, = subprocess_server.pick_port(None)
    return (
        ['java', '-jar', jar_path] + list(
            self.java_arguments(job_port, artifacts_dir)),
        'localhost:%s' % job_port)
class DockerizedJobServer(SubprocessJobServer):
  """
  Spins up the JobServer in a docker container for local execution.
  """

  def __init__(self, job_host="localhost",
               job_port=None,
               artifact_port=None,
               expansion_port=None,
               harness_port_range=(8100, 8200),
               max_connection_retries=5):
    # Ports left as None are picked automatically at launch time.
    super(DockerizedJobServer, self).__init__()
    self.job_host = job_host
    self.job_port = job_port
    self.expansion_port = expansion_port
    self.artifact_port = artifact_port
    self.harness_port_range = harness_port_range
    self.max_connection_retries = max_connection_retries

  def subprocess_cmd_and_endpoint(self):
    # TODO This is hardcoded to Flink at the moment but should be changed
    job_server_image_name = os.environ['USER'] + \
        "-docker-apache.bintray.io/beam/flink-job-server:latest"
    docker_path = subprocess.check_output(
        ['which', 'docker']).strip().decode('utf-8')
    cmd = ["docker", "run",
           # We mount the docker binary and socket to be able to spin up
           # "sibling" containers for the SDK harness.
           "-v", ':'.join([docker_path, "/bin/docker"]),
           "-v", "/var/run/docker.sock:/var/run/docker.sock"]
    self.job_port, self.artifact_port, self.expansion_port = (
        subprocess_server.pick_port(
            self.job_port, self.artifact_port, self.expansion_port))
    args = ['--job-host', self.job_host,
            '--job-port', str(self.job_port),
            '--artifact-port', str(self.artifact_port),
            '--expansion-port', str(self.expansion_port)]
    if sys.platform == "darwin":
      # Docker-for-Mac doesn't support host networking, so we need to explictly
      # publish ports from the Docker container to be able to connect to it.
      # Also, all other containers need to be aware that they run Docker-on-Mac
      # to connect against the internal Docker-for-Mac address.
      cmd += ["-e", "DOCKER_MAC_CONTAINER=1"]
      cmd += ["-p", "{}:{}".format(self.job_port, self.job_port)]
      cmd += ["-p", "{}:{}".format(self.artifact_port, self.artifact_port)]
      cmd += ["-p", "{}:{}".format(self.expansion_port, self.expansion_port)]
      cmd += ["-p", "{0}-{1}:{0}-{1}".format(
          self.harness_port_range[0], self.harness_port_range[1])]
    else:
      # This shouldn't be set for MacOS because it detroys port forwardings,
      # even though host networking is not supported on MacOS.
      cmd.append("--network=host")
    cmd.append(job_server_image_name)
    return cmd + args, '%s:%s' % (self.job_host, self.job_port)
| StarcoderdataPython |
4841941 | import setuptools
# Use the README as the PyPI long description; read it as UTF-8 explicitly
# so the build does not depend on the machine's locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="python-bufflog",
    version="0.1.3",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Python logger for Buffer services",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/bufferapp/python-bufflog",
    install_requires=["structlog"],
    packages=setuptools.find_packages(),
)
| StarcoderdataPython |
8039542 | <reponame>TomVethaak/qiskit-metal
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# modified by Chalmers/SK 20210611 to add DesignFlipChip
"""
=================================================
Designs (:mod:`qiskit_metal.designs`)
=================================================
.. currentmodule:: qiskit_metal.designs
Module containing all Qiskit Metal designs.
.. _qdesign:
QDesign
---------------
.. autosummary::
:toctree: ../stubs/
QDesign
DesignPlanar
---------------
.. autosummary::
:toctree: ../stubs/
DesignPlanar
DesignFlipChip
---------------
.. autosummary::
:toctree: ../stubs/
DesignFlipChip
QNet
---------------
.. autosummary::
:toctree: ../stubs/
QNet
InterfaceComponents
-------------------
.. autosummary::
:toctree: ../stubs/
Components
"""
from .. import Dict
from .. import is_design
from .design_base import QDesign
from .design_planar import DesignPlanar
from .design_flipchip import DesignFlipChip
from .net_info import QNet
from .interface_components import Components
| StarcoderdataPython |
377101 | <gh_stars>0
# Simple fruit-bin lookup: ask the user for a fruit and report which bin holds it.
fruit = input("what kind of fruit do you want?")
if fruit == "apples":
    print("apples in bin 1")
elif fruit == "oranges":  # fixed misspelling "orenges" so typing the real word matches
    print("oranges in bin 2")
elif fruit == "bananas":
    print("bananas in bin 3")
else:
    print("Error! I dont recognize this fruit!")
| StarcoderdataPython |
1895337 | import hashlib
import json
from itertools import chain
import logging
from typing import List, Optional, Dict, Any, Union
import time
from enum import Enum
import uuid
import boto3 # type: ignore
from bugout.data import BugoutSearchResults, BugoutSearchResult
from bugout.journal import SearchOrder
from ens.utils import is_valid_ens_name # type: ignore
from eth_utils.address import is_address # type: ignore
from moonstreamdb.models import (
EthereumLabel,
)
from sqlalchemy import text
from sqlalchemy.orm import Session
from web3._utils.validation import validate_abi
from .middleware import MoonstreamHTTPException
from . import data
from .reporter import reporter
from .middleware import MoonstreamHTTPException
from .settings import ETHERSCAN_SMARTCONTRACTS_BUCKET
from bugout.data import BugoutResource
from .settings import (
MOONSTREAM_APPLICATION_ID,
bugout_client as bc,
BUGOUT_REQUEST_TIMEOUT_SECONDS,
MOONSTREAM_ADMIN_ACCESS_TOKEN,
MOONSTREAM_DATA_JOURNAL_ID,
MOONSTREAM_S3_SMARTCONTRACTS_ABI_BUCKET,
MOONSTREAM_S3_SMARTCONTRACTS_ABI_PREFIX,
MOONSTREAM_MOONWORM_TASKS_JOURNAL,
)
from web3 import Web3
logger = logging.getLogger(__name__)
# Maps Moonstream subscription type ids to the blockchain name used when
# building S3 object paths (see upload_abi_to_s3).
blockchain_by_subscription_id = {
    "ethereum_blockchain": "ethereum",
    "polygon_blockchain": "polygon",
    "ethereum_smartcontract": "ethereum",
    "polygon_smartcontract": "polygon",
}
class StatusAPIException(Exception):
    """
    Raised when a status check against the Moonstream API fails
    (see check_api_status).
    """
class LabelNames(Enum):
    """Label values attached to EthereumLabel rows by the Moonstream crawlers."""
    ETHERSCAN_SMARTCONTRACT = "etherscan_smartcontract"
    COINMARKETCAP_TOKEN = "coinmarketcap_token"
    ERC721 = "erc721"
def get_contract_source_info(
    db_session: Session, contract_address: str
) -> Optional[data.EthereumSmartContractSourceInfo]:
    """
    Look up the Etherscan "smartcontract" label for ``contract_address`` and,
    if present, load the verified source bundle it points to from S3.

    Returns None when the address has no such label or the S3 fetch fails
    (the failure is logged and reported, not raised).
    """
    label = (
        db_session.query(EthereumLabel)
        .filter(EthereumLabel.address == contract_address)
        .filter(EthereumLabel.label == LabelNames.ETHERSCAN_SMARTCONTRACT.value)
        .one_or_none()
    )
    if label is None:
        return None
    object_uri = label.label_data["object_uri"]
    # NOTE(review): the S3 key is derived by splitting on a hard-coded bucket
    # URI prefix — confirm it stays in sync with ETHERSCAN_SMARTCONTRACTS_BUCKET.
    key = object_uri.split("s3://etherscan-smart-contracts/")[1]
    s3 = boto3.client("s3")
    bucket = ETHERSCAN_SMARTCONTRACTS_BUCKET
    try:
        raw_obj = s3.get_object(Bucket=bucket, Key=key)
        obj_data = json.loads(raw_obj["Body"].read().decode("utf-8"))["data"]
        contract_source_info = data.EthereumSmartContractSourceInfo(
            name=obj_data["ContractName"],
            source_code=obj_data["SourceCode"],
            compiler_version=obj_data["CompilerVersion"],
            abi=obj_data["ABI"],
        )
        return contract_source_info
    except Exception as e:
        logger.error(f"Failed to load smart contract {object_uri}")
        reporter.error_report(e)
        return None
def get_ens_name(web3: Web3, address: str) -> Optional[str]:
    """
    Reverse-resolve an Ethereum ``address`` to its ENS name (or None).

    Raises ValueError for a syntactically invalid address, and re-raises any
    node/communication error after reporting it.
    """
    try:
        checksum_address = web3.toChecksumAddress(address)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed and re-labeled as a ValueError.
        raise ValueError(f"{address} is invalid ethereum address is passed")
    try:
        ens_name = web3.ens.name(checksum_address)
        return ens_name
    except Exception as e:
        reporter.error_report(e, ["web3", "ens"])
        logger.error(
            f"Cannot get ens name for address {checksum_address}. Probably node is down"
        )
        raise e
def get_ens_address(web3: Web3, name: str) -> Optional[str]:
    """
    Forward-resolve an ENS ``name`` to a lower-cased Ethereum address.

    Raises ValueError for a syntactically invalid ENS name; re-raises any
    node/communication error after reporting it. Returns None when the name
    does not resolve to an address.
    """
    if not is_valid_ens_name(name):
        raise ValueError(f"{name} is not valid ens name")
    try:
        ens_checksum_address = web3.ens.address(name)
        if ens_checksum_address is not None:
            # Normalize the checksummed address to the lower-case form used in the DB.
            ordinary_address = ens_checksum_address.lower()
            return ordinary_address
        return None
    except Exception as e:
        reporter.error_report(e, ["web3", "ens"])
        logger.error(f"Cannot get ens address for name {name}. Probably node is down")
        raise e
def get_ethereum_address_info(
    db_session: Session, web3: Web3, address: str
) -> Optional[data.EthereumAddressInfo]:
    """
    Aggregate what Moonstream knows about an Ethereum ``address``.

    Combines the (optional) ENS name with the most recent CoinMarketCap token,
    Etherscan smart-contract and ERC721 labels from the database.

    Raises ValueError if ``address`` is not a valid Ethereum address.
    """
    if not is_address(address):
        raise ValueError(f"Invalid ethereum address : {address}")
    address_info = data.EthereumAddressInfo(address=address)
    try:
        address_info.ens_name = get_ens_name(web3, address)
    except Exception:
        # ENS resolution is best-effort; narrowed from a bare ``except:`` so
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    etherscan_address_url = f"https://etherscan.io/address/{address}"
    etherscan_token_url = f"https://etherscan.io/token/{address}"
    blockchain_com_url = f"https://www.blockchain.com/eth/address/{address}"
    # Most recent CoinMarketCap token label, if any.
    coinmarketcap_label: Optional[EthereumLabel] = (
        db_session.query(EthereumLabel)
        .filter(EthereumLabel.address == address)
        .filter(EthereumLabel.label == LabelNames.COINMARKETCAP_TOKEN.value)
        .order_by(text("created_at desc"))
        .limit(1)
        .one_or_none()
    )
    if coinmarketcap_label is not None:
        address_info.token = data.EthereumTokenDetails(
            name=coinmarketcap_label.label_data["name"],
            symbol=coinmarketcap_label.label_data["symbol"],
            external_url=[
                coinmarketcap_label.label_data["coinmarketcap_url"],
                etherscan_token_url,
                blockchain_com_url,
            ],
        )
    # Checking for smart contract
    etherscan_label: Optional[EthereumLabel] = (
        db_session.query(EthereumLabel)
        .filter(EthereumLabel.address == address)
        .filter(EthereumLabel.label == LabelNames.ETHERSCAN_SMARTCONTRACT.value)
        .order_by(text("created_at desc"))
        .limit(1)
        .one_or_none()
    )
    if etherscan_label is not None:
        address_info.smart_contract = data.EthereumSmartContractDetails(
            name=etherscan_label.label_data["name"],
            external_url=[etherscan_address_url, blockchain_com_url],
        )
    # Checking for NFT (most recent ERC721 label)
    erc721_label: Optional[EthereumLabel] = (
        db_session.query(EthereumLabel)
        .filter(EthereumLabel.address == address)
        .filter(EthereumLabel.label == LabelNames.ERC721.value)
        .order_by(text("created_at desc"))
        .limit(1)
        .one_or_none()
    )
    if erc721_label is not None:
        address_info.nft = data.EthereumNFTDetails(
            name=erc721_label.label_data.get("name"),
            symbol=erc721_label.label_data.get("symbol"),
            total_supply=erc721_label.label_data.get("totalSupply"),
            external_url=[etherscan_token_url, blockchain_com_url],
        )
    return address_info
def get_address_labels(
    db_session: Session, start: int, limit: int, addresses: Optional[str] = None
) -> data.AddressListLabelsResponse:
    """
    Attach labels to addresses.

    ``addresses`` is a comma-separated list; ``start``/``limit`` paginate that
    list before the per-address label queries. A missing ``addresses`` yields
    an empty response.
    """
    if addresses is not None:
        addresses_list = addresses.split(",")
        addresses_obj = addresses_list[start : start + limit]
    else:
        addresses_obj = []
    addresses_response = data.AddressListLabelsResponse(addresses=[])
    for address in addresses_obj:
        # One query per address; all labels for the address are returned.
        labels_obj = (
            db_session.query(EthereumLabel)
            .filter(EthereumLabel.address == address)
            .all()
        )
        addresses_response.addresses.append(
            data.AddressLabelsResponse(
                address=address,
                labels=[
                    data.AddressLabelResponse(
                        label=label.label, label_data=label.label_data
                    )
                    for label in labels_obj
                ],
            )
        )
    return addresses_response
def create_onboarding_resource(
    token: uuid.UUID,
    resource_data: Optional[Dict[str, Any]] = None,
) -> BugoutResource:
    """
    Create the Brood resource that tracks a user's onboarding state.

    When ``resource_data`` is not supplied, a fresh default onboarding state
    is built per call. (The original used a mutable dict as the default
    argument, which is shared across calls and can be mutated downstream.)
    """
    if resource_data is None:
        resource_data = {
            "type": data.USER_ONBOARDING_STATE,
            "steps": {
                "welcome": 0,
                "subscriptions": 0,
                "stream": 0,
            },
            "is_complete": False,
        }
    resource = bc.create_resource(
        token=token,
        application_id=MOONSTREAM_APPLICATION_ID,
        resource_data=resource_data,
        timeout=BUGOUT_REQUEST_TIMEOUT_SECONDS,
    )
    return resource
def check_api_status():
    """
    Return the creation timestamp of the most recent journal entry for each
    known crawler type (None when no entry exists).

    Raises StatusAPIException if any of the searches fails.
    """
    crawl_types_timestamp: Dict[str, Any] = {
        "ethereum_txpool": None,
        "ethereum_trending": None,
    }
    for crawl_type in crawl_types_timestamp.keys():
        try:
            # Only the newest entry is needed, so limit=1, newest first.
            search_results: BugoutSearchResults = bc.search(
                token=MOONSTREAM_ADMIN_ACCESS_TOKEN,
                journal_id=MOONSTREAM_DATA_JOURNAL_ID,
                query=f"tag:crawl_type:{crawl_type}",
                limit=1,
                content=False,
                timeout=10.0,
                order=SearchOrder.DESCENDING,
            )
            if len(search_results.results) == 1:
                crawl_types_timestamp[crawl_type] = search_results.results[0].created_at
        except Exception:
            raise StatusAPIException(
                f"Unable to get status for crawler with type: {crawl_type}"
            )
    return crawl_types_timestamp
def json_type(evm_type: str) -> type:
    """
    Map a Solidity/EVM ABI type name to the Python type expected for its
    JSON filter value. Raises ValueError for unsupported type names.
    """
    if evm_type == "bool":
        return bool
    if evm_type.startswith(("uint", "int")):
        return int
    if evm_type.startswith("bytes") or evm_type in ("string", "address"):
        return str
    raise ValueError(f"Cannot convert to python type {evm_type}")
def dashboards_abi_validation(
    dashboard_subscription: data.DashboardMeta,
    abi: Any,
    s3_path: str,
):
    """
    Validate current dashboard subscription : https://github.com/bugout-dev/moonstream/issues/345#issuecomment-953052444
    with contract abi on S3

    Every method/event the dashboard references must exist in the ABI, and
    every filter argument must exist on that method/event with a value of the
    matching JSON type. Raises MoonstreamHTTPException(400) on the first
    mismatch; returns True when everything checks out.
    """
    # Index ABI functions as name -> {input name: input type}.
    abi_functions = {
        item["name"]: {inputs["name"]: inputs["type"] for inputs in item["inputs"]}
        for item in abi
        if item["type"] == "function"
    }
    if not dashboard_subscription.all_methods:
        for method in dashboard_subscription.methods:
            if method["name"] not in abi_functions:
                # Method not exists
                logger.error(
                    f"Error on dashboard resource validation method:{method['name']}"
                    f" of subscription: {dashboard_subscription.subscription_id}"
                    f"does not exists in Abi {s3_path}"
                )
                raise MoonstreamHTTPException(status_code=400)
            if method.get("filters") and isinstance(method["filters"], dict):
                for input_argument_name, value in method["filters"].items():
                    if input_argument_name not in abi_functions[method["name"]]:
                        # Argument not exists
                        logger.error(
                            f"Error on dashboard resource validation type argument: {input_argument_name} of method:{method['name']} "
                            f" of subscription: {dashboard_subscription.subscription_id} has incorrect"
                            f"does not exists in Abi {s3_path}"
                        )
                        raise MoonstreamHTTPException(status_code=400)
                    if not isinstance(
                        value,
                        json_type(abi_functions[method["name"]][input_argument_name]),
                    ):
                        # Argument has incorrect type
                        logger.error(
                            f"Error on dashboard resource validation type argument: {input_argument_name} of method:{method['name']} "
                            f" of subscription: {dashboard_subscription.subscription_id} has incorrect type {type(value)}"
                            f" when {abi_functions[method['name']][input_argument_name]} required."
                        )
                        raise MoonstreamHTTPException(status_code=400)
    # Index ABI events the same way: name -> {input name: input type}.
    abi_events = {
        item["name"]: {inputs["name"]: inputs["type"] for inputs in item["inputs"]}
        for item in abi
        if item["type"] == "event"
    }
    if not dashboard_subscription.all_events:
        for event in dashboard_subscription.events:
            if event["name"] not in abi_events:
                logger.error(
                    f"Error on dashboard resource validation event:{event['name']}"
                    f" of subscription: {dashboard_subscription.subscription_id}"
                    f"does not exists in Abi {s3_path}"
                )
                raise MoonstreamHTTPException(status_code=400)
            if event.get("filters") and isinstance(event["filters"], dict):
                for input_argument_name, value in event["filters"].items():
                    if input_argument_name not in abi_events[event["name"]]:
                        # Argument not exists
                        logger.error(
                            f"Error on dashboard resource validation type argument: {input_argument_name} of method:{event['name']} "
                            f" of subscription: {dashboard_subscription.subscription_id} has incorrect"
                            f"does not exists in Abi {s3_path}"
                        )
                        raise MoonstreamHTTPException(status_code=400)
                    if not isinstance(
                        value,
                        json_type(abi_events[event["name"]][input_argument_name]),
                    ):
                        logger.error(
                            f"Error on dashboard resource validation type argument: {input_argument_name} of method:{event['name']} "
                            f" of subscription: {dashboard_subscription.subscription_id} has incorrect type {type(value)}"
                            f" when {abi_events[event['name']][input_argument_name]} required."
                        )
                        raise MoonstreamHTTPException(status_code=400)
    return True
def validate_abi_json(abi: Any) -> None:
    """
    Run web3's ABI validation over an already-deserialized ABI object.

    Raises MoonstreamHTTPException(400) on any validation failure, keeping
    the original ValueError message as the response detail when available.
    """
    try:
        validate_abi(abi)
    except ValueError as e:
        raise MoonstreamHTTPException(status_code=400, detail=e)
    except Exception:
        # Narrowed from a bare ``except:``; also fixed the "valiadation" typo
        # in the user-facing detail message.
        raise MoonstreamHTTPException(
            status_code=400, detail="Error on abi validation."
        )
def upload_abi_to_s3(
    resource: BugoutResource,
    abi: str,
    update: Dict[str, Any],
) -> Dict[str, Any]:
    """
    Upload a subscription's ABI (a JSON string) to the smart-contracts S3 bucket.

    Mutates and returns ``update`` with the ``abi``/``bucket``/``s3_path``
    fields used to update the Brood resource afterwards.
    """
    s3_client = boto3.client("s3")
    bucket = MOONSTREAM_S3_SMARTCONTRACTS_ABI_BUCKET
    result_bytes = abi.encode("utf-8")
    # Key layout: <prefix>/<blockchain>/abi/<address>/<resource id>/abi.json
    result_key = f"{MOONSTREAM_S3_SMARTCONTRACTS_ABI_PREFIX}/{blockchain_by_subscription_id[resource.resource_data['subscription_type_id']]}/abi/{resource.resource_data['address']}/{resource.id}/abi.json"
    s3_client.put_object(
        Body=result_bytes,
        Bucket=bucket,
        Key=result_key,
        ContentType="application/json",
        Metadata={"Moonstream": "Abi data"},
    )
    update["abi"] = True
    update["bucket"] = MOONSTREAM_S3_SMARTCONTRACTS_ABI_BUCKET
    update["s3_path"] = result_key
    return update
def get_all_entries_from_search(
    journal_id: str, search_query: str, limit: int, token: str
) -> List[BugoutSearchResult]:
    """
    Fetch every entry matching ``search_query`` from a Bugout journal,
    paginating ``limit`` entries at a time.

    A failure on the first page is reported and yields an empty list (the
    original continued after the except block and crashed with an
    UnboundLocalError on the never-assigned response variable).
    """
    results: List[BugoutSearchResult] = []
    try:
        response = bc.search(
            token=token,
            journal_id=journal_id,
            query=search_query,
            content=False,
            timeout=10.0,
            limit=limit,
            offset=0,
        )
    except Exception as e:
        reporter.error_report(e)
        return results
    results.extend(response.results)
    # Fetch the remaining pages, if any.
    for offset in range(limit, response.total_results, limit):
        response = bc.search(
            token=token,
            journal_id=journal_id,
            query=search_query,
            content=False,
            timeout=10.0,
            limit=limit,
            offset=offset,
        )
        results.extend(response.results)
    return results
def apply_moonworm_tasks(
    subscription_type: str,
    abi: Any,
    address: str,
) -> None:
    """
    Register moonworm tasks for each ABI entry that is not registered yet.

    Each non-view function/event in ``abi`` is keyed by the md5 hash of its
    JSON form; entries whose hash already appears in the tasks journal (as an
    ``abi_method_hash`` tag) are skipped. Failures are reported, not raised.
    """
    entries_pack = []
    try:
        entries = get_all_entries_from_search(
            journal_id=MOONSTREAM_MOONWORM_TASKS_JOURNAL,
            search_query=f"tag:address:{address} tag:subscription_type:{subscription_type}",
            limit=100,
            token=MOONSTREAM_ADMIN_ACCESS_TOKEN,
        )
        existing_tags = [entry.tags for entry in entries]
        # Hashes of ABI items that already have a moonworm task entry.
        existing_hashes = [
            tag.split(":")[-1]
            for tag in chain(*existing_tags)
            if "abi_method_hash" in tag
        ]
        # md5(json) -> ABI item, for every state-changing function and event.
        abi_hashes_dict = {
            hashlib.md5(json.dumps(method).encode("utf-8")).hexdigest(): method
            for method in abi
            if (method["type"] in ("event", "function"))
            and (method.get("stateMutability", "") != "view")
        }
        for hash in abi_hashes_dict:
            if hash not in existing_hashes:
                entries_pack.append(
                    {
                        "title": address,
                        "content": json.dumps(abi_hashes_dict[hash], indent=4),
                        "tags": [
                            f"address:{address}",
                            f"type:{abi_hashes_dict[hash]['type']}",
                            f"abi_method_hash:{hash}",
                            f"subscription_type:{subscription_type}",
                            f"abi_name:{abi_hashes_dict[hash]['name']}",
                            f"status:active",
                        ],
                    }
                )
    except Exception as e:
        reporter.error_report(e)
    if len(entries_pack) > 0:
        bc.create_entries_pack(
            token=MOONSTREAM_ADMIN_ACCESS_TOKEN,
            journal_id=MOONSTREAM_MOONWORM_TASKS_JOURNAL,
            entries=entries_pack,
            timeout=15,
        )
| StarcoderdataPython |
5019051 | <reponame>bayeslabs/Deepcan
import numpy as np
import pandas as pd
import torch
from torch import distributions
def nan2zero(x):
    """Return a tensor equal to ``x`` with every NaN entry replaced by 0."""
    nan_mask = torch.isnan(x)
    zero_fill = torch.zeros_like(x)
    return torch.where(nan_mask, zero_fill, x)
def nan2inf(x):
    """Return a tensor equal to ``x`` with every NaN entry replaced by +inf."""
    nan_mask = torch.isnan(x)
    inf_fill = torch.zeros_like(x) + np.inf
    return torch.where(nan_mask, inf_fill, x)
def _nelem(x):
    """Count the non-NaN entries of ``x``; returns 1 instead of 0 so callers can divide safely."""
    valid_mask = ~torch.isnan(x)
    count = torch.sum(torch.tensor(valid_mask, dtype=torch.float32))
    is_empty = torch.eq(count, torch.tensor(0.))
    return torch.where(is_empty, torch.tensor(1.), count)
def reduce_mean(x):
    """Mean of ``x`` computed over its non-NaN entries only."""
    denominator = _nelem(x)
    numerator = torch.sum(nan2zero(x))
    return numerator / denominator
# used the proposed implementation as in
# https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/21
def parameterized_truncated_normal(uniform, mu, sigma, a, b):
    """
    Inverse-CDF sampling of a normal N(mu, sigma) truncated to [a, b].

    ``uniform`` is expected to hold samples in [0, 1] (see the note in
    ``truncated_normal``); ``mu``/``sigma`` are assumed scalar — TODO confirm.
    """
    normal = distributions.normal.Normal(0, 1)
    # Degenerate case: zero std collapses every sample onto the mean.
    if sigma == 0:
        x = torch.zeros(size=uniform.shape)
        x = x + mu
        return x
    # Standardized truncation bounds.
    alpha = (a - mu) / sigma
    beta = (b - mu) / sigma
    alpha_normal_cdf = normal.cdf(alpha)
    # Map the samples into the CDF range [Phi(alpha), Phi(beta)].
    p = alpha_normal_cdf + (normal.cdf(beta) - alpha_normal_cdf) * uniform
    p = p.numpy()
    one = np.array(1, dtype=p.dtype)
    epsilon = np.array(np.finfo(p.dtype).eps, dtype=p.dtype)
    # Clip 2p-1 away from +/-1 so erfinv stays finite.
    v = np.clip(2 * p - 1, -one + epsilon, one - epsilon)
    # Invert the standard normal CDF: x = mu + sigma * sqrt(2) * erfinv(2p - 1).
    x = mu + sigma * np.sqrt(2) * torch.erfinv(torch.from_numpy(v))
    x = torch.clamp(x, a, b)
    return x
def truncated_normal(mean, stddev, shape):
    # NOTE(review): despite the variable name, ``uniform`` is drawn from a
    # *normal* distribution; the inverse-CDF transform in
    # parameterized_truncated_normal expects uniform samples in [0, 1], so the
    # result is likely not the intended truncated normal — confirm whether
    # np.random.uniform was meant. Also note the bounds are +/-2*stddev around
    # zero, not around ``mean`` — verify that is intentional.
    uniform = torch.from_numpy(np.random.normal(loc=mean, scale=stddev, size=shape))
    return parameterized_truncated_normal(uniform, mu=mean, sigma=stddev, a=-(2*stddev), b=(2*stddev))
| StarcoderdataPython |
5096731 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 13:37:04 2019
@author: paskali
"""
import os, nrrd, json
import numpy as np
import tensorflow as tf
class DataManager():
    """Loads NRRD image/mask volumes for training, validation and testing,
    either as whole volumes or as overlapping 3D patches."""
    def __init__(self, train_folder='data/train', val_folder='data/val',
                 test_folder='data/test', result_folder='data/result',
                 image_shape=None, patch_size=None, stride_size=None):
        """
        Data manager prepares the data for loading in training and testing
        function. It can load whole image volume, and load extracted patches.
        Parameters
        ----------
        train_folder : str, optional
            path to train folder. The default is 'data/train'.
        val_folder : str, optional
            path to validation folder. The default is 'data/val'.
        test_folder : str, optional
            path to test folder. The default is 'data/test'.
        result_folder : str
            path to folder where predictions are written. The default is 'data/result'
        image_shape : tuple
            The size of input image.
        patch_size : tuple, optional
            The size of each patch. The default is None.
        stride_size : tuple, optional
            The size of the stride. The default is None.
        If patch_size and stride_size are define, data manager is ready for
        patch extraction. Otherwise, its ready for whole image loading.
        Returns
        -------
        None.
        """
        self.train_folder = train_folder
        self.val_folder = val_folder
        self.test_folder = test_folder
        self.result_folder = result_folder
        self.train_list = self._read_image_paths(train_folder)
        self.val_list = self._read_image_paths(val_folder)
        self.test_list = self._read_image_paths(test_folder)
        self.image_shape = image_shape
        self.patch_size = patch_size
        self.stride_size = stride_size
        if (patch_size and stride_size):
            # Patch mode: dataset sizes count patches, model input is one patch.
            self.train_size, self.val_size, self.test_size = self._calculate_dataset_size()
            self.input_size = patch_size + (1,)
        else:
            # Whole-volume mode: dataset sizes count images.
            self.train_size, self.val_size, self.test_size = len(self.train_list), len(self.val_list), len(self.test_list)
            self.input_size = image_shape + (1,)
    def train_generator(self):
        """Endlessly yield (image, mask) whole-volume nparray pairs from the train folder."""
        while True:
            for item in self.train_list:
                image = nrrd.read(os.path.join(self.train_folder, 'image', item))[0]
                mask = nrrd.read(os.path.join(self.train_folder, 'mask', item))[0]
                yield image, mask
    def train_patches_generator(self, epochs):
        """
        Train generator that loads all images in memory, extracts patches and
        finally yields image_patch and mask_patch.
        Parameters
        ----------
        epochs : int
            number of training epochs.
        Yields
        ------
        image_patch : tensor
            image patch tensor.
        mask_patch : tensor
            binary mask patch tensor.
        """
        # Loading images
        images = []
        masks = []
        for item in self.train_list:
            image = nrrd.read(os.path.join(self.train_folder, 'image', item))[0]
            mask = nrrd.read(os.path.join(self.train_folder, 'mask', item))[0]
            images.append(image)
            masks.append(mask)
        # Patches extraction
        for _ in range(epochs):
            for i in range(len(images)):
                print('\nImage', f'[{i+1}/{len(images)}] - Epoch {_+1}/{epochs}')
                image = images[i]
                mask = masks[i]
                print('Shape', image.shape, mask.shape)
                for image_patch, mask_patch in self._extract_patches(image, mask, self.patch_size, self.stride_size):
                    image_patch = tf.convert_to_tensor(image_patch)
                    mask_patch = tf.convert_to_tensor(mask_patch)
                    yield image_patch, mask_patch
    def _extract_patches(self, image, mask, patch_size, stride):
        """
        Patch generator.
        Parameters
        ----------
        image : nparray
            3D nparray of image.
        mask : nparray
            3D nparray of binary mask.
        patch_size : tuple
            The size of each patch.
        stride : tuple
            The size of the stride.
        Yields
        ------
        image_patch : nparray
            a patch of original image.
        mask_patch : nparray
            a matching patch of binary mask.
        """
        image_h, image_w, image_d = image.shape
        for z in range(0, image_d-patch_size[2]+1, stride[2]):
            for y in range(0, image_h-patch_size[1]+1, stride[1]):
                for x in range(0, image_w-patch_size[0]+1, stride[0]):
                    # Patches are zero-initialized, so partial slices end up zero-padded.
                    image_patch = np.zeros(patch_size)
                    image_slice = image[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]]
                    image_patch[0:image_slice.shape[0], 0:image_slice.shape[1], 0:image_slice.shape[2]] += image_slice
                    mask_patch = np.zeros(patch_size)
                    mask_slice = mask[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]]
                    mask_patch[0:image_slice.shape[0], 0:image_slice.shape[1], 0:image_slice.shape[2]] += mask_slice
                    # Add channel then batch dimensions: shape (1, *patch_size, 1).
                    image_patch = np.reshape(image_patch, image_patch.shape + (1,))
                    image_patch = np.reshape(image_patch, (1,) + image_patch.shape)
                    mask_patch = np.reshape(mask_patch, mask_patch.shape + (1,))
                    mask_patch = np.reshape(mask_patch, (1,) + mask_patch.shape)
                    yield image_patch, mask_patch
    def _count_patches(self):
        """
        Count the number of patches extracted from image with specific shape.
        Returns
        -------
        count : int
            the number of patches extracted of each image.
        """
        patch_size = self.patch_size
        stride = self.stride_size
        image_h, image_w, image_d = self.image_shape
        # Mirrors the loop structure of _extract_patches exactly.
        count = 0
        for z in range(0, image_d-patch_size[2]+1, stride[2]):
            for y in range(0, image_h-patch_size[1]+1, stride[1]):
                for x in range(0, image_w-patch_size[0]+1, stride[0]):
                    count += 1
        return count
    def _calculate_dataset_size(self):
        """
        Compute the total number of patches for train, validation and test set.
        Returns
        -------
        int
            Total number of pathces for train set.
        int
            Total number of pathces for validation set.
        int
            Total number of pathces for test set.
        """
        patches_nu = self._count_patches()
        return len(self.train_list) * patches_nu, len(self.val_list) * patches_nu, len(self.test_list) * patches_nu
    def get_augmented_train_size(self, rotate, deform, combo):
        # TODO filters should be added in future.
        # Each augmentation mode contributes one copy of every training image.
        return len(self.train_list) * (rotate+deform+combo)
    def _extract_test_patches(self, image, image_title, patch_size, stride):
        """
        Extract patches from a test image, recording each patch origin in
        <result_folder>/<image_title>.json so that _fuse_patches can
        reassemble the prediction later.
        """
        image_h, image_w, image_d = image.shape
        patches = []
        patches_info = {}
        idx = 0
        for z in range(0, image_d-patch_size[2]+1, stride[2]):
            for y in range(0, image_h-patch_size[1]+1, stride[1]):
                for x in range(0, image_w-patch_size[0]+1, stride[0]):
                    image_patch = np.zeros(patch_size)
                    image_slice = image[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]]
                    image_patch[0:image_slice.shape[0], 0:image_slice.shape[1], 0:image_slice.shape[2]] += image_slice
                    patches_info[idx] = x, y, z
                    patches.append(image_patch)
                    idx += 1
        patches_info['image_res'] = image.shape
        patches_info['size'] = patch_size
        patches_info['stride'] = stride
        patches_info['len'] = idx
        with open(f'{self.result_folder}/{image_title}.json', 'w') as file:
            json.dump(patches_info, file)
        return patches
    def val_generator(self):
        """
        Validation set generator. Endlessly load each image and binary mask,
        add batch and channel dimensions, and yield the pair as tensors.
        Yields
        ------
        image : tensor
            image tensor with shape (1,) + image_shape + (1,).
        mask : tensor
            mask tensor with shape (1,) + image_shape + (1,).
        """
        # for i in range(epochs):
        while True:
            for item in self.val_list:
                image = nrrd.read(os.path.join(self.val_folder,'image', item))[0]
                image = np.reshape(image, image.shape + (1,))
                image = np.reshape(image, (1,) + image.shape)
                image = tf.convert_to_tensor(image)
                mask = nrrd.read(os.path.join(self.val_folder, 'mask', item))[0]
                mask = np.reshape(mask, mask.shape + (1,))
                mask = np.reshape(mask, (1,) + mask.shape)
                mask = tf.convert_to_tensor(mask)
                yield (image, mask)
    def val_patches_generator(self, epochs):
        '''
        Validation set generator that extract and yield patches from each validation image.
        Parameters
        ----------
        epochs : int
            number of training epochs.
        Yields
        ------
        image_patch : tensor
            image patch tensor.
        mask_patch : tensor
            binary mask patch tensor.
        '''
        # Loading images
        images = []
        masks = []
        for item in self.val_list:
            image = nrrd.read(os.path.join(self.val_folder, 'image', item))[0]
            mask = nrrd.read(os.path.join(self.val_folder, 'mask', item))[0]
            images.append(image)
            masks.append(mask)
        # Patches extraction
        for _ in range(epochs):
            for i in range(len(images)):
                for image, mask in self._extract_patches(images[i], masks[i], self.patch_size, self.stride_size):
                    image = tf.convert_to_tensor(image)
                    mask = tf.convert_to_tensor(mask)
                    yield image, mask
    def test_generator(self):
        """
        Test set generator. Load only image, then yield
        image tensor (with batch and channel dimensions added).
        Yields
        ------
        image : tensor
            image tensor.
        """
        for item in self.test_list:
            image = nrrd.read(os.path.join(self.test_folder,'image', item))[0]
            image = np.reshape(image, image.shape + (1,))
            image = np.reshape(image, (1,) + image.shape)
            image = tf.convert_to_tensor(image, dtype=tf.float32)
            yield image
    def test_patches_generator(self):
        """
        Test set generator that extract and yield patches from each image.
        Yields
        ------
        patch : tensor
            patch tensor.
        """
        # Loading images
        image_names = []
        images = []
        for item in self.test_list:
            image = nrrd.read(os.path.join(self.test_folder, 'image', item))[0]
            image_names.append(item)
            images.append(image)
        # Patches extraction
        for i in range(len(images)):
            for patch in self._extract_test_patches(images[i], image_names[i], self.patch_size, self.stride_size):
                patch = np.expand_dims(patch, axis=-1)
                patch = np.expand_dims(patch, axis=0)
                patch = tf.convert_to_tensor(patch, dtype=tf.float32)
                yield patch
    def _read_image_paths(self, folder):
        """
        Read the names of images in [folder]/image/.
        Return a list of sorted names.
        Parameters
        ----------
        folder : str
            path to folder where image folder is located.
        Returns
        -------
        image_path_list : list
            list with image names.
        """
        image_path_list = os.listdir(os.path.join(folder, 'image'))
        image_path_list.sort()
        return image_path_list
    def _fuse_patches(self, patches, patches_info_json):
        '''
        Load list of patches as numpy arrays, and info about the target images as json file.
        Fuse the patches into target image.
        Parameters
        ----------
        patches : nparray
            nparray containing all patches.
        patches_info_json : json
            file containing info for patches, generated automatically when
            patches are extracted.
        Returns
        -------
        numpy_array
            Fused image.
        '''
        with open(patches_info_json, 'r') as file:
            patch_info = json.load(file)
        image_h, image_w, image_d = patch_info['image_res']
        patch_size = patch_info['size']
        # Accumulators are padded by one patch so border patches always fit.
        fusion_image = np.zeros((image_h + patch_size[0], image_w + patch_size[1], image_d + patch_size[2]))
        fusion_matrix = np.zeros((image_h + patch_size[0], image_w + patch_size[1], image_d + patch_size[2]), dtype=np.uint8)
        for i in range(patch_info['len']):
            x, y, z = patch_info[str(i)]
            patch = patches[i]
            assert len(patch.shape) == 3, "The patch has more or less than 3 dimensions."
            fusion_image[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]] += patch
            fusion_matrix[x:x+patch_size[0], y:y+patch_size[1], z:z+patch_size[2]] += 1
            print(f"\rFusing patches...[{i/patch_info['len']:.2%}]", end='')
        print()
        # Avoid division by zero for voxels not covered by any patch.
        fusion_matrix = np.where(fusion_matrix == 0, 1, fusion_matrix)
        # Averaging the patches values
        fusion_image = fusion_image / fusion_matrix
        # Saving fusion matrix used for averaging...
        nrrd.write(f'{self.result_folder}/fusion_matrix.nrrd', fusion_matrix)
        return fusion_image[:image_h, :image_w, :image_d]
    def save_result(self, results):
        """
        Transform the results to NRRD and save it in result folder.
        Parameters
        ----------
        results : tensor
            output of model.predict(), one volume per test image.
        """
        if not os.path.exists(self.result_folder):
            os.makedirs(self.result_folder)
        print('Result shape: ', results.shape)
        for result, name in zip(results, self.test_list):
            # Drop batch/channel dimensions before writing the volume.
            result = np.reshape(result, result.shape[:3])
            nrrd.write(f'{self.result_folder}/{name}', result, header=None)
            print(result.shape, f'{self.result_folder}/{name} saved.')
    def save_result_patches(self, results):
        """
        Fuse patches to whole image, transform the image to NRRD and
        save it in result folder.
        Parameters
        ----------
        results : tensor
            output of model.predict().
        """
        if not os.path.exists(self.result_folder):
            os.makedirs(self.result_folder)
        results = np.squeeze(results, axis=-1)
        patches = []
        for patch in results:
            patches.append(patch)
        # NOTE(review): only self.test_list[0] is fused and saved — confirm
        # that single-image test sets are the intended use of this method.
        fused_image = self._fuse_patches(patches, f'{self.result_folder}/{self.test_list[0]}.json')
        print("Saving nrrd image...")
        nrrd.write(f'{self.result_folder}/{self.test_list[0]}', fused_image)
    def get_train_size(self):
        # NOTE(review): original comment said "This is not true anymore" —
        # verify this count matches the samples actually fed to training.
        return self.train_size
    def get_val_size(self):
        # NOTE(review): original comment said "This is not true anymore" —
        # verify this count matches the samples actually used for validation.
        return self.val_size
    def get_test_size(self):
        # NOTE(review): original comment said "This is not true anymore" —
        # verify this count matches the samples actually used for testing.
        return self.test_size
    def get_input_size(self):
        # Model input shape: patch_size + (1,) in patch mode, image_shape + (1,) otherwise.
        return self.input_size
1726558 | <reponame>UnDeR-The-mAsK/lab4<filename>PyCharm/individual2.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
if __name__ == "__main__":
    print("Есть ли среди трёх заданных чисел нечётные?")
    a1 = int(input("Первое число: "))
    a2 = int(input("Второе число: "))
    a3 = int(input("Третье число: "))
    # The prompt asks whether any number is odd, so test for a nonzero
    # remainder (the original checked ``% 2 == 0``, i.e. evenness).
    if a1 % 2 != 0 or a2 % 2 != 0 or a3 % 2 != 0:
        print("Есть")
    else:
        print("Нет")
        # Exit with an error status only when no odd number is found
        # (the original called exit(1) unconditionally).
        exit(1)
| StarcoderdataPython |
6558444 | <gh_stars>0
# Seat rosters for the two days; list index corresponds to the seat number.
yesterday_seat_assignments = ["Moses", "Ashley"]
today_seat_assignments = ["Nick", "Ashley"]

# Flag anyone assigned the same seat two days in a row.
for yesterday_name, today_name in zip(yesterday_seat_assignments, today_seat_assignments):
    if yesterday_name == today_name:
        print(f"Hey, {yesterday_name} can't sit here")
| StarcoderdataPython |
8126318 | import pytest
from celery import current_app
from app import app, db
@pytest.fixture
def test_app():
    """Sets up a test app backed by an in-memory SQLite database.

    CSRF protection and outgoing email are disabled, and Celery runs tasks
    eagerly (synchronously) so tests never need a worker process.
    """
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
    app.config['TESTING'] = True
    app.config['WTF_CSRF_ENABLED'] = False
    app.config['NO_EMAIL'] = True
    app.config['SES_DEFAULT_EMAIL_SOURCE'] = '<EMAIL>'
    current_app.conf.update(CELERY_ALWAYS_EAGER=True)
    with app.app_context():
        db.create_all()
        yield app
        # Teardown: drop all tables once the test using this fixture finishes.
        db.drop_all()
@pytest.fixture
def client(test_app):
    """Sets up a Flask test client bound to the configured test app."""
    client = test_app.test_client()
    yield client
| StarcoderdataPython |
11385094 | import argparse
import torch
import sys
import os
import json
from collections import defaultdict
import h5py
from sentence_transformers import SentenceTransformer, util
import numpy
import pandas
import tqdm
from itertools import zip_longest
from utils import grouper, load_sentences, load_bnids, load_visualsem_bnids
import torchmetrics
def retrieve_nodes_given_sentences(out_fname, batch_size, mapping_bnids_idxs_to_gloss_idxs, glosses_feats, query_bnids_idxs_for_each_gloss, query_feats, topk, bnids_idxs_for_each_gloss, gloss_bnids, query_languages):
    """
    Retrieve the top-k VisualSem nodes for each query embedding and report ranking metrics.

    out_fname(str): Output file to write retrieved node ids to.
    batch_size(int): Batch size for Sentence BERT.
    mapping_bnids_idxs_to_gloss_idxs(dict): Maps a node (BNID) index to its gloss indices.
        NOTE(review): appears unused in this function body — confirm and consider removing.
    glosses_feats(numpy.array): Numpy array with VisualSem gloss features (to be used in search) computed with Sentence BERT.
    query_bnids_idxs_for_each_gloss(numpy.array(int)): Gloss BNID ids for each gloss in query.
    query_feats(numpy.array): Numpy array with VisualSem gloss features (to be used as queries) computed with Sentence BERT.
    topk(int): Number of nodes to retrieve for each input sentence.
    bnids_idxs_for_each_gloss(numpy.array(int)): Gloss BNID ids for each gloss in searchable nodes, aligned to `glosses_feats`.
    gloss_bnids(list): BNID strings aligned to the rows of `glosses_feats`; used when writing results.
    query_languages: Per-query language code array; used for per-language metrics.

    Side effects: writes one line per query to `out_fname` (tab-separated BNID/score pairs)
    and prints mean rank and Hits@k metrics, overall and per language.
    NOTE(review): calls `.cuda()` unconditionally in several places — a GPU is required.
    """
    # Refuse to overwrite an existing results file.
    if os.path.isfile(out_fname):
        raise Exception("File already exists: '%s'. Please remove it manually to avoid tampering."%out_fname)
    n_examples = query_feats.shape[0]
    print("Number of input/query examples: ", n_examples)
    lang_codes = sorted([i.item() for i in numpy.unique(query_languages)])
    print("Number of languages: %i"%(len(lang_codes)))
    # shape: [n_queries, 1]
    # for each query, we store the rank we predicted the correct bnid (ranges from 0/best to number of nodes/worst)
    true_predicted_ranks = numpy.empty(shape=(len(query_bnids_idxs_for_each_gloss), 1))
    print("true_predicted_ranks.shape: ", true_predicted_ranks.shape)
    def first_nonzero_idxs_2dtensor(t):
        """ Given a 2D tensor, returns the first non-zero index in each row.
            If the input tensor `t` has shape `[n,m]`, the resulting tensor has shape `[n]`.
        """
        # Weight columns by a descending ramp so argmax lands on the first non-zero entry.
        idx = torch.arange(t.shape[1], 0, -1)
        tmp2= t * idx.cuda()
        indices = torch.argmax(tmp2, 1, keepdim=True)
        return indices
    all_idxs = []
    with open(out_fname, 'w', encoding='utf8') as fh_out:
        # NOTE(review): `ranks_predicted` is never used — consider removing.
        ranks_predicted = []
        for idxs_ in tqdm.tqdm(grouper(batch_size, range(n_examples)), total=(n_examples//batch_size)):
            # `grouper` pads the last batch with None; drop the padding.
            idxs = []
            for i in idxs_:
                if not i is None:
                    idxs.append(i)
            all_idxs.extend( idxs )
            # run search on CPU to avoid out-of-memory issues
            queries_embs = query_feats[ idxs ]
            queries_embs = queries_embs.cpu()
            glosses_feats = glosses_feats.cpu()
            scores = util.pytorch_cos_sim(queries_embs, glosses_feats)
            ranks = torch.argsort(scores, descending=True) # sort by cosine similarity (high to low)
            scores = scores.cuda()
            ranks = ranks.cuda()
            # shape: [n_queries, n_glosses]
            bnids_idxs_for_each_gloss_pred = bnids_idxs_for_each_gloss[ ranks ]
            # query idxs include all queries for valid/test set
            # first slice only the queries that apply to the current minibatch
            batch_query_bnids_idxs_for_each_gloss = query_bnids_idxs_for_each_gloss[ idxs ]
            # shape: [len(idxs), n_nodes]
            # 1 where the predicted node at that rank is the gold node, 0 elsewhere.
            bnids_idxs_for_each_gloss_ranks_onehot = torch.where(
                    bnids_idxs_for_each_gloss_pred == batch_query_bnids_idxs_for_each_gloss.unsqueeze(1),
                    torch.tensor(1, device=torch.device('cuda')),
                    torch.tensor(0, device=torch.device('cuda'))
            )
            # shape: [len(idxs), 1]
            bnids_idxs_for_each_gloss_ranks_ = first_nonzero_idxs_2dtensor( bnids_idxs_for_each_gloss_ranks_onehot )
            true_predicted_ranks[ idxs, : ] = bnids_idxs_for_each_gloss_ranks_.cpu().numpy()
            # write retrieval results to output file
            for rank_idx in range(len(idxs[:ranks.shape[0]])):
                bnids_predicted = []
                # Scan up to topk*10 ranked glosses to collect topk *distinct* nodes
                # (several glosses can map to the same node).
                # NOTE(review): assumes at least topk*10 glosses exist — IndexError otherwise.
                for rank_predicted in range(topk*10):
                    bnid_pred = gloss_bnids[ ranks[rank_idx,rank_predicted] ]
                    bnid_pred_score = scores[rank_idx, ranks[rank_idx, rank_predicted]].item()
                    if not bnid_pred in bnids_predicted:
                        bnids_predicted.append((bnid_pred,bnid_pred_score))
                    if len(bnids_predicted)>=topk:
                        break
                # write top-k predicted BNids, their scores and ranks
                for iii, (bnid, score) in enumerate(bnids_predicted[:topk]):
                    fh_out.write(bnid+"\t"+"%.4f"%score)
                    if iii < topk-1:
                        fh_out.write("\t")
                    else: # iii == topk-1
                        fh_out.write("\n")
    print("Processed %i queries"%len(all_idxs))
    #print(true_predicted_ranks.shape)
    print("Mean ranks (std): ", true_predicted_ranks.mean(), "(", true_predicted_ranks.std(), ")")
    # Hits@k over all queries (rank is 0-based, so "< k" means "within the top k").
    for k in [1,2,3,5,10]:
        p_at_k = (true_predicted_ranks[:] < k).sum() / true_predicted_ranks.shape[0]
        print("Hits@%i: %.4f"%(k, p_at_k))
    # Per-language breakdown of the same metrics.
    for lidx in lang_codes:
        print("Language: %i"%lidx)
        print("... Mean ranks (std): ", true_predicted_ranks[query_languages==lidx].mean(), "(", true_predicted_ranks[query_languages==lidx].std(), ")")
        for k in [1,2,3,5,10]:
            p_at_k = (true_predicted_ranks[query_languages==lidx] < k).sum() / true_predicted_ranks[query_languages==lidx].shape[0]
            print("... Hits@%i: %.4f"%(k, p_at_k))
if __name__=="__main__":
    # Default locations inside the repository checkout.
    visualsem_path = os.path.dirname(os.path.realpath(__file__))
    visualsem_nodes_path = "%s/dataset/nodes.v2.json"%visualsem_path
    visualsem_images_path = "%s/dataset/images/"%visualsem_path
    glosses_sentence_bert_path = "%s/dataset/gloss_files/glosses.en.txt.sentencebert.h5"%visualsem_path
    glosses_bnids_path = "%s/dataset/gloss_files/glosses.en.txt.bnids"%visualsem_path
    os.makedirs("%s/dataset/gloss_files/"%visualsem_path, exist_ok=True)
    p = argparse.ArgumentParser()
    g = p.add_argument_group()
    g.add_argument('--input_valid', action='store_true',
            help="""Perform retrieval for the glosses in the validation set. (See paper for reference)""")
    g.add_argument('--input_test', action='store_true',
            help="""Perform retrieval for the glosses in the test set. (See paper for reference)""")
    p.add_argument('--topk', type=int, default=1, help="Retrieve topk nodes for each input sentence.")
    p.add_argument('--batch_size', type=int, default=128)
    p.add_argument('--visualsem_path', type=str, default=visualsem_path,
            help="Path to directory containing VisualSem knowledge graph.")
    p.add_argument('--visualsem_nodes_path', type=str, default=visualsem_nodes_path,
            help="Path to file containing VisualSem nodes.")
    p.add_argument('--visualsem_images_path', type=str, default=visualsem_images_path,
            help="Path to directory containing VisualSem images.")
    p.add_argument('--glosses_sentence_bert_path', type=str, default=glosses_sentence_bert_path,
            help="""HDF5 file containing glosses index computed with Sentence BERT (computed with `extract_glosses_visualsem.py`).""")
    p.add_argument('--glosses_bnids_path', type=str, default=glosses_bnids_path,
            help="""Text file containing glosses BabelNet ids, one per line (computed with `extract_glosses_visualsem.py`).""")
    args = p.parse_args()
    assert(torch.cuda.is_available()), "Must have at least one GPU available."
    # Require at least one of the two splits to be selected.
    if not args.input_valid and not args.input_test:
        p.print_usage()
        sys.exit(1)
    print(args)
    # load all nodes (bnids) in VisualSem
    all_bnids = load_visualsem_bnids(args.visualsem_nodes_path, args.visualsem_images_path)
    # load all glosses (bnids) in VisualSem
    gloss_bnids = load_bnids( args.glosses_bnids_path )
    mapping_bnids_strs_to_bnids_idxs = {}
    mapping_gloss_idxs_to_bnids_idxs = {}
    mapping_bnids_idxs_to_gloss_idxs = defaultdict(list)
    # there can be multiple glosses per node/bnid
    # create mappings from gloss idxs to node idxs (and bnids) and vice-versa
    for idx, bnid in enumerate(all_bnids):
        mapping_bnids_strs_to_bnids_idxs[ bnid ] = idx
    for idx, bnid_str in enumerate(gloss_bnids):
        mapping_gloss_idxs_to_bnids_idxs[ idx ] = mapping_bnids_strs_to_bnids_idxs[ bnid_str ]
        mapping_bnids_idxs_to_gloss_idxs[ mapping_bnids_strs_to_bnids_idxs[bnid_str] ].append( idx )
    # vector with all gloss idxs (~1M)
    gloss_idxs = torch.tensor(list(mapping_gloss_idxs_to_bnids_idxs.keys()), dtype=torch.int32)
    # tensor the same size as `gloss_idxs` but where instead of the gloss idx we directly have the corresponding node idx (bnid idx)
    bnids_idxs_for_each_gloss = torch.tensor(list(mapping_gloss_idxs_to_bnids_idxs.values()))
    if torch.cuda.is_available():
        gloss_idxs = gloss_idxs.cuda()
        bnids_idxs_for_each_gloss = bnids_idxs_for_each_gloss.cuda()
    with h5py.File(args.glosses_sentence_bert_path, 'r') as fh_glosses:
        # load sentence bert features for each gloss
        glosses_feats = fh_glosses["features"][:]
        glosses_feats = torch.tensor(glosses_feats)
        if torch.cuda.is_available():
            glosses_feats = glosses_feats.cuda()
        # load train/valid/test gloss splits (0=train, 1=valid, 2=test)
        glosses_splits = fh_glosses["split_idxs"][:]
        train_idxs = (glosses_splits==0).nonzero()[0]
        train_feats = glosses_feats[train_idxs]
        train_bnids_idxs_for_each_gloss = bnids_idxs_for_each_gloss[train_idxs]
        # load gloss language splits
        language_splits = fh_glosses["language_idxs"][:]
        if args.input_valid:
            print("Processing validation set glosses ...")
            valid_idxs = (glosses_splits==1).nonzero()[0]
            valid_feats = glosses_feats[valid_idxs]
            valid_languages = language_splits[valid_idxs]
            valid_bnids_idxs_for_each_gloss = bnids_idxs_for_each_gloss[ valid_idxs ]
            # file names, input/output
            input_file = "valid."+ args.glosses_bnids_path.rsplit("/", 1)[-1].replace(".h5", "")
            out_fname = os.path.join(args.visualsem_path, 'dataset', input_file+".bnids.retrieved_nodes")
            retrieve_nodes_given_sentences(out_fname, args.batch_size, mapping_bnids_idxs_to_gloss_idxs,
                    train_feats, valid_bnids_idxs_for_each_gloss, valid_feats, args.topk,
                    train_bnids_idxs_for_each_gloss, gloss_bnids, valid_languages)
            print("Retrieved glosses: %s"%out_fname)
        if args.input_test:
            print("Processing test set glosses ...")
            test_idxs = (glosses_splits==2).nonzero()[0]
            test_feats = glosses_feats[test_idxs]
            test_languages = language_splits[test_idxs]
            test_bnids_idxs_for_each_gloss = bnids_idxs_for_each_gloss[ test_idxs ]
            # file names, input/output
            input_file = "test."+ args.glosses_bnids_path.rsplit("/", 1)[-1].replace(".h5", "")
            # Fix: write test-set results into the dataset directory, consistent with
            # the validation branch above (previously written to the current working directory).
            out_fname = os.path.join(args.visualsem_path, 'dataset', input_file+".bnids.retrieved_nodes")
            retrieve_nodes_given_sentences(out_fname, args.batch_size, mapping_bnids_idxs_to_gloss_idxs,
                    train_feats, test_bnids_idxs_for_each_gloss, test_feats, args.topk,
                    train_bnids_idxs_for_each_gloss, gloss_bnids, test_languages)
            print("Retrieved glosses: %s"%out_fname)

# Generated by Django 2.1.1 on 2019-02-18 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine `Artifact.armor_class` as a nullable
    IntegerField (default 0) with armor-specific help text."""

    dependencies = [
        ('adventure', '0052_auto_20190215_2228'),
    ]
    operations = [
        migrations.AlterField(
            model_name='artifact',
            name='armor_class',
            field=models.IntegerField(default=0, help_text='(Armor only) How many hits does this armor protect against?', null=True),
        ),
    ]

from __future__ import annotations
from typing import List
from typing import Union
# Type annotations
Number = Union[int, float]  # any scalar numeric value
Values = List[Number]       # a sequence of numeric values
# Dtype codes.
# These single-character codes match the format characters of the stdlib
# `struct` / `array` modules — presumably consumed as such by callers
# (TODO confirm at call sites).
# NOTE(review): 'l'/'L' are 4 bytes only in `struct` standard (non-native)
# mode; in native mode their size is platform-dependent.
uint8 = 'B'
int8 = 'b'
int16 = 'h'
uint16 = 'H'
int32 = 'l'
uint32 = 'L'
int64 = 'q'
uint64 = 'Q'
float32 = 'f'
float64 = 'd'

"""
Load up rudimentary XLSX file.
worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"
Note that Excel files have a space-saving device to reuse formula that are identical from
one cell to another within a region. I saw this in the Fibonacci Example.
<c r="A3">
<f>(A2+1)</f>
<v>1</v>
</c>
<c r="A4">
<f t="shared" ref="A4:A8" si="0">(A3+1)</f>
<v>2</v>
</c>
This cell shares the formula (with index si="0") in the region A4:A8, as you can see from
the definition of 'A5' which is 'shared' as marked by the 't' tag, and it is recast to
become (A4+1) in relation to the other one above.
<c r="A5">
<f t="shared" si="0"/>
<v>3</v>
</c>
"""
from xml.dom import minidom
class Cell:
    """A single spreadsheet cell parsed from the worksheet XML.

    Holds the cell label (e.g. 'A3'), its cached value, and its formula
    text (or None for plain-value cells). No validation is performed.
    """

    def __init__(self, label, value, formula):
        # Store the parsed attributes verbatim.
        self.label, self.value, self.formula = label, value, formula
def load_xlsx(file):
    """Load up XLS file as rudimentary spreadsheet.

    file(str): Path to an .xlsx archive; only 'xl/worksheets/sheet1.xml' is read.
    Returns a dict mapping cell labels (e.g. 'A3') to either a plain value
    string or a formula string starting with '='. Shared formulas (the
    t="shared"/si mechanism described in the module docstring) are expanded
    per cell by shifting each cell reference in the base formula.
    """
    from zipfile import ZipFile
    # Will return entries, where each key is cell and contents is either value or proper formula
    entries = {}
    # si (shared index) -> (ref region, base cell label) for shared formulas.
    shared_formula = {}
    def diff(cell, base):
        # (column delta, row delta) between two cell labels.
        # quick and dirty. Only works for single letters
        return (ord(cell[0]) - ord(base[0]), int(cell[1:]) - int(base[1:]))
    def adjust_formula(cell, si):
        """
        Adjust shared formula for new context, based on the 'base' cell. Note that the reference
        is likely also needed for more complicated examples, but I didn't need it for my
        Fibonacci example.
        """
        from ch06.expression import build_expression, Reference, Value
        (ref, base) = shared_formula[si]
        (delta_c, delta_r) = diff(cell, base)
        base_formula = entries[base]
        # Strip the leading '=' before parsing the expression tree.
        expr = build_expression(base_formula[1:])
        def modify_in_place(node):
            """Hack/quick-and-dirty way to modify EXPR in place."""
            if isinstance(node, Value):
                return node
            if isinstance(node, Reference):
                oldref = str(node)
                # Shift the reference by the column/row delta (single-letter columns only).
                newref = chr(ord(oldref[0]) + delta_c) + str(int(oldref[1:]) + delta_r)
                return Reference(newref)
            node.left = modify_in_place(node.left)
            node.right = modify_in_place(node.right)
            return node
        # replace each reference with delta
        expr = modify_in_place(expr)
        return '=' + str(expr)
    with ZipFile(file, 'r') as zip_file:
        data = zip_file.read('xl/worksheets/sheet1.xml').decode('utf-8')
        def get_all_text(node):
            """Grab up all text in children and make it available in one step."""
            if node.nodeType == node.TEXT_NODE:
                return node.data
            text_string = ""
            for child_node in node.childNodes:
                text_string += get_all_text( child_node )
            return text_string
        doc = minidom.parseString(data)
        access_points = doc.getElementsByTagName('c')  # TAG for cell
        for acc in access_points:
            cell = acc.getAttribute('r')
            value = 0
            t = None
            si = None
            ref = None
            formula = None
            for v in acc.getElementsByTagName('v'):  # TAG for value (may be present with formula)
                value = get_all_text(v)
            for f in acc.getElementsByTagName('f'):  # TAG for formula
                formula = get_all_text(f)
                t = f.getAttribute('t')  # ATTRIB tag to declare sharing
                ref = f.getAttribute('ref')  # ATTRIB region where sharing is scoped [unused]
                si = f.getAttribute('si')  # ATTRIB for shared index
            # Be sure to represent formula signaled by starting '='
            if formula:
                formula = '=' + formula
            if formula or si:
                if not si:
                    # This is a straight formula that is not (yet) shared
                    entries[cell] = str(formula)
                else:
                    if formula:
                        entries[cell] = str(formula)  # This formula will be shared
                        shared_formula[si] = (ref, cell)  # Remember base reference and cell range of scope
                    else:
                        # find formula with reference AND adjust accordingly
                        entries[cell] = adjust_formula(cell, si)
            else:
                entries[cell] = str(value)
    return entries

import sys
from typing import Any, Optional, Iterable
from httpie.cookies import HTTPieCookiePolicy
from http import cookiejar # noqa
# Request does not carry the original policy attached to the
# cookie jar, so until it is resolved we change the global cookie
# policy. <https://github.com/psf/requests/issues/5449>
cookiejar.DefaultCookiePolicy = HTTPieCookiePolicy
# Platform / packaging flags used elsewhere in the code base.
is_windows = 'win32' in str(sys.platform).lower()
# `sys.frozen` is set by bundlers such as PyInstaller — presumably the
# reason for this check (confirm against the packaging setup).
is_frozen = getattr(sys, 'frozen', False)
MIN_SUPPORTED_PY_VERSION = (3, 7)
MAX_SUPPORTED_PY_VERSION = (3, 11)
# Prefer the stdlib cached_property (Python 3.8+); otherwise fall back to
# a vendored copy of Django's implementation.
try:
    from functools import cached_property
except ImportError:
    # Can be removed once we drop Python <3.8 support.
    # Taken from `django.utils.functional.cached_property`.
    class cached_property:
        """
        Decorator that converts a method with a single self argument into a
        property cached on the instance.
        A cached property can be made out of an existing method:
        (e.g. ``url = cached_property(get_absolute_url)``).
        The optional ``name`` argument is obsolete as of Python 3.6 and will be
        deprecated in Django 4.0 (#30127).
        """
        name = None
        @staticmethod
        def func(instance):
            # Placeholder; replaced with the real function in __set_name__.
            raise TypeError(
                'Cannot use cached_property instance without calling '
                '__set_name__() on it.'
            )
        def __init__(self, func, name=None):
            self.real_func = func
            self.__doc__ = getattr(func, '__doc__')
        def __set_name__(self, owner, name):
            if self.name is None:
                self.name = name
                self.func = self.real_func
            elif name != self.name:
                raise TypeError(
                    "Cannot assign the same cached_property to two different names "
                    "(%r and %r)." % (self.name, name)
                )
        def __get__(self, instance, cls=None):
            """
            Call the function and put the return value in instance.__dict__ so that
            subsequent attribute access on the instance returns the cached value
            instead of calling cached_property.__get__().
            """
            if instance is None:
                return self
            res = instance.__dict__[self.name] = self.func(instance)
            return res
# importlib_metadata was a provisional module, so the APIs changed quite a few times
# between 3.8-3.10. It was also not included in the standard library until 3.8, so
# we install the backport for <3.8.
if sys.version_info >= (3, 8):
    import importlib.metadata as importlib_metadata
else:
    import importlib_metadata
def find_entry_points(entry_points: Any, group: str) -> Iterable[importlib_metadata.EntryPoint]:
    """Return the entry points registered under *group*, across importlib APIs."""
    selector = getattr(entry_points, "select", None)
    if selector is None:
        # Legacy dict-like API (Python <3.10 / older importlib_metadata).
        return set(entry_points.get(group, ()))
    # Python 3.10+ / importlib_metadata >= 3.9.0
    return selector(group=group)
def get_dist_name(entry_point: importlib_metadata.EntryPoint) -> Optional[str]:
    """Best-effort lookup of the distribution name that provides *entry_point*.

    Returns None when the name cannot be determined.
    """
    dist = getattr(entry_point, "dist", None)
    if dist is not None:
        # Python 3.10+ attaches the owning distribution directly.
        return dist.name
    # Fall back to parsing the entry point value and querying package metadata.
    match = entry_point.pattern.match(entry_point.value)
    if match is None or not match.group('module'):
        return None
    top_level = match.group('module').split('.')[0]
    try:
        return importlib_metadata.metadata(top_level).get('name')
    except importlib_metadata.PackageNotFoundError:
        return None


import torch
# tempo imports
from . import compute_cell_posterior
from . import utils
from . import cell_posterior
from . import objective_functions
class ClockGenePosterior(torch.nn.Module):
    """Variational posterior over clock/cycler gene parameters and cell phases.

    Wraps per-gene variational parameters (`gene_param_dict`) and priors
    (`gene_prior_dict`) and exposes an ELBO-style loss combining a
    reconstruction term over cycling genes with a KL term against the priors.
    NOTE(review): several paths build tensors on the 'cuda' device
    unconditionally — a GPU is required.
    """
    def __init__(self,gene_param_dict,gene_prior_dict,num_grid_points,clock_indices,use_nb=False,log_mean_log_disp_coef=None,min_amp=0,max_amp=2.5,use_clock_output_only=False):
        super(ClockGenePosterior, self).__init__()
        self.clock_indices = clock_indices  # indices of core clock genes along the gene axis
        self.gene_param_dict = gene_param_dict  # variational parameters per gene
        self.gene_prior_dict = gene_prior_dict  # prior parameters per gene
        self.num_grid_points = num_grid_points  # grid resolution for the phase posterior
        self.use_nb = use_nb  # use a negative binomial likelihood if True
        self.log_mean_log_disp_coef = log_mean_log_disp_coef  # NB mean/dispersion relationship coefficients
        self.min_amp = min_amp  # amplitude lower bound
        self.max_amp = max_amp  # amplitude upper bound
        # Number of genes inferred from the first dimension of the mu location parameter.
        self.num_genes = self.gene_param_dict['mu_loc'].shape[0]
        self.use_clock_output_only = use_clock_output_only  # restrict the LL term to clock genes
    def compute_cell_phase_posterior_likelihood(self,gene_X,log_L,prior_theta_euclid_dist,num_gene_samples=5):
        """Return the cell phase posterior likelihood given sampled gene parameters.

        gene_X: expression data per cell/gene (shape assumed by `compute_cell_posterior` — TODO confirm).
        log_L: log library sizes per cell (presumably — confirm against callers).
        prior_theta_euclid_dist: prior distribution over cell phase (Euclidean coords).
        num_gene_samples(int): Monte Carlo samples of the gene parameters.
        """
        # --- SAMPLE THE GENE PARAMETERS ---
        # ** get distribution dict **
        distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.gene_param_dict, max_amp = self.max_amp, min_amp = self.min_amp, prep = True)
        # ** sample ** (rsample keeps the draws differentiable)
        mu_sampled = distrib_dict['mu'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes]
        A_sampled = distrib_dict['A'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes]
        phi_euclid_sampled = distrib_dict['phi_euclid'].rsample((num_gene_samples,)) # [num_gene_samples x num_genes x 2]
        # Convert Euclidean (cos, sin) coordinates to an angle.
        phi_sampled = torch.atan2(phi_euclid_sampled[:,:,1],phi_euclid_sampled[:,:,0]) # [num_gene_samples x num_genes x 2]
        Q_sampled = utils.get_is_cycler_samples_from_dist(distrib_dict['Q_prob'],num_gene_samples=num_gene_samples,rsample=True)
        # --- COMPUTE CELL POSTERIOR ---
        theta_posterior_likelihood = compute_cell_posterior.compute_cell_posterior(gene_X = gene_X,
            log_L = log_L,
            num_grid_points = self.num_grid_points,
            prior_theta_euclid_dist = prior_theta_euclid_dist, # self.prior_theta_euclid_dist
            mu_sampled = mu_sampled,
            A_sampled = A_sampled,
            phi_sampled = phi_sampled,
            Q_sampled = Q_sampled,
            B_sampled = None,
            use_nb = self.use_nb,
            log_mean_log_disp_coef = self.log_mean_log_disp_coef)
        return theta_posterior_likelihood
    def get_clock_gene_param_dict(self):
        """Return a copy of `gene_param_dict` restricted to the core clock genes."""
        clock_gene_param_dict = {}
        for key in self.gene_param_dict:
            if key == 'phi_euclid_loc':
                # phi_euclid_loc carries an extra trailing (cos, sin) dimension.
                clock_gene_param_dict[key] = self.gene_param_dict['phi_euclid_loc'][self.clock_indices,:]
            else:
                clock_gene_param_dict[key] = self.gene_param_dict[key][self.clock_indices]
        return clock_gene_param_dict
    def compute_loss(self,gene_X,log_L,prior_theta_euclid_dist,num_cell_samples,num_gene_samples):
        """Compute the negative-ELBO loss.

        Returns (elbo_loss, ll_loss, kl_loss) where elbo_loss = kl_loss - ll_loss.
        """
        # --- COMPUTE THE CELL POSTERIOR DISTRIBUTION ---
        theta_posterior_likelihood = self.compute_cell_phase_posterior_likelihood(gene_X,log_L,prior_theta_euclid_dist,num_gene_samples)
        # --- SAMPLE THE CELL PHASE POSTERIOR ---
        theta_dist = cell_posterior.ThetaPosteriorDist(theta_posterior_likelihood)
        theta_sampled = theta_dist.rsample(num_cell_samples)
        # --- GET THE DISTRIB DICT AND CLOCK LOC SCALE DICT ---
        # ** get distribution dict **
        # input gene distrib dict
        input_distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.gene_param_dict, gene_prior_dict = self.gene_prior_dict, max_amp = self.max_amp, min_amp = self.min_amp)
        # output gene distrib dict (optionally restricted to core clock genes)
        if self.use_clock_output_only:
            output_distrib_dict = utils.init_distributions_from_param_dicts(gene_param_dict = self.get_clock_gene_param_dict(), gene_prior_dict = self.gene_prior_dict, max_amp = self.max_amp, min_amp = self.min_amp)
        else:
            output_distrib_dict = input_distrib_dict
        # --- COMPUTE THE EXPECTATION LOG LIKELIHOOD OF THE CYCLING GENES ---
        # subset gene_X and distrib_dict to core clock genes only if need to
        if self.use_clock_output_only:
            gene_X = gene_X[:,self.clock_indices]
        # ** compute gene LL in each cell over all samples **
        cycler_log_likelihood_sampled = objective_functions.compute_sample_log_likelihood(gene_X, log_L,
            theta_sampled = theta_sampled,
            mu_dist = output_distrib_dict['mu'], A_dist = output_distrib_dict['A'], phi_euclid_dist = output_distrib_dict['phi_euclid'], Q_prob_dist = output_distrib_dict['Q_prob'],
            num_gene_samples = num_gene_samples, use_flat_model = False,
            use_nb = self.use_nb, log_mean_log_disp_coef = self.log_mean_log_disp_coef, rsample = True,
            use_is_cycler_indicators = output_distrib_dict['Q_prob'] is not None)
        # ** compute the MC expectations **
        # cycler: sum over genes and cells, then average over MC samples
        cycler_mc_lls = torch.sum(torch.sum(cycler_log_likelihood_sampled,dim=0),dim=0).flatten()
        cycler_gene_expectation_log_likelihood = torch.mean(cycler_mc_lls)
        # clock: same, restricted to core clock genes
        clock_mc_lls = torch.sum(torch.sum(cycler_log_likelihood_sampled[self.clock_indices,:,:,:],dim=0),dim=0).flatten()
        clock_gene_expectation_log_likelihood = torch.mean(clock_mc_lls)
        # --- COMPUTE THE KL OF THE CORE CLOCK GENES AND THE DE NOVO CYCLERS ---
        # ** get variational and prior dist lists **
        variational_dist_list = [input_distrib_dict['mu'],input_distrib_dict['A'],input_distrib_dict['phi_euclid']]
        prior_dist_list = [input_distrib_dict['prior_mu'],input_distrib_dict['prior_A'],input_distrib_dict['prior_phi_euclid']]
        # Q_prob only participates in the KL when both variational and prior are present.
        if 'Q_prob' in input_distrib_dict and 'prior_Q_prob' in input_distrib_dict:
            variational_dist_list += [input_distrib_dict['Q_prob']]
            prior_dist_list += [input_distrib_dict['prior_Q_prob']]
        # ** compute the divegence **
        clock_and_de_novo_cycler_kl = objective_functions.compute_divergence(variational_dist_list = variational_dist_list,
            prior_dist_list = prior_dist_list)
        # # --- COMPUTE ELBO ---
        kl_loss = torch.sum(clock_and_de_novo_cycler_kl)
        if self.use_clock_output_only:
            ll_loss = clock_gene_expectation_log_likelihood
        else:
            ll_loss = cycler_gene_expectation_log_likelihood
        elbo_loss = kl_loss - ll_loss
        return elbo_loss, ll_loss, kl_loss

# Code to train T3D model
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, EarlyStopping, CSVLogger, \
TensorBoard, LearningRateScheduler
from keras.optimizers import SGD
from keras import losses
import keras.backend as K
import traceback
from T3D_keras import T3D169_DenseNet
from get_video import video_gen_TL
# Input clip geometry fed to the T3D model.
FRAMES_PER_VIDEO = 32
FRAME_HEIGHT = 224
FRAME_WIDTH = 224
FRAME_CHANNEL = 3
# NOTE(review): unused by transfer_learning(), which hard-codes nb_classes = 2.
NUM_CLASSES = 2
BATCH_SIZE = 2
EPOCHS = 200
# Final saved model and dataset location (relative to the working directory).
MODEL_FILE_NAME = 'T3D_saved_model.h5'
PATH_TO_VIDEOS = '../dataset/transfer_learning/resized'
def transfer_learning():
    """Fine-tune a T3D DenseNet169 on the two-class transfer-learning video set.

    Side effects: reads/writes weight checkpoints and history.csv in the
    working directory, writes TensorBoard logs, and saves the final model
    to MODEL_FILE_NAME.
    """
    # Only the .shape of this array is used (to build the model input spec).
    sample_input = np.empty(
        [FRAMES_PER_VIDEO, FRAME_HEIGHT, FRAME_WIDTH, FRAME_CHANNEL], dtype=np.uint8)
    # For transfer learning, nb_classes has to be 2
    nb_classes = 2
    video_generator = video_gen_TL(
        PATH_TO_VIDEOS, FRAMES_PER_VIDEO, FRAME_HEIGHT, FRAME_WIDTH, FRAME_CHANNEL, nb_classes, batch_size=BATCH_SIZE)
    # Get Model
    model = T3D169_DenseNet(sample_input.shape, nb_classes)
    # Best-so-far and every-epoch weight checkpoints (weights only).
    checkpoint = ModelCheckpoint('T3D_best_model_weights.hdf5', monitor='val_loss',
                                 verbose=1, save_best_only=True, mode='min', save_weights_only=True)
    checkpoint_all = ModelCheckpoint('T3D_saved_model_weights.hdf5', monitor='val_loss',
                                     verbose=1, save_best_only=False, mode='min', save_weights_only=True)
    # -------------------------------------------------------------------------
    # The LR schedule is what the paper used, but from my experience, the
    # initial lr is way too high and causes the loss to jump all over the place.
    # -------------------------------------------------------------------------
    # lrscheduler = LearningRateScheduler(lambda epoch: 0.1 * pow(10,-(epoch//30)), verbose=1)
    # Step decay: divide the LR by 10 every 30 epochs, starting from 0.01.
    lrscheduler = LearningRateScheduler(lambda epoch: 0.01 * pow(10,-(epoch//30)), verbose=1)
    csvLogger = CSVLogger('history.csv', append=True)
    tensorboard = TensorBoard(log_dir='./logs/T3D_Transfer_Learning')
    callbacks_list = [checkpoint, checkpoint_all, lrscheduler, csvLogger, tensorboard]
    # Compile model
    optim = SGD(lr = 0.1, momentum=0.9, decay=1e-4, nesterov=True)
    model.compile(optimizer=optim, loss=['binary_crossentropy'], metrics=['accuracy'])
    # Resume from previously saved weights when present.
    if os.path.exists('./T3D_saved_model_weights.hdf5'):
        print('Pre-existing model weights found, loading weights.......')
        model.load_weights('./T3D_saved_model_weights.hdf5')
        print('Weights loaded')
    # Train model
    print('Training started....')
    # Arbitrary numbers as the dataset is huge (many video combinations possible)
    train_steps = 1600
    val_steps = 400
    # NOTE(review): validation_data reuses the training generator, so validation
    # metrics are not computed on a held-out split — confirm this is intended.
    history = model.fit_generator(
        video_generator,
        steps_per_epoch=train_steps,
        epochs=EPOCHS,
        validation_data=video_generator,
        validation_steps=val_steps,
        verbose=1,
        callbacks=callbacks_list,
        workers=1,
        use_multiprocessing=True
    )
    model.save(MODEL_FILE_NAME)
def _main():
    """Run transfer learning, reporting any failure before cleanup."""
    try:
        transfer_learning()
    except Exception as err:
        print('Error:', err)
        traceback.print_tb(err.__traceback__)
    finally:
        # Destroying the current TF graph to avoid clutter from old models / layers
        K.clear_session()


if __name__ == '__main__':
    _main()

from datetime import datetime, timezone
# Canned payload shaped like an EC2 `DescribeInstances` response — presumably
# a test fixture (confirm against the consuming tests).
instances = {
    "Reservations": [
        {
            "Groups": [{"GroupName": "priv", "GroupId": "sg-0fea0dac"}],
            "Instances": [
                {
                    "AmiLaunchIndex": 0,
                    "ImageId": "ami-b0d57010",
                    "InstanceId": "i-e6b7ab04",
                    "InstanceType": "t2.nano",
                    "KernelId": "",
                    "KeyName": "work",
                    "LaunchTime": "2020-04-09T13:28:58.721Z",
                    "Monitoring": {"State": "disabled"},
                    "Placement": {
                        "AvailabilityZone": "eu-west-2a",
                        "GroupName": "",
                        "Tenancy": "default",
                    },
                    "PrivateDnsName": "ip-10-0-2-10.eu-west-2.compute.internal",
                    "PrivateIpAddress": "10.0.2.10",
                    "ProductCodes": [],
                    "PublicDnsName": "",
                    "State": {"Code": 16, "Name": "running"},
                    "SubnetId": "subnet-4c66da82",
                    "VpcId": "vpc-ad730e33",
                    "Architecture": "x86_64",
                    "BlockDeviceMappings": [
                        {
                            "DeviceName": "/dev/sda1",
                            "Ebs": {
                                "AttachTime": "2020-04-09T13:28:58.721Z",
                                "DeleteOnTermination": True,
                                "Status": "attached",
                                "VolumeId": "vol-a87f91c1",
                            },
                        }
                    ],
                    "ClientToken": "",
                    "EbsOptimized": False,
                    "Hypervisor": "xen",
                    "NetworkInterfaces": [
                        {
                            "Attachment": {
                                "AttachTime": "2020-04-09T13:28:58.721Z",
                                "AttachmentId": "eni-attach-1fb7811e",
                                "DeleteOnTermination": True,
                                "DeviceIndex": 0,
                                "Status": "attached",
                            },
                            "Description": "Primary network interface",
                            "Groups": [{"GroupName": "priv", "GroupId": "sg-0fea0dac"}],
                            "MacAddress": "aa:d6:af:71:91:f6",
                            "NetworkInterfaceId": "eni-a8213f11",
                            "OwnerId": "763630846467",
                            "PrivateDnsName": "ip-10-0-2-10.eu-west-2.compute.internal",
                            "PrivateIpAddress": "10.0.2.10",
                            "PrivateIpAddresses": [
                                {
                                    "Primary": True,
                                    "PrivateDnsName": "ip-10-0-2-10.eu-west-2.compute.internal",
                                    "PrivateIpAddress": "10.0.2.10",
                                }
                            ],
                            "SourceDestCheck": True,
                            "Status": "in-use",
                            "SubnetId": "subnet-4c66da82",
                            "VpcId": "vpc-ad730e33",
                        }
                    ],
                    "RootDeviceName": "/dev/sda1",
                    "RootDeviceType": "ebs",
                    "SecurityGroups": [{"GroupName": "priv", "GroupId": "sg-0fea0dac"}],
                    "SourceDestCheck": True,
                    "Tags": [{"Key": "Name", "Value": "test1"}],
                    "VirtualizationType": "hvm",
                }
            ],
            "OwnerId": "763630846467",
            "ReservationId": "r-390600af",
        }
    ]
}
# Canned payload shaped like an EC2 `DescribeVolumes` response (same fixture family).
volumes = {
    "Volumes": [
        {
            "Attachments": [
                {
                    "AttachTime": "2019-10-08T17:14:52.314Z",
                    "Device": "/dev/sda1",
                    "InstanceId": "i-8c1d8798",
                    "State": "attached",
                    "VolumeId": "vol-a24fffdc",
                    "DeleteOnTermination": False,
                }
            ],
            "AvailabilityZone": "eu-west-2a",
            "CreateTime": "2017-08-10T17:34:59.644Z",
            "Size": 10,
            "SnapshotId": "snap-d1c97efa",
            "State": "in-use",
            "VolumeId": "vol-a24fffdc",
            "Tags": [
                {"Key": "Name", "Value": "test1"},
                {"Key": "project", "Value": "test2"},
            ],
            "VolumeType": "standard",
        }
    ]
}
# Eleven completed snapshots of volume vol-59b94d63. The entries differ only
# in snapshot id and start time, so they are generated from a compact table.
snapshots1 = {
    "Snapshots": [
        {
            "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
            "Encrypted": False,
            "OwnerId": "763630846467",
            "Progress": "100%",
            "SnapshotId": _sid,
            "StartTime": datetime(
                year=2019,
                month=12,
                day=_day,
                hour=_hour,
                minute=_minute,
                second=_second,
                microsecond=_microsecond,
                tzinfo=timezone.utc,
            ),
            "State": "completed",
            "VolumeId": "vol-59b94d63",
            "VolumeSize": 10,
            "Tags": [],
        }
        for _sid, _day, _hour, _minute, _second, _microsecond in [
            ("snap-cf4748a5", 17, 16, 2, 22, 204),
            ("snap-04fd92f4", 19, 16, 56, 20, 0),
            ("snap-30e1c236", 19, 16, 50, 9, 0),
            ("snap-d5caf847", 19, 16, 53, 4, 0),
            ("snap-f820da70", 19, 16, 57, 4, 0),
            ("snap-e5b47810", 19, 16, 57, 44, 0),
            ("snap-16959493", 19, 17, 21, 44, 0),
            ("snap-a8fa23e2", 19, 17, 15, 44, 0),
            ("snap-14f3772c", 19, 17, 18, 44, 0),
            ("snap-e9a6aae5", 19, 17, 18, 54, 0),
            ("snap-70be5243", 19, 17, 20, 54, 0),
        ]
    ]
}
def _completed_snapshot(snapshot_id, volume_id, start_time):
    """Build one completed-snapshot fixture entry; only the snapshot id,
    volume id and timestamp vary between entries, everything else is the
    shared boilerplate of a DescribeSnapshots result."""
    return {
        "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
        "Encrypted": False,
        "OwnerId": "763630846467",
        "Progress": "100%",
        "SnapshotId": snapshot_id,
        "StartTime": start_time,
        "State": "completed",
        "VolumeId": volume_id,
        "VolumeSize": 10,
        "Tags": [],
    }


# Fixture: twelve completed snapshots of volume vol-640141cf, all taken on
# 2019-12-19 (UTC); timestamps are deliberately out of order to exercise
# sorting/rotation logic in the code under test.
snapshots2 = {
    "Snapshots": [
        _completed_snapshot(
            sid,
            "vol-640141cf",
            datetime(2019, 12, 19, hh, mm, ss, tzinfo=timezone.utc),
        )
        for sid, (hh, mm, ss) in [
            ("snap-8f3436c0", (16, 2, 54)),
            ("snap-e6996c10", (16, 50, 8)),
            ("snap-fa25ee50", (16, 53, 2)),
            ("snap-5569bbaa", (17, 21, 4)),
            ("snap-9c3c5d34", (16, 56, 18)),
            ("snap-7729b15f", (16, 57, 1)),
            ("snap-ecead238", (16, 57, 40)),
            ("snap-445be4c7", (17, 15, 8)),
            ("snap-e88add53", (17, 18, 52)),
            ("snap-5e2e73c4", (17, 18, 26)),
            ("snap-11d38c47", (17, 19, 49)),
            ("snap-f3338f81", (17, 20, 21)),
        ]
    ]
}
# Fixture: CreateSnapshot response — snapshot still queued (0%, "in-queue").
# NOTE: StartTime is an ISO-8601 string here, unlike the datetime objects
# used in the DescribeSnapshots fixtures earlier in this module.
create_snapshot1 = {
    "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
    "Encrypted": False,
    "OwnerId": "763630846467",
    "Progress": "0%",
    "SnapshotId": "snap-91c8d227",
    "StartTime": "2020-04-14T18:32:43.756Z",
    "State": "in-queue",
    "VolumeId": "vol-56d30e10",
    "VolumeSize": 10,
}
# Fixture: DescribeSnapshots response after create_snapshot1 has completed.
snapshot_completed1 = {
    "Snapshots": [
        {
            "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
            "Encrypted": False,
            "OwnerId": "763630846467",
            "Progress": "100%",
            "SnapshotId": "snap-91c8d227",
            "StartTime": "2020-04-14T18:32:43.756Z",
            "State": "completed",
            "VolumeId": "vol-56d30e10",
            "VolumeSize": 10,
            "Tags": [],
        }
    ]
}
# Fixture: a second CreateSnapshot response, for a different volume.
create_snapshot2 = {
    "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
    "Encrypted": False,
    "OwnerId": "763630846467",
    "Progress": "0%",
    "SnapshotId": "snap-f3338f81",
    "StartTime": "2020-04-14T18:32:43.756Z",
    "State": "in-queue",
    "VolumeId": "vol-640141cf",
    "VolumeSize": 10,
}
# Fixture: DescribeSnapshots response once both snapshots have completed.
snapshot_completed2 = {
    "Snapshots": [
        {
            "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
            "Encrypted": False,
            "OwnerId": "763630846467",
            "Progress": "100%",
            "SnapshotId": "snap-f3338f81",
            "StartTime": "2020-04-14T18:32:43.756Z",
            "State": "completed",
            "VolumeId": "vol-640141cf",
            "VolumeSize": 10,
            "Tags": [],
        },
        {
            "Description": "osc-bsu-backup EF50CF3A80164A5EABAF8C78B2314C65",
            "Encrypted": False,
            "OwnerId": "763630846467",
            "Progress": "100%",
            "SnapshotId": "snap-91c8d227",
            "StartTime": "2020-04-14T18:32:43.756Z",
            "State": "completed",
            "VolumeId": "vol-56d30e10",
            "VolumeSize": 10,
            "Tags": [],
        },
    ]
}
| StarcoderdataPython |
1965689 | <filename>src/testers/unittests/test_ast_simplification.py
#!/usr/bin/env python2
# coding: utf-8
"""Testing AST simplification."""
import unittest
from triton import ARCH, TritonContext, CALLBACK, AST_NODE
class TestAstSimplification(unittest.TestCase):
    """Exercise Triton's callback-based symbolic AST simplification.

    Two simplification callbacks are registered on the context:
      * ``xor_1`` rewrites ``a ^ a`` to ``0``.
      * ``xor_2`` rewrites the OR-of-ANDs xor pattern
        ``(a & ~b) | (~a & b)`` (in any operand order) to ``a ^ b``.
    """

    def setUp(self):
        # Fresh x86-64 context per test; both callbacks fire for every node
        # Triton is asked to simplify.
        self.Triton = TritonContext()
        self.Triton.setArchitecture(ARCH.X86_64)
        self.Triton.addCallback(self.xor_1, CALLBACK.SYMBOLIC_SIMPLIFICATION)
        self.Triton.addCallback(self.xor_2, CALLBACK.SYMBOLIC_SIMPLIFICATION)
        self.astCtxt = self.Triton.getAstContext()

    def test_simplification(self):
        a = self.astCtxt.bv(1, 8)
        b = self.astCtxt.bv(2, 8)
        # Example 1: a ^ a collapses to zero via xor_1.
        c = a ^ a
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(_ bv0 8)")
        # a ^ b has no simplification.
        c = a ^ b
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(bvxor (_ bv1 8) (_ bv2 8))")
        # Example 2 - all four operand orderings of the xor pattern.
        c = (a & ~b) | (~a & b)
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(bvxor (_ bv1 8) (_ bv2 8))")
        # Example 2 - forme B
        c = (~b & a) | (~a & b)
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(bvxor (_ bv1 8) (_ bv2 8))")
        # Example 2 - forme C
        c = (~b & a) | (b & ~a)
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(bvxor (_ bv1 8) (_ bv2 8))")
        # Example 2 - forme D
        c = (b & ~a) | (~b & a)
        c = self.Triton.simplify(c)
        self.assertEqual(str(c), "(bvxor (_ bv2 8) (_ bv1 8))")
        return

    # a ^ a -> 0
    @staticmethod
    def xor_1(api, node):
        if node.getKind() == AST_NODE.BVXOR:
            if node.getChildren()[0].equalTo(node.getChildren()[1]):
                return api.getAstContext().bv(0, node.getBitvectorSize())
        return node

    # ((a & ~b) | (~a & b)) -> (a ^ b)
    @staticmethod
    def xor_2(api, node):

        def getNot(node):
            """Return the single BVNOT child of *node*, or None if there is
            no unique negated child."""
            a = node.getChildren()[0]
            b = node.getChildren()[1]
            if a.getKind() == AST_NODE.BVNOT and b.getKind() != AST_NODE.BVNOT:
                return a
            if b.getKind() == AST_NODE.BVNOT and a.getKind() != AST_NODE.BVNOT:
                return b
            return None

        def getNonNot(node):
            """Return the single non-BVNOT child of *node*, or None."""
            a = node.getChildren()[0]
            b = node.getChildren()[1]
            if a.getKind() != AST_NODE.BVNOT and b.getKind() == AST_NODE.BVNOT:
                return a
            if b.getKind() != AST_NODE.BVNOT and a.getKind() == AST_NODE.BVNOT:
                return b
            return None

        if node.getKind() == AST_NODE.BVOR:
            c1 = node.getChildren()[0]
            c2 = node.getChildren()[1]
            if c1.getKind() == AST_NODE.BVAND and c2.getKind() == AST_NODE.BVAND:
                c1_not = getNot(c1)
                c2_not = getNot(c2)
                c1_nonNot = getNonNot(c1)
                c2_nonNot = getNonNot(c2)
                # BUG FIX: each conjunct must contain exactly one negated
                # operand for the pattern to apply; previously e.g.
                # (a & b) | (c & d) raised AttributeError on None.
                if None in (c1_not, c2_not, c1_nonNot, c2_nonNot):
                    return node
                if c1_not.equalTo(~c2_nonNot) and c2_not.equalTo(~c1_nonNot):
                    return c1_nonNot ^ c2_nonNot
        return node
| StarcoderdataPython |
5199570 | <reponame>Organ-xiangjikeji/---
from django.shortcuts import render, HttpResponse, redirect
from web import models
from django.views.decorators.cache import cache_page
from django.http import JsonResponse
from web.common import utils
from web.common.orm_op import Myquery
from web.common import vcode
from web.common.redis_op import redis_conn
from django.views.generic import View
from web.common import myform
from web.common.decroters import urecord
import logging
logger = logging.getLogger('django')
# Create your views here.
from web.common import utils
# @cache_page(60*2)
def index(request):
    """Render the home page."""
    return render(request, 'index.html')
def case(request):
    """Render the cases & services page."""
    return render(request, 'case.html')
def free_data(request):
    """Data-download listing page.

    Builds a filtered queryset from the request via ``Myquery`` and, when it
    is non-empty, paginates it with ``utils.MyPagenator``.  A non-numeric
    ``page`` query parameter renders the 404 page instead.
    :param request: HttpRequest; filters and ``page`` come from the query string.
    :return: HttpResponse
    """
    myquery = Myquery(request)
    pg = None
    # Was a leftover debug print; also removed the unused models.Data queryset.
    logger.debug('free_data filtered count: %s', myquery.nquery_set.count())
    if myquery.nquery_set.count() != 0:
        try:
            page = request.GET.get('page', '1')
            if page.isdigit():
                pg = utils.MyPagenator(myquery.nquery_set, page)
            else:
                # Reject tampered/non-numeric page parameters.
                return render(request, '404page.html', {'msg': '您的请求错误'})
        except Exception as e:
            # Pagination failure degrades to an unpaginated page (pg=None).
            logger.error(e)
    return render(request, 'free_data.html', {
        'myquery': myquery,
        'pg': pg})
def job(request):
    """Render the job-opportunities page."""
    return render(request, 'job.html')
def introduce(request):
    """
    Render the "about us" page.
    :param request: HttpRequest
    :return: HttpResponse
    """
    return render(request, 'introduce.html')
def map(request):
    """
    Render the Baidu-map page.
    NOTE(review): this view shadows the builtin ``map``; renaming would also
    require updating the URL configuration that references it.
    :param request: HttpRequest
    :return: HttpResponse
    """
    return render(request, "map.html")
def contact(request):
    """
    Render the contact page (GET only; form submission is handled by the
    ``Contact`` class-based view defined later in this module).
    :param request: HttpRequest
    :return: HttpResponse
    """
    return render(request, "contact.html")
class Contact(View):
    """Contact form endpoint: GET renders the page; POST (AJAX only)
    validates the captcha plus form data and stores a customer enquiry."""

    def get(self, request):
        return render(request, "contact.html")

    def post(self, request):
        ret = {
            'status': 0,
            'msg': 'success'
        }
        # Only AJAX submissions are accepted.
        if not request.is_ajax():
            ret = {'status': 1, 'msg': '请求错误'}
            return JsonResponse(ret)
        # Validate the image captcha: status 1 = wrong, 2 = expired.
        v_check = utils.check_vcode(request)
        if v_check['status'] == 1:
            ret['status'] = 1
            ret['msg'] = '验证码错误'
            return JsonResponse(ret)
        elif v_check['status'] == 2:
            ret['status'] = 1
            ret['msg'] = '验证码已过期'
            return JsonResponse(ret)
        form = myform.CustomerForm(request.POST)
        try:
            if form.is_valid():
                form.save()
            else:
                ret['status'] = 1
                # Replaced a stray debug print(form.errors) with logging.
                logger.error(form.errors)
                ret['msg'] = '数据提交失败,您的手机或邮箱信息可能已经提交'
        except Exception as e:
            logger.error(e)
            ret['status'] = 1
            ret['msg'] = '数据提交失败'
        return JsonResponse(ret)
def get_vimg(request):
    """
    Generate an image captcha, cache its text in Redis and return the image.
    :param request: expects ``vk`` (key to store the new captcha text under)
        and optionally ``ex`` (key of a previous captcha to invalidate).
    :return: HttpResponse containing the JPEG bytes.
    """
    text, image = vcode.gen_captcha_text_and_image()
    v_key = request.GET.get('vk')
    ex_key = request.GET.get('ex')
    if ex_key:
        # Drop the previous captcha so it can no longer be submitted.
        try:
            redis_conn.delete(ex_key)
        except Exception as e:
            logger.error(e)
    # Captcha text expires after 3 minutes.
    redis_conn.set(v_key, text, 60*3)
    return HttpResponse(image.getvalue(), content_type='image/jpg')
@urecord
def data_detail(request, uid):
    """Render the detail page for a single dataset (visit recorded by @urecord).

    NOTE(review): ``get`` raises ``Data.DoesNotExist`` (HTTP 500) for an
    unknown ``uid`` — confirm whether a 404 is wanted here.
    """
    data = models.Data.objects.get(uid=uid)
    return render(request, 'data_detail.html', {'data': data})
class Reg(View):
    """Registration endpoint: GET renders the form; POST (AJAX only)
    verifies the SMS code, creates/fetches the user and signs them in."""

    def get(self, request):
        return render(request, 'register.html')

    def post(self, request):
        ret = {
            'status': 0,
            'msg': 'success'
        }
        if not request.is_ajax():
            ret = {'status': 1, 'msg': '请求错误'}
            return JsonResponse(ret)
        # Verify the SMS verification code.
        pcode_check = utils.check_pcode(request)
        if pcode_check['status'] == 1:
            ret['status'] = 1
            ret['msg'] = '手机验证码错误'
            return JsonResponse(ret)
        try:
            user = models.UserInfo.objects.get_or_create(phone=request.POST.get('phone'))
            request.session['user'] = user[0].phone
            request.session['is_login'] = True
            # Session lasts 12 hours.
            request.session.set_expiry(60 * 60 * 12)
        except Exception as e:
            logger.error(e)
            ret['status'] = 1
            ret['msg'] = '此号已注册'
        # BUG FIX: the response was previously returned only from the except
        # branch, so a *successful* registration returned None (HTTP 500).
        return JsonResponse(ret)
class Vcode(View):
    """AJAX endpoint that validates the image captcha and, for phones that
    are not yet registered, triggers sending of an SMS verification code."""

    def post(self, request):
        ret = {
            'status': 0,
            'msg': 'success'
        }
        if not request.is_ajax():
            ret = {'status': 1, 'msg': '请求错误'}
            return JsonResponse(ret)
        # Validate the image captcha first: status 2 = expired, 1 = wrong.
        v_check = utils.check_vcode(request)
        if v_check['status'] == 2:
            ret['status'] = 1
            ret['msg'] = '验证码已过期'
            return JsonResponse(ret)
        elif v_check['status'] == 1:
            ret['status'] = 1
            ret['msg'] = '验证码错误'
            return JsonResponse(ret)
        # Removed a stray debug print() that was left in here.
        if not utils.check_phone(request):
            ret['status'] = 1
            ret['msg'] = '手机号码已注册'
        else:
            # BUG FIX: the SMS code was previously sent unconditionally,
            # even when the phone number was already registered.
            utils.send_pcode()
        return JsonResponse(ret)
from django.http import FileResponse
def download(request,uid):
    """Stream a dataset file as an attachment and bump its download counter."""
    data = models.Data.objects.get(uid=uid)
    # NOTE(review): read-modify-write counter is racy under concurrent
    # requests; an F('downloads') + 1 update would be atomic — confirm
    # whether exact counts matter.
    data.downloads += 1
    data.save()
    file = open(data.file.path, 'rb')
    # FileResponse takes ownership of the file handle and closes it.
    response = FileResponse(file)
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="%s"'%file.name
    return response
| StarcoderdataPython |
5096441 | <filename>roster/migrations/0007_auto_20170806_0044.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-08-06 00:44
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds ``TA.semester`` (FK to core.Semester,
    cascading delete) and makes ``Student.curriculum`` optional (blank)."""

    dependencies = [
        ('core', '0003_auto_20170806_0009'),
        ('roster', '0006_auto_20170806_0035'),
    ]

    operations = [
        migrations.AddField(
            model_name='ta',
            name='semester',
            field=models.ForeignKey(default=None, help_text='The semester for this TA', on_delete=django.db.models.deletion.CASCADE, to='core.Semester'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='student',
            name='curriculum',
            field=models.ManyToManyField(blank=True, help_text='The choice of units that this student will work on', to='core.Unit'),
        ),
    ]
| StarcoderdataPython |
197387 | # -*- coding: utf-8 -*-
from .main import Postie
from .cli import create_parser, main
| StarcoderdataPython |
3241185 | <reponame>MAKENTNU/web<filename>docs/models.py
from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models
from django.utils.translation import gettext_lazy as _
from users.models import User
from .validators import page_title_validator
MAIN_PAGE_TITLE = "Documentation"
class Page(models.Model):
    """Model for each individual documentation page."""

    # Unique page title; validated against reserved titles by the validator.
    title = models.CharField(max_length=64, unique=True, verbose_name=_("Title"), validators=[page_title_validator])
    # Author of the page; kept (as NULL) if the user account is deleted.
    created_by = models.ForeignKey(
        to=User,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name='doc_pages_created',
    )
    # The Content revision currently displayed for this page.
    current_content = models.OneToOneField(
        to="Content",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        # Can be used as a boolean field by `Content`
        related_name='is_currently_on_page',
    )

    def __str__(self):
        return self.title
class Content(models.Model):
    """The content of a documentation page. All versions are kept for editing history."""

    # Page this revision belongs to; deleting the page deletes its history.
    page = models.ForeignKey(
        to=Page,
        on_delete=models.CASCADE,
        related_name='content_history',
        verbose_name=_("Page"),
    )
    # When this revision was made.
    changed = models.DateTimeField(verbose_name=_("Time changed"))
    # Rich-text body of the revision.
    content = RichTextUploadingField(verbose_name=_("Content"))
    # Editor of this revision; kept (as NULL) if the user account is deleted.
    made_by = models.ForeignKey(
        to=User,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        related_name='doc_page_contents_created',
        verbose_name=_("Made by"),
    )
| StarcoderdataPython |
8057872 | from .src import OpendtectColormaps | StarcoderdataPython |
1750453 | <gh_stars>1-10
import json
import os
from pathlib import Path
from eventz.event_store_json_file import EventStoreJsonFile
from eventz.marshall import Marshall, FqnResolver
from eventz.codecs.datetime import Datetime
from tests.conftest import parent_id1
# Shared marshaller for the tests in this module: maps the short FQNs stored
# in serialized events to the real example classes, and registers the
# datetime codec used by the event fixtures.
marshall = Marshall(
    fqn_resolver=FqnResolver(
        fqn_map={
            "tests.Child": "tests.example.child.Child",
            "tests.Children": "tests.example.children.Children",
            "tests.ParentCreated": "tests.example.parent.ParentCreated",
            "tests.ChildChosen": "tests.example.parent.ChildChosen",
        }
    ),
    codecs={"codecs.eventz.Datetime": Datetime()},
)
def test_sequence_of_events_can_be_read(
    json_events, parent_created_event, child_chosen_event
):
    """Events written to the JSON storage file come back as a sequenced tuple."""
    # set up the store
    storage_path = str(Path(__file__).absolute().parent) + "/storage"
    store = EventStoreJsonFile(
        storage_path=storage_path, marshall=marshall, recreate_storage=True,
    )
    # insert fixture data into the storage
    if not os.path.isdir(storage_path):
        os.mkdir(storage_path)
        os.chmod(storage_path, 0o777)
    with open(f"{storage_path}/{parent_id1}.json", "w+") as json_file:
        json.dump(json_events, json_file)
    # run test and make assertion
    events = store.fetch(parent_id1)
    assert events == (parent_created_event.sequence(1), child_chosen_event.sequence(2))
def test_new_sequence_of_events_can_be_persisted(
    json_events, parent_created_event, child_chosen_event
):
    """Persisting two events produces the exact expected JSON file contents."""
    storage_path = str(Path(__file__).absolute().parent) + "/storage"
    store = EventStoreJsonFile(
        storage_path=storage_path, marshall=marshall, recreate_storage=True,
    )
    assert store.fetch(parent_id1) == ()
    store.persist(parent_id1, (parent_created_event, child_chosen_event,))
    # Byte-exact comparison of the serialized event stream on disk.
    with open(f"{storage_path}/{parent_id1}.json", "r+") as json_file:
        assert json_file.read() == (
            '[{"__fqn__":"tests.ParentCreated","__msgid__":"11111111-1111-1111-1111-111111111111",'
            '"__seq__":1,"__timestamp__":{"__codec__":"codecs.eventz.Datetime","params":'
            '{"timestamp":"2020-01-02T03:04:05.123Z"}},"__version__":1,'
            f'"aggregateId":"{parent_id1}","children":'
            '{"__fqn__":"tests.Children","items":[{"__fqn__":"tests.Child","name":"Child '
            'One"},{"__fqn__":"tests.Child","name":"Child '
            'Two"},{"__fqn__":"tests.Child","name":"Child Three"}],"name":"Group '
            'One"}},'
            '{"__fqn__":"tests.ChildChosen","__msgid__":"22222222-2222-2222-2222-222222222222",'
            '"__seq__":2,"__timestamp__":{"__codec__":"codecs.eventz.Datetime","params":'
            '{"timestamp":"2020-01-02T03:04:06.123Z"}},"__version__":1,'
            f'"aggregateId":"{parent_id1}",'
            '"child":{"__fqn__":"tests.Child","name":"Child '
            f'Three"}}}}]'
        )
def test_fetch_sequence_from(
    json_events, parent_created_event, child_chosen_event
):
    """fetch(seq=N) returns only events at sequence number N and later."""
    # set up the store
    storage_path = str(Path(__file__).absolute().parent) + "/storage"
    store = EventStoreJsonFile(
        storage_path=storage_path, marshall=marshall, recreate_storage=True,
    )
    # insert fixture data into the storage
    if not os.path.isdir(storage_path):
        os.mkdir(storage_path)
        os.chmod(storage_path, 0o777)
    with open(f"{storage_path}/{parent_id1}.json", "w+") as json_file:
        json.dump(json_events, json_file)
    # run test and make assertion
    events = store.fetch(parent_id1, seq=2)
    assert events == (child_chosen_event.sequence(2),)
4953643 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tempfile
from tensorflow.python.ipu.config import IPUConfig
import numpy as np
from functools import partial
import tensorflow.compat.v1 as tf
from tensorflow.python import ipu
from ipu_sparse_ops import sparse, optimizers
import os
os.sys.path.append("../../") # dynamic_sparsity
from ipu_sparse_ops.model_baseclass import SparseModelOptions # noqa: E402
from ipu_sparse_ops.transformer.transformer_baseclass import TransformerOptions # noqa: E402
from ipu_sparse_ops.transformer.transformer_dense import DenseTransformer # noqa: E402
from ipu_sparse_ops.transformer.transformer_dynsparse import DynsparseTransformer # noqa: E402
# disable TF 2.0
tf.disable_eager_execution()
tf.disable_v2_behavior()
def get_program_arguments():
    """Build the combined transformer + sparse-model argument parser and
    parse the command line, with small-model defaults suited to this
    sparse-vs-dense equivalence check."""
    transformer_parser = TransformerOptions()
    SparseModelOptions.add_all_arguments(transformer_parser)
    transformer_parser.add_argument("--profile", action="store_true",
                                    help="Enable profiling for mem profile")
    default_settings = dict(
        dtype=tf.float32,
        source_sequence_length=12,
        hidden_length=16,
        ff_length=64,
        attention_heads=1,
        qkv_length=16,
        sparsity=0.9,
        batch_size=1,
        random_seed=11,
        pooling_type='NONE',
        dropout_keep_prob=1
    )
    transformer_parser.set_defaults(**default_settings)
    return transformer_parser.parse_args()
def stream_dense_grads_from_device(transformer, loss, ops=None):
    """Add, for every sparse layer, a fetch op for its dense gradient w.r.t.
    *loss*; returns the (possibly supplied) dict of ops keyed by
    '<layer>_grad_w'."""
    # This will create tensorflow ops which have to be
    # run in a session to retrieve the result
    ops = {} if ops is None else ops
    for name, sparse_layer in transformer.sparse_layers.items():
        with tf.variable_scope(name, reuse=True):
            dense_grad_w = sparse_layer.get_dense_grad_w(loss)
            ops[name + '_grad_w'] = tf.convert_to_tensor(dense_grad_w)
    return ops
def sparse_transformer_fwd_and_grad(transformer, input_activation):
    """Forward pass + one Adam training step for the sparse transformer;
    returns a dict of fetch ops: output activation, input grad, per-variable
    sparse grads, and per-layer dense grads."""
    transformer.compute_dense_grad = True
    output_activation = transformer.feed_forward(input_activation, compute_dense_grad=True)
    loss = tf.reduce_sum(output_activation)
    # Wrap the optimizer (this would help manage the slot variables)
    optimizer = optimizers.SparseOptimizer(tf.train.AdamOptimizer)
    optimizer = optimizer(learning_rate=1e-3, sparse_layers=transformer.sparse_layers.values())
    grads = optimizer.compute_gradients(loss)
    input_grad = tf.gradients(loss, input_activation)[0]
    # Control deps make sure the fetched values come from *before* the
    # weight update is applied.
    with tf.control_dependencies([input_grad]):
        train_op = optimizer.apply_gradients(grads)
        with tf.control_dependencies([train_op]):
            streamOps = {"output_activation": output_activation}
            streamOps["input_grad"] = input_grad
            # Sparse grads
            for grad, var in grads:
                streamOps[var.op.name + "_grad"] = grad
            # Dense grads
            stream_dense_grads_from_device(transformer, loss, streamOps)
            return streamOps
def dense_transformer_fwd_and_grad(transformer, input_activation):
    """Dense counterpart of sparse_transformer_fwd_and_grad: forward pass +
    one Adam step, returning output activation, input grad and all variable
    grads as fetch ops."""
    output_activation = transformer.feed_forward(input_activation)
    loss = tf.reduce_sum(output_activation)
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
    grads = optimizer.compute_gradients(loss)
    input_grad = tf.gradients(loss, input_activation)[0]
    with tf.control_dependencies([input_grad]):
        train_op = optimizer.apply_gradients(grads)
        with tf.control_dependencies([train_op]):
            streamOps = {"output_activation": output_activation}
            streamOps["input_grad"] = input_grad
            for grad, var in grads:
                streamOps[var.op.name + "_grad"] = grad
            return streamOps
def main(args):
    """Build identical sparse and dense transformer feed-forward graphs, run
    one training step of each on the same inputs (weights shared via a dense
    checkpoint), and assert that activations and all gradients match within
    dtype-dependent tolerances. Returns (sparse_result, dense_result)."""
    tf.logging.set_verbosity(tf.logging.ERROR)
    np.set_printoptions(linewidth=200)
    random_seed = args.random_seed
    checkpoint_path = os.path.join(tempfile.mkdtemp(), "model.ckpt")
    # Input activations for the attention layer
    random_gen = np.random.default_rng(seed=random_seed)
    activations_np = random_gen.uniform(-0.1, 0.1, size=(args.batch_size, args.source_sequence_length, args.hidden_length))
    # Configure the IPU
    cfg = IPUConfig()
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()
    # Build IPU graphs
    sparse_decoder_graph = tf.Graph()
    sparse_transformer = DynsparseTransformer(args)
    with sparse_decoder_graph.as_default():
        with tf.device("cpu"):
            # placeholder for activations
            # weight placeholders are created inside sparse_transfomer
            inputs_ph = tf.placeholder(args.dtype, activations_np.shape)
        with ipu.scopes.ipu_scope("/device:IPU:0"):
            sparse_decoder = partial(sparse_transformer_fwd_and_grad, sparse_transformer)
            sparse_decoder_fetches = ipu.ipu_compiler.compile(sparse_decoder, [inputs_ph])
            ipu.utils.move_variable_initialization_to_cpu()
    # sparse-decoder
    with tf.Session(graph=sparse_decoder_graph) as sess:
        # initialize weights
        sess.run(tf.global_variables_initializer())
        # Save the sparse weights to checkpoint as dense
        sparse_transformer.checkpointAsDense(checkpoint_path)
        # run sparse decoder
        sparse_result = sess.run(sparse_decoder_fetches, feed_dict={inputs_ph: activations_np})
    # Create a dense transformer and initialize the weights to the values that
    # the sparse model was initialzed with originally
    dense_decoder_graph = tf.Graph()
    dense_transformer = DenseTransformer(args)
    with dense_decoder_graph.as_default():
        with tf.device("cpu"):
            # placeholder for activations
            # weights will get streamed from checkpoint
            inputs_ph = tf.placeholder(args.dtype, activations_np.shape)
        with ipu.scopes.ipu_scope("/device:IPU:0"):
            dense_decoder_fetches = partial(dense_transformer_fwd_and_grad, dense_transformer)
            dense_graph = ipu.ipu_compiler.compile(dense_decoder_fetches, [inputs_ph])
            ipu.utils.move_variable_initialization_to_cpu()
        with tf.device("cpu"):
            # We will only load the trainable variables, not momentum etc.
            loader = tf.train.Saver(tf.trainable_variables())
    # dense-decoder
    with tf.Session(graph=dense_decoder_graph) as sess:
        # Initialized momentums which are not part of the checkpoint
        sess.run(tf.global_variables_initializer())
        # Restore saved trainable variables
        loader.restore(sess, checkpoint_path)
        dense_result = sess.run(dense_graph, feed_dict={inputs_ph: activations_np})
    # TEST: fp16 needs looser tolerances than fp32.
    rtol = 1e-05
    atol = 1e-05
    if args.dtype == tf.float16:
        rtol = 1e-04
        atol = 1e-02
    # Compare model output activations (actual vs. desired) -> (sparse vs. dense)
    np.testing.assert_allclose(sparse_result["output_activation"], dense_result["output_activation"],
                               atol=atol, rtol=rtol, err_msg="Output activations do not match.")
    # Compare gradient of output wrt. input
    np.testing.assert_allclose(sparse_result["input_grad"], dense_result["input_grad"],
                               atol=atol, rtol=rtol, err_msg="Grads wrt. inputs do not match")
    # Compare the dense_w and sparse grads of every sparse layer
    for name, sparse_layer in sparse_transformer.sparse_layers.items():
        # Compare the dense grads
        dense_grad = dense_result[name + "/weight" + "_grad"]
        sparse_grad_w = sparse_result[name + "_grad_w"]
        np.testing.assert_allclose(sparse_grad_w, dense_grad, atol=atol, rtol=rtol,
                                   err_msg=f"Dense grads for layer {name} do not match")
        # Compare the sparse grads
        sparse_grad_padded = sparse_result[name + "/sparse_layer/nz_values_grad"]
        sparse_grad_data = sparse.SparseRepresentation(sparse_layer.weights.get_metainfo(), sparse_grad_padded)
        i, j, sparse_grad = sparse.triplets_from_representation(sparse_layer.weights.spec, sparse_grad_data, sparse_layer.weights.matmul_options)
        # Convert dense grads to blocks: view as (nx, ny, bs, bs) via strides.
        block_size, _ = sparse_layer.get_nonzero_blocks_shape()
        nx, ny = dense_grad.shape[0] // block_size, dense_grad.shape[1] // block_size
        strides = np.array(dense_grad.strides)  # strides are in bytes
        strides = tuple(strides * block_size) + tuple(strides)
        blocked_dense_grad = np.lib.stride_tricks.as_strided(dense_grad, (nx, ny, block_size, block_size), strides)
        if block_size == 1:
            blocked_dense_grad = np.squeeze(np.copy(blocked_dense_grad), axis=(-2, -1))
        np.testing.assert_allclose(sparse_grad, blocked_dense_grad[i, j], atol=atol, rtol=rtol,
                                   err_msg=f"Sparse grads for layer {name} do not match")
    print("All results match.")
    return sparse_result, dense_result
# Script entry point: run the sparse/dense equivalence check.
if __name__ == "__main__":
    args = get_program_arguments()
    a, b = main(args)
| StarcoderdataPython |
1756261 | import onnxruntime
import torch
import onnx
import onnxsim
class OnnxBackend:
    """Static helpers for exporting a torch model to ONNX and running
    inference on it with onnxruntime."""

    def __init__(self):
        pass

    @staticmethod
    def convert(model, imgs, weights, dynamic, simplify):
        """Export a torch model to ONNX (opset 12), check it, and optionally
        simplify it in place.

        model: torch module to export
        imgs: [B,C,H,W] example input tensor used for tracing
        weights: output path for the .onnx file
        dynamic: make the batch axis a dynamic dimension
        simplify: run onnx-simplifier on the exported graph
        """
        torch.onnx.export(
            model,
            imgs,
            weights,
            verbose=False,
            opset_version=12,
            input_names=["input"],
            output_names=["output"],
            dynamic_axes={"input": {0: "batch"}, "output": {0: "batch"}}
            if dynamic
            else None,
        )
        model_onnx = onnx.load(weights)  # load onnx model
        onnx.checker.check_model(model_onnx)  # check onnx model
        if simplify:
            try:
                model_onnx, check = onnxsim.simplify(
                    model_onnx,
                    dynamic_input_shape=dynamic,
                    input_shapes={"input": list(imgs.shape)} if dynamic else None,
                )
                assert check, "assert check failed"
                onnx.save(model_onnx, weights)
            except Exception as e:
                # Simplification is best-effort; typo fixed ("simplifer").
                print(f"simplifier failure: {e}")
        print("*" * 28)
        print("ONNX export success, saved as %s" % weights)
        print("Visualize onnx with https://github.com/lutzroeder/netron.\n")

    @staticmethod
    def infer(weights, imgs):
        """Load an ONNX model and run a single forward pass on CPU.

        weights (str): path to the .onnx file
        imgs (numpy): [B,C,H,W] input batch
        Returns the first model output as a numpy array.
        """
        session = onnxruntime.InferenceSession(
            weights, providers=["CPUExecutionProvider"]
        )
        output = session.run(
            [session.get_outputs()[0].name], {session.get_inputs()[0].name: imgs}
        )[0]
        return output
| StarcoderdataPython |
399668 |
# class Myclass:
# i = 1234
# def f(self):
# print(self.i)
# my = Myclass()
# my.f()
from selenium import webdriver
driver = webdriver.Chrome()
driver.get('http://192.168.127.12:3000/signin')


class LoginPage:
    """Page object for the sign-in form: element locators plus the login action."""

    username_id = "name"
    # NOTE(review): this locator value was anonymised in the source dump —
    # restore the real password-field id before use.
    passwd_id = "<PASSWORD>"
    login_btn_className = "span-primary"

    def user_logind(self, username, passwd):
        """Fill in the credentials and click the login button."""
        driver.find_element_by_id(self.username_id).send_keys(username)
        # BUG FIX: was `send_keys(<PASSWORD>)` — an invalid anonymisation
        # placeholder; the `passwd` argument is what must be typed here.
        driver.find_element_by_id(self.passwd_id).send_keys(passwd)
        driver.find_element_by_class_name(self.login_btn_className).click()


login = LoginPage()
login.user_logind('helloworld', '123456')
| StarcoderdataPython |
150136 | from expt import run_expt
# Interactive driver for run_expt (Python 2: uses raw_input).
# Fixed experiment settings: 2/3 train split, 5 trials, verbose output.
expt_param = {'training_frac': 2.0/3.0, 'num_trials': 5, 'verbosity': True, 'num_ticks': 6}
# Solver settings: step-size grid and inner/outer iteration counts.
solver_param = {'eta_list': [0.01,0.1,1,10,100,1000], 'num_inner_iter': 10, 'num_outer_iter': 100}
data = raw_input("dataset (abalone/adult/compas/crimes/default/page-blocks): ")
loss = raw_input("loss function (err/hmean/qmean/fmeasure/microF1): ")
cons = raw_input("constraint function (cov/dp/kld/nae or unconstrained): ")
# Protected-attribute handling is only needed for the demographic-parity constraint.
expt_param['is_protected'] = True if cons == 'dp' else False
if cons == 'unc' or cons == 'unconstrained':
    run_expt(loss, '', data, expt_param, solver_param)
else:
    # Constrained runs additionally need a slack value eps.
    eps = raw_input("eps: ")
    run_expt(loss, cons, data, expt_param, solver_param, float(eps))
| StarcoderdataPython |
4872796 | <filename>src/Compiler.py<gh_stars>0
import os
import json
from pprint import pprint
# Concatenate the JSON text of every framework definition in ../lib and
# emit it as a JS global for the Chrome extension.
arr = os.listdir('../lib')
combined = ""
for file in arr:
    # Context manager replaces the previously unclosed file handle.
    with open('../lib/' + file) as data_file:
        data = json.load(data_file)
    combined += json.dumps(data)
    pprint(data)

with open('../chrome/frameworks.js', 'w') as writer:
    writer.write('let dog = ' + combined + ';')

if __name__ == "__main__":
    print("Here")
1999790 | <filename>icom_flow_ctrl.py
#!/usr/bin/python
# -*- coding= utf-8 -*-
from icom_ctrl_msg_id import *
class flow_ctrl():
SYNC_MSG_TIMER_ID = 1
DEFAULT_SYNC_MSG_TIMER_LEN = 1
in_flow_ctrl_state = False
current_timer_len = 1
pftimer_func = None
timer_running = False
pfflowctrl_func = None
GUI_STATE_INIT = 30
GUI_STATE_FOCUS = 40
GUI_STATE_ZOOMED = 35
GUI_STATE_NORMAL = 30
GUI_STATE_ICON = 10
GUI_STATE_HIDE = 9
GUI_STATE_OTHER = 8
HOLD_DIVIDE = 4
    def __init__(self,tid=None):
        """Initialise per-instance rate-tracking state; thresholds are
        derived from the (normal) GUI state."""
        self.__gui_state = flow_ctrl.GUI_STATE_NORMAL
        self.__hold_rate = flow_ctrl.GUI_STATE_NORMAL
        # Minimum message rate before flow control may engage.
        self.__ctrl_min_rate = self.__hold_rate*(flow_ctrl.HOLD_DIVIDE+1)//flow_ctrl.HOLD_DIVIDE if self.__hold_rate > 16 else self.__hold_rate *2
        self.__last_ctrl_time_slips = 10
        self.__last_flow_ctrl = flow_ctrl.HOLD_DIVIDE
        self.pingpong_thresthold = self.__hold_rate//flow_ctrl.HOLD_DIVIDE
        self.current_msg_rate = 0
        self.real_data_msg_rate = 0
        self.history_msg_rate = 0
        # Messages observed in the current measurement window.
        self.sync_msg_count = 0
        self.__sync_msg_resize_count = 0
        self.in_flow_ctrl_state = False
        self.current_timer_len = flow_ctrl.DEFAULT_SYNC_MSG_TIMER_LEN
        # Step used to escalate the requested throttle when rate is too high.
        self.__speed_too_high_step = 100
        self.__flow_ctrl_send = 0
        self.__flow_ctrl_cnf = 0
        self.__flow_ctrl_enable = False
        self.__tid = tid
        # Rebind the class-level timer id to the project-wide timer enum.
        flow_ctrl.SYNC_MSG_TIMER_ID = ICOM_TIMER.ID_FLOW_CTRL
    def set_gui_state(self,ui_state):
        """Adopt a new GUI state and re-derive the rate thresholds from it."""
        if ui_state > 0 and ui_state != self.__gui_state:
            self.__gui_state = ui_state
            self.__hold_rate = ui_state
            self.__ctrl_min_rate = self.__hold_rate*(flow_ctrl.HOLD_DIVIDE+1)//flow_ctrl.HOLD_DIVIDE if self.__hold_rate > 16 else self.__hold_rate *2
            self.pingpong_thresthold = self.__hold_rate//flow_ctrl.HOLD_DIVIDE
    def set_enable(self, enable):
        """Globally enable/disable flow control (coerced to a strict bool)."""
        self.__flow_ctrl_enable = True if enable else False
    def set_timer_req_function(self,pftimer_func):
        """Register the callback used to start/modify/stop the sync timer."""
        self.pftimer_func = pftimer_func
    def set_flow_ctrl_req_function(self,pfflowctrl_func):
        """Register the callback used to send flow-control requests."""
        self.pfflowctrl_func = pfflowctrl_func
    def send_flow_ctrl_req(self,ctrl_msg_count):
        """Send a 'FLOW-CTRL' request (no-op unless enabled and a callback
        is registered); counts outstanding requests for cnf matching."""
        if self.pfflowctrl_func and self.__flow_ctrl_enable is True:
            self.__flow_ctrl_send += 1
            self.pfflowctrl_func('FLOW-CTRL',ctrl_msg_count)
    def start_flow_ctrl_timer(self):
        """Start the sync-message timer with the current timer length."""
        if self.pftimer_func and self.__flow_ctrl_enable is True:
            flow_ctrl.timer_running = True
            self.pftimer_func('START-TIMER',flow_ctrl.SYNC_MSG_TIMER_ID,self.current_timer_len)
    def modify_flow_ctrl_timer(self,timer_len):
        """Change the running timer's period to *timer_len* (ignored if the
        timer is not running)."""
        if flow_ctrl.timer_running is True and self.pftimer_func:
            self.current_timer_len = timer_len
            self.pftimer_func('MODIFY-TIMER',flow_ctrl.SYNC_MSG_TIMER_ID,self.current_timer_len)
    def stop_flow_ctrl_timer(self):
        """Stop the sync-message timer and reset its period to the default."""
        if self.pftimer_func and self.__flow_ctrl_enable is True:
            flow_ctrl.timer_running = False
            self.pftimer_func('STOP-TIMER',flow_ctrl.SYNC_MSG_TIMER_ID)
            self.current_timer_len = flow_ctrl.DEFAULT_SYNC_MSG_TIMER_LEN
    def start_ctrl(self,port_name):
        """Begin flow control for a port: start the timer if not already running."""
        if flow_ctrl.timer_running is False and self.__flow_ctrl_enable is True:
            self.start_flow_ctrl_timer()
    def stop_ctrl(self,port_name):
        """Tear down flow control, but only once every port has closed."""
        if 'PORTS-ALL-CLOSE' == port_name and self.__flow_ctrl_enable is True:
            if flow_ctrl.timer_running is True:
                self.stop_flow_ctrl_timer()
            self.__flow_ctrl_cnf = self.__flow_ctrl_send = 0
    def get_flow_ctrl_info(self):
        """Return (in_ctrl_state, current_rate, real_data_rate, sync_count,
        timer_len-or-0) for diagnostics."""
        return (self.in_flow_ctrl_state,self.current_msg_rate,self.real_data_msg_rate,self.sync_msg_count,self.current_timer_len if flow_ctrl.timer_running else 0)
    def process_flow_ctrl(self,msg_int_type, msg_int_param1, msg_int_param2, msg_int_param3, msg_int_param4):
        """Core flow-control state machine, driven by internal control messages.

        Counts incoming data/sync messages, throttles the sender via
        'FLOW-CTRL' requests when the message rate outruns the hold rate,
        and adapts the sampling timer length on every timer timeout.
        Returns False when the caller should drop (not continue processing)
        the current message; True otherwise.

        NOTE(review): units of the rates (messages per timer tick?) and the
        semantics of msg_int_param1 (a resize/byte count, judging by its
        accumulation into __sync_msg_resize_count) are not visible here —
        confirm against the message producers. msg_int_param3/4 are unused.
        """
        need_continue_process = True
        if ICOM_CTRL_MSG.ID_PROC_DATA_MSG == msg_int_type:
            # Ordinary data message: count it and its size parameter.
            self.sync_msg_count += 1
            self.__sync_msg_resize_count += msg_int_param1
            if self.__flow_ctrl_enable is False:
                pass
            elif flow_ctrl.timer_running is False and self.sync_msg_count >= self.__ctrl_min_rate * flow_ctrl.HOLD_DIVIDE:
                # Traffic picked up while the timer was off: start sampling
                # and restart the counters.
                self.start_flow_ctrl_timer()
                self.sync_msg_count = 0
                self.__sync_msg_resize_count = 0
            elif self.in_flow_ctrl_state is True or self.sync_msg_count <= self.__ctrl_min_rate:
                # Already throttling, or still below the control threshold.
                pass
            elif self.current_timer_len >= 2:
                # Rate exceeded the threshold mid-interval: shrink the timer
                # to 1 tick so the rate is re-evaluated quickly.
                self.modify_flow_ctrl_timer(1)
                self.current_timer_len = 1
            elif self.sync_msg_count > self.__hold_rate * 10:
                # Burst far above the hold rate: escalate throttling with an
                # exponentially growing step (capped at 10240).
                new_flow_ctrl = self.sync_msg_count * self.__speed_too_high_step * flow_ctrl.HOLD_DIVIDE//self.__hold_rate
                if new_flow_ctrl >= self.__last_flow_ctrl + self.__speed_too_high_step and self.__flow_ctrl_cnf >= self.__flow_ctrl_send:
                    self.send_flow_ctrl_req(new_flow_ctrl)
                    self.__last_flow_ctrl = new_flow_ctrl
                    if self.__speed_too_high_step < 10240:
                        self.__speed_too_high_step *= 2
                if self.sync_msg_count % new_flow_ctrl != 0:
                    # Drop all but every new_flow_ctrl-th message.
                    need_continue_process = False
                print ('ID:%d,hispeed:%d,%d,%d,%s, %d,%d'%(self.__tid,self.__speed_too_high_step,self.sync_msg_count,new_flow_ctrl,need_continue_process,msg_int_param1,msg_int_param2))
        elif ICOM_CTRL_MSG.ID_SEND_DATA_CNF_OK == msg_int_type:
            # Send confirmation counts toward the observed rate as well.
            self.sync_msg_count += 1
            self.__sync_msg_resize_count += msg_int_param1
        elif ICOM_CTRL_MSG.ID_FORCE_SYNC_MSG == msg_int_type:
            self.sync_msg_count += 1
            self.__sync_msg_resize_count += msg_int_param1
        elif ICOM_CTRL_MSG.ID_FLOW_CTRL_CNF == msg_int_type:
            # Peer acknowledged a FLOW-CTRL request; param1 encodes whether
            # throttling is now active.
            self.__flow_ctrl_cnf += 1
            self.in_flow_ctrl_state = True if msg_int_param1 > flow_ctrl.HOLD_DIVIDE else False
            if self.__flow_ctrl_cnf >= self.__flow_ctrl_send >= 1000:
                # Periodically reset both counters to avoid unbounded growth.
                self.__flow_ctrl_cnf = self.__flow_ctrl_send = 0
        elif ICOM_CTRL_MSG.ID_TIMER_TIMEOUT == msg_int_type:
            if isinstance(msg_int_param2,int) and msg_int_param2 > 0:
                # Timer re-armed externally with a new length.
                self.current_timer_len = msg_int_param2
                flow_ctrl.timer_running = True
            else:#timer stoped
                # Timer stopped: restore defaults and lift throttling.
                self.current_timer_len = flow_ctrl.DEFAULT_SYNC_MSG_TIMER_LEN
                self.__last_flow_ctrl = flow_ctrl.HOLD_DIVIDE
                self.send_flow_ctrl_req(0)
            # Per-interval rates; +len//2 rounds to nearest instead of down.
            self.current_msg_rate = (self.sync_msg_count + self.current_timer_len//2) // self.current_timer_len
            # Exponential moving average, 80% history / 20% current.
            self.history_msg_rate = (self.history_msg_rate * 8 + self.current_msg_rate*2) // 10
            self.real_data_msg_rate = (self.__sync_msg_resize_count//flow_ctrl.HOLD_DIVIDE + self.current_timer_len//2) // self.current_timer_len
            self.__last_ctrl_time_slips += self.current_timer_len
            need_continue_process = True if (self.sync_msg_count > 0 or self.history_msg_rate > 0) else False
            if self.__flow_ctrl_enable is False:
                pass
            elif self.__last_ctrl_time_slips > 5 and self.__flow_ctrl_cnf >= self.__flow_ctrl_send:
                # At most every ~5 ticks, and only when all previous requests
                # were confirmed, consider issuing a new throttle level.
                do_flow_ctrl = False
                new_flow_ctrl = self.__sync_msg_resize_count//(self.current_timer_len*self.__hold_rate)
                #print ('ID:%d rate cur:%d his:%d rel:%d new:%d lst:%d'%(self.__tid, self.current_msg_rate,self.history_msg_rate,self.real_data_msg_rate,new_flow_ctrl,self.__last_flow_ctrl))
                if new_flow_ctrl != self.__last_flow_ctrl:
                    if self.in_flow_ctrl_state is True:
                        # While throttled, only adjust outside the ping-pong
                        # dead band around the hold rate.
                        if not (self.__hold_rate - self.pingpong_thresthold < self.current_msg_rate < self.__hold_rate + self.pingpong_thresthold):
                            do_flow_ctrl = True
                    elif self.current_msg_rate > (self.__ctrl_min_rate + self.pingpong_thresthold) and self.history_msg_rate >= self.__ctrl_min_rate:
                        do_flow_ctrl = True
                if do_flow_ctrl is True:
                    print ('ID:%d rate cur:%d his:%d rel:%d cnt:%d t:%d h:%d,lctrl:%d,nctrl:%d(%s)'%(self.__tid,self.current_msg_rate,self.history_msg_rate,self.real_data_msg_rate,self.sync_msg_count,
                        self.current_timer_len,self.__hold_rate,self.__last_flow_ctrl,new_flow_ctrl,do_flow_ctrl))
                    self.__last_flow_ctrl = new_flow_ctrl
                    self.send_flow_ctrl_req(new_flow_ctrl)
                self.__last_ctrl_time_slips = 0
            # Adapt the sampling interval to the observed rate.
            if self.__flow_ctrl_enable is False or flow_ctrl.timer_running is False:
                pass
            elif self.current_msg_rate <= 2 and self.history_msg_rate <= 3:#add timer length
                self.__speed_too_high_step = 100
                #reset timer length
                if self.current_timer_len < 8:
                    if self.sync_msg_count <= 1:
                        self.modify_flow_ctrl_timer(self.current_timer_len + self.current_timer_len)
                    elif self.current_timer_len >= 5:
                        self.modify_flow_ctrl_timer(self.current_timer_len + 2)
                    else:
                        self.modify_flow_ctrl_timer(self.current_timer_len + 1)
            elif self.current_timer_len > 1:#minus timer length
                if self.current_msg_rate > 8:
                    self.modify_flow_ctrl_timer((self.current_timer_len + 3) // 4)
                else:
                    self.modify_flow_ctrl_timer((self.current_timer_len + 1) // 2)
            # New sampling interval starts now.
            self.sync_msg_count = 0
            self.__sync_msg_resize_count = 0
        else:
            # Unknown control message: still counts toward the rate.
            self.sync_msg_count += 1
        return need_continue_process
| StarcoderdataPython |
6661432 | import random
import sys
import pkg_resources
import pytest
from req_compile.repos.repository import (
WheelVersionTags,
Candidate,
sort_candidates,
_wheel_candidate,
_impl_major_minor,
_py_version_score,
)
@pytest.mark.parametrize(
    "sys_py_version, py_requires",
    [
        ("3.5.0", None),
        ("2.6.10", None),
        ("3.6.3", ("py3",)),
        ("3.6.3", ("py2", "py3")),
        ("3.6.3", ()),
    ],
)
def test_version_compatible(mock_py_version, sys_py_version, py_requires):
    """None/empty tag sets and matching major versions are all compatible."""
    mock_py_version(sys_py_version)
    tags = WheelVersionTags(py_requires)
    assert tags.check_compatibility()
@pytest.mark.parametrize(
    "sys_py_version, py_requires",
    [
        ("3.6.3", ("py2",)),
        ("2.7.16", ("py3",)),
    ],
)
def test_version_incompatible(mock_py_version, sys_py_version, py_requires):
    """A wheel tagged only for the other major version must be rejected."""
    mock_py_version(sys_py_version)
    tags = WheelVersionTags(py_requires)
    assert not tags.check_compatibility()
@pytest.mark.parametrize(
    "py_requires, expected",
    [
        (("py2",), "py2"),
        (("py2", "py3"), "py2.py3"),
        (("py3", "py2"), "py2.py3"),
        ((), "any"),
        (None, "any"),
    ],
)
def test_version_str(py_requires, expected):
    """str() renders sorted dotted tags, or 'any' when unconstrained."""
    tags = WheelVersionTags(py_requires)
    assert str(tags) == expected
def test_sort_non_semver():
    """Non-semver (calendar-style) versions must sort in pip's order."""
    # This is the order that pip chooses for these pytz releases.
    candidate_vers = (
        "2019.3",
        "2017.2",
        "2015.6",
        "2013.6",
        "2013b0",
        "2012rc0",
        "2012b0",
        "2009r",
        "2013d",
        "2011k",
    )
    candidates = [
        Candidate(
            "pytz", None, pkg_resources.parse_version(ver), None, None, "any", None
        )
        for ver in candidate_vers
    ]
    reference = list(candidates)
    random.shuffle(candidates)
    assert sort_candidates(candidates) == reference
def test_sort_specific_platforms(mock_py_version, mocker):
    """A wheel built for the current platform outranks the universal wheel."""
    mock_py_version("3.7.4")
    mocker.patch(
        "req_compile.repos.repository.PLATFORM_TAGS",
        ("this_platform",),
    )
    wheel_names = (
        "sounddevice-0.4.1-cp32.cp33.cp34.cp35.cp36.cp37.cp38.cp39.pp32.pp33.pp34.pp35.pp36.pp37.py3-None-this_platform.whl",
        "sounddevice-0.4.1-py3-None-any.whl",
    )
    expected = [_wheel_candidate("pypi", name) for name in wheel_names]
    assert sort_candidates(reversed(expected)) == expected
def test_sort_wheels_with_any(mock_py_version, mocker):
    """Platform-matching wheels beat 'any', which beats unsupported tags."""
    mock_py_version("3.7.4")
    mocker.patch(
        "req_compile.repos.repository.PLATFORM_TAGS",
        ("this_platform",),
    )
    wheel_names = (
        "pyenchant-3.2.1-py3-None-this_platform.and_another.whl",
        "pyenchant-3.2.1-py3-None-this_platform.whl",
        "pyenchant-3.2.1-py3-None-any.whl",
        "pyenchant-3.2.1-py3-None-unsupported_platform.whl",
        "pyenchant-3.2.1-None-None-any.whl",
    )
    expected = [_wheel_candidate("pypi", name) for name in wheel_names]
    assert sort_candidates(reversed(expected)) == expected
def test_sort_manylinux():
    """A wheel against a newer manylinux glibc baseline sorts higher."""
    def make(platforms):
        return Candidate(
            "pytz",
            None,
            pkg_resources.parse_version("1.0"),
            WheelVersionTags(["cp37"]),
            "cp37m",
            platforms,
            None,
        )

    newer = make(["manylinux_2_12_x86_64", "manylinux2010_x86_64"])
    older = make(["manylinux_2_11_x86_64"])
    assert newer.sortkey > older.sortkey
@pytest.mark.skipif(sys.platform != "darwin", reason="MacOS only test")
def test_sort_macos():
    """Newer macOS platform tags, then newer CPython tags, sort higher."""
    def make(py_tag, platform):
        return Candidate(
            "pytz",
            None,
            pkg_resources.parse_version("1.0"),
            WheelVersionTags([py_tag]),
            py_tag,
            [platform],
            None,
        )

    newer_os = make("cp37", "macosx_10_9_x86_64")
    older_os = make("cp37", "macosx_10_8_x86_64")
    older_py = make("cp36", "macosx_10_9_x86_64")
    assert newer_os.sortkey > older_os.sortkey
    assert older_os.sortkey > older_py.sortkey
@pytest.mark.parametrize(
    ["py_version", "results"],
    [
        ("py3", ("py", 3, 0)),
        ("cp37", ("cp", 3, 7)),
        ("cp3x", ("cp", 3, 0)),
        ("", ("", 0, 0)),
    ],
)
def test_impl_major_minor(py_version, results):
    """Python version tags decompose into (implementation, major, minor)."""
    decomposed = _impl_major_minor(py_version)
    assert decomposed == results
def test_py_version_score():
    """Scores rank: cp37 > jy37 > cp36 > py3 (impl, version, specificity)."""
    ordering = ("cp37", "jy37", "cp36", "py3")
    scores = [_py_version_score(tag) for tag in ordering]
    assert scores[0] > scores[1] > scores[2] > scores[3]
| StarcoderdataPython |
5039002 | <reponame>dylanashley/catastrophic-forgetting
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.model_selection import StratifiedKFold
import argparse
import numpy as np
import os
import sys
import tensorflow as tf
# parse args
import warnings  # bug fix: warnings.warn below was called but the module was never imported

parser = argparse.ArgumentParser(
    description='This constructs masks to create folds in the mnist and fashion MNIST datasets.')
parser.add_argument(
    'outfile',
    type=str,
    help='npy file to dump masks to; will terminate if file already exists')
parser.add_argument(
    'dataset',
    type=str,
    choices=['mnist', 'fashion_mnist'],
    help='dataset to use in experiments')
parser.add_argument(
    'folds',
    type=int,
    help='number of folds to build masks for')
args = vars(parser.parse_args())

# check args: refuse to overwrite an existing mask file
if os.path.isfile(args['outfile']):
    warnings.warn('outfile already exists; terminating\n')
    sys.exit(0)

SEED = 15626  # generated by RANDOM.ORG; fixes the fold shuffling
# load dataset (x: 28x28 grayscale images scaled to [0, 1]; y: integer labels)
if args['dataset'] == 'mnist':
    (raw_x_train, raw_y_train), (raw_x_test, raw_y_test) = \
        tf.keras.datasets.mnist.load_data()
else:
    assert args['dataset'] == 'fashion_mnist'
    (raw_x_train, raw_y_train), (raw_x_test, raw_y_test) = \
        tf.keras.datasets.fashion_mnist.load_data()
raw_x_train, raw_x_test = raw_x_train / 255.0, raw_x_test / 255.0

# build mask for folds
# Stratified folds preserve per-class proportions; only the held-out index
# array of each split is kept.
skf = StratifiedKFold(n_splits=args['folds'], shuffle=True, random_state=SEED)
folds = [i for _, i in skf.split(raw_x_train, raw_y_train)]

# build mask for each digit in folds
# masks[i][j] is a boolean mask over the training set selecting class j
# within fold i; the extra final row (masks[-1]) holds per-class masks over
# the holdout/test labels (note: those masks have test-set length).
masks = [list() for _ in range(len(folds) + 1)]
assert(min(raw_y_train) == 0)
for j in range(max(raw_y_train) + 1):
    for i in range(len(folds)):
        assert(len(raw_y_train.shape) == 1)
        mask = np.zeros(len(raw_y_train), dtype=bool)
        mask[folds[i]] += raw_y_train[folds[i]] == j
        masks[i].append(mask)
    masks[-1].append(raw_y_test == j)
masks = np.array(masks)

# print report: per-fold, per-class sample counts as CSV
print('fold,number,count')
lines = list()
for j in range(max(raw_y_train) + 1):
    for i in range(len(folds)):
        lines.append('{},{},{}'.format(i, j, sum(masks[i, j])))
    lines.append('holdout,{},{}'.format(j, sum(masks[-1, j])))
for line in sorted(lines):
    print(line)

# save masks
np.save(args['outfile'], masks)
| StarcoderdataPython |
9713055 | <reponame>HoleCat/echarlosperros<gh_stars>0
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import tempfile
import uuid
from google.api_core.exceptions import NotFound
from google.cloud import storage
from google.cloud.storage import Blob
import pytest
import storage_download_encrypted_file
import storage_generate_encryption_key
import storage_object_csek_to_cmek
import storage_rotate_encryption_key
import storage_upload_encrypted_file
BUCKET = os.environ["CLOUD_STORAGE_BUCKET"]
KMS_KEY = os.environ["CLOUD_KMS_KEY"]
TEST_ENCRYPTION_KEY = "<KEY>
TEST_ENCRYPTION_KEY_DECODED = base64.b64decode(TEST_ENCRYPTION_KEY)
TEST_ENCRYPTION_KEY_2 = "<KEY>
TEST_ENCRYPTION_KEY_2_DECODED = base64.b64decode(TEST_ENCRYPTION_KEY_2)
def test_generate_encryption_key(capsys):
    """The sample prints a base64 key that must decode to 32 bytes (AES-256)."""
    storage_generate_encryption_key.generate_encryption_key()
    captured, _ = capsys.readouterr()
    # Printed format is "<label>: <base64 key>"; keep everything after ':'.
    encoded_key = captured.split(":", 1)[-1].strip()
    raw_key = base64.b64decode(encoded_key)
    assert len(raw_key) == 32, "Returned key should be 32 bytes"
def test_upload_encrypted_blob():
    """Smoke test: upload a small CSEK-encrypted object from a temp file.

    Bug fix: the original never flushed the NamedTemporaryFile after
    writing, so the upload helper (which re-opens the file by name) could
    read an empty file because the bytes were still in the write buffer.
    """
    with tempfile.NamedTemporaryFile() as source_file:
        source_file.write(b"test")
        source_file.flush()  # ensure bytes are on disk before re-open by name
        storage_upload_encrypted_file.upload_encrypted_blob(
            BUCKET,
            source_file.name,
            "test_encrypted_upload_blob",
            TEST_ENCRYPTION_KEY,
        )
@pytest.fixture(scope="module")
def test_blob():
    """Provides a pre-existing blob in the test bucket.

    Yields (blob_name, content). The blob is uploaded with the first CSEK;
    some tests rotate it to the second key, so teardown tries both keys.
    """
    bucket = storage.Client().bucket(BUCKET)
    blob_name = "test_blob_{}".format(uuid.uuid4().hex)
    blob = Blob(
        blob_name,
        bucket,
        encryption_key=TEST_ENCRYPTION_KEY_DECODED,
    )
    content = "Hello, is it me you're looking for?"
    blob.upload_from_string(content)
    yield blob.name, content
    # To delete an encrypted blob, you have to provide the same key
    # used for the blob. When you provide a wrong key, you'll get
    # NotFound.
    try:
        # Clean up for the case that the rotation didn't occur.
        blob.delete()
    except NotFound as e:
        # For the case that the rotation succeeded: retry with the
        # rotated (second) key.
        print("Ignoring 404, detail: {}".format(e))
        blob = Blob(
            blob_name,
            bucket,
            encryption_key=TEST_ENCRYPTION_KEY_2_DECODED
        )
        blob.delete()
def test_download_blob(test_blob):
    """Downloading with the original CSEK returns the uploaded content."""
    blob_name, expected_content = test_blob
    with tempfile.NamedTemporaryFile() as dest_file:
        storage_download_encrypted_file.download_encrypted_blob(
            BUCKET, blob_name, dest_file.name, TEST_ENCRYPTION_KEY
        )
        assert dest_file.read().decode("utf-8") == expected_content
def test_rotate_encryption_key(test_blob):
    """After rotating CSEKs, the blob must decrypt with the new key."""
    blob_name, expected_content = test_blob
    storage_rotate_encryption_key.rotate_encryption_key(
        BUCKET, blob_name, TEST_ENCRYPTION_KEY, TEST_ENCRYPTION_KEY_2
    )
    with tempfile.NamedTemporaryFile() as dest_file:
        storage_download_encrypted_file.download_encrypted_blob(
            BUCKET, blob_name, dest_file.name, TEST_ENCRYPTION_KEY_2
        )
        assert dest_file.read().decode("utf-8") == expected_content
def test_object_csek_to_cmek(test_blob):
    """Re-encrypting from a CSEK to a KMS (CMEK) key must preserve content.

    Bug fix: the original assertion was
    ``assert cmek_blob.download_as_string(), test_blob_content`` — the comma
    made ``test_blob_content`` the assertion *message*, so only truthiness
    of the download was checked. Compare the decoded content for real.
    """
    test_blob_name, test_blob_content = test_blob
    cmek_blob = storage_object_csek_to_cmek.object_csek_to_cmek(
        BUCKET, test_blob_name, TEST_ENCRYPTION_KEY_2, KMS_KEY
    )
    assert cmek_blob.download_as_string().decode("utf-8") == test_blob_content
| StarcoderdataPython |
6510533 | <filename>pymatgen/analysis/structure_analyzer.py
#!/usr/bin/env python
"""
This module provides classes to perform topological analyses of structures.
"""
from __future__ import division
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import math
import numpy as np
import itertools
import collections
from pyhull.voronoi import VoronoiTess
class VoronoiCoordFinder(object):
    """
    Uses a Voronoi algorithm to determine the coordination for each site in a
    structure. Coordination weights are solid angles of shared Voronoi
    facets, normalized by the largest facet.
    """

    """Radius in Angstrom cutoff to look for coordinating atoms"""
    default_cutoff = 10.0

    def __init__(self, structure, target=None):
        """
        Args:
            structure:
                Input structure
            target:
                A list of target species to determine coordination for.
                Defaults to all elements present in the structure.
        """
        self._structure = structure
        if target is None:
            self._target = structure.composition.elements
        else:
            self._target = target

    def get_voronoi_polyhedra(self, n):
        """
        Gives a weighted polyhedra around a site. This uses the voronoi
        construction with solid angle weights.
        See ref: A Proposed Rigorous Definition of Coordination Number,
        M. O'Keeffe, Acta Cryst. (1979). A35, 772-775

        Args:
            n:
                site index

        Returns:
            A dictionary of sites sharing a common Voronoi facet with the site
            n and their solid angle weights (normalized to the largest facet).
        """
        localtarget = self._target
        center = self._structure[n]
        # All sites within the cutoff sphere; the center site itself is
        # included at distance 0, so after sorting by distance it sits at
        # index 0 of the tessellation input.
        neighbors = self._structure.get_sites_in_sphere(
            center.coords, VoronoiCoordFinder.default_cutoff)
        neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
        qvoronoi_input = [s.coords for s in neighbors]
        voro = VoronoiTess(qvoronoi_input)
        all_vertices = voro.vertices
        results = {}
        # voro.ridges maps a pair of input-point indices to the Voronoi
        # vertex indices of their shared facet; pairs containing 0 are the
        # facets of the center site's cell.
        for nn, vind in voro.ridges.items():
            if 0 in nn:
                if 0 in vind:
                    # Vertex 0 is qhull's vertex at infinity: the center's
                    # cell is unbounded, i.e. the cutoff was too small.
                    raise RuntimeError("This structure is pathological,"
                                       " infinite vertex in the voronoi "
                                       "construction")
                facets = [all_vertices[i] for i in vind]
                results[neighbors[nn[1]]] = solid_angle(center.coords, facets)
        maxangle = max(results.values())
        resultweighted = {}
        # Normalize by the largest solid angle and keep only target species.
        for nn, angle in results.items():
            if nn.specie in localtarget:
                resultweighted[nn] = angle / maxangle
        return resultweighted

    def get_coordination_number(self, n):
        """
        Returns the coordination number of site with index n
        (the sum of the normalized facet weights).

        Args:
            n:
                site index
        """
        return sum(self.get_voronoi_polyhedra(n).values())

    def get_coordinated_sites(self, n, tol=0, target=None):
        """
        Returns the sites that are in the coordination radius of site with
        index n.

        Args:
            n:
                Site number.
            tol:
                Weight tolerance to determine if a particular pair is
                considered a neighbor.
            target:
                Target element; None keeps all species.

        Returns:
            Sites coordinating input site.
        """
        coordinated_sites = []
        for site, weight in self.get_voronoi_polyhedra(n).items():
            if weight > tol and (target is None or site.specie == target):
                coordinated_sites.append(site)
        return coordinated_sites
class RelaxationAnalyzer(object):
    """
    This class analyzes the relaxation in a calculation by comparing an
    initial and a final structure (volume, lattice parameter, and bond
    distance changes).
    """

    def __init__(self, initial_structure, final_structure):
        """
        Please note that the input and final structures should have the same
        ordering of sites. This is typically the case for most computational
        codes.

        Args:
            initial_structure:
                Initial input structure to calculation.
            final_structure:
                Final output structure from calculation.

        Raises:
            ValueError: if the two structures differ in formula.
        """
        if final_structure.formula != initial_structure.formula:
            raise ValueError("Initial and final structures have different " +
                             "formulas!")
        self.initial = initial_structure
        self.final = final_structure

    def get_percentage_volume_change(self):
        """
        Returns the percentage volume change.

        Returns:
            Volume change in percentage, e.g., 0.055 implies a 5.5% increase.
        """
        initial_vol = self.initial.lattice.volume
        final_vol = self.final.lattice.volume
        return final_vol / initial_vol - 1

    def get_percentage_lattice_parameter_changes(self):
        """
        Returns the percentage lattice parameter changes.

        Returns:
            A dict of the percentage change in lattice parameter, e.g.,
            {'a': 0.012, 'b': 0.021, 'c': -0.031} implies a change of 1.2%,
            2.1% and -3.1% in the a, b and c lattice parameters respectively.
        """
        initial_latt = self.initial.lattice
        final_latt = self.final.lattice
        # Fractional change per axis; 'l' is the axis name ('a'/'b'/'c').
        d = {l: getattr(final_latt, l) / getattr(initial_latt, l) - 1
             for l in ["a", "b", "c"]}
        return d

    def get_percentage_bond_dist_changes(self, max_radius=3.0):
        """
        Returns the percentage bond distance changes for each site up to a
        maximum radius for nearest neighbors.

        Args:
            max_radius:
                Maximum radius to search for nearest neighbors. This radius is
                applied to the initial structure, not the final structure.

        Returns:
            Bond distance changes as a dict of dicts. E.g.,
            {index1: {index2: 0.011, ...}}. For economy of representation, the
            index1 is always less than index2, i.e., since bonding between
            site1 and siten is the same as bonding between siten and site1,
            there is no reason to duplicate the information or computation.
        """
        data = collections.defaultdict(dict)
        # NOTE: xrange — this module targets Python 2 (see __future__ import).
        for inds in itertools.combinations(xrange(len(self.initial)), 2):
            (i, j) = sorted(inds)
            initial_dist = self.initial[i].distance(self.initial[j])
            if initial_dist < max_radius:
                final_dist = self.final[i].distance(self.final[j])
                data[i][j] = final_dist / initial_dist - 1
        return data
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center:
            Center to measure solid angle from.
        coords:
            List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    o = np.array(center)
    # Vectors from the center to each facet vertex, closed into a loop by
    # repeating the first vertex.
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    # Normals of consecutive vector pairs. NOTE(review): the extra
    # cross(r[1], r[0]) append mirrors the first pair with opposite
    # orientation — confirm against the O'Keeffe reference implementation.
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    # Sum of angles between successive facet-plane normals.
    phi = sum([math.acos(-np.dot(n[i], n[i + 1])
                         / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1])))
               for i in range(len(n) - 1)])
    # Spherical-excess style correction: angle sum minus (k - 2) * pi for a
    # spherical polygon with k = len(r) - 1 vertices.
    return phi + (3 - len(r)) * math.pi
def contains_peroxide(structure, relative_cutoff=1.2):
    """
    Determines if a structure contains peroxide anions.

    Args:
        structure:
            Input structure.
        relative_cutoff:
            The peroxide bond distance is 1.49 Angstrom. relative_cutoff *
            1.49 stipulates the maximum distance two O atoms must be from
            each other to be considered a peroxide.

    Returns:
        Boolean indicating if structure contains a peroxide anion.
    """
    max_dist = relative_cutoff * 1.49
    # Collect every site whose species list includes oxygen.
    oxygen_sites = [
        site for site in structure
        if any(sp.symbol == "O" for sp in site.species_and_occu.keys())
    ]
    # Any O-O pair closer than the cutoff counts as a peroxide bond.
    for site_a, site_b in itertools.combinations(oxygen_sites, 2):
        if site_a.distance(site_b) < max_dist:
            return True
    return False
| StarcoderdataPython |
3517216 | #!/usr/bin/env python
"""
Author: <NAME>
Purpose: A simple Flask app that manages vlan configuration on network device.
"""
from flask import abort, jsonify, make_response, request
from sqlalchemy.exc import IntegrityError
from database import db, app, Vlans, update_vlans_db
from configurator.provider import manage
manage_vlans = manage.ManageVlans()
@app.route('/')
def index():
    """Landing endpoint: confirms the service is running."""
    greeting = "Welcome to configurator!"
    return greeting
@app.route('/config/vlans/<int:vlan_id>', methods=['GET'])
def get_vlan(vlan_id):
    """Return the stored config for one vlan as a one-element JSON list.

    Also re-pushes the stored config to the device ("replaced" action) so
    the device stays in sync with the database. Responds 404 if the vlan
    is not in the database.
    """
    vlan = Vlans.query.get(vlan_id)
    if vlan:
        config = [{"vlan_id": vlan.vlan_id, "name": vlan.name, "description": vlan.description}]
        # TODO: Clarify if updating vlan config on device is allowed
        # by other means (like manual login); if not, remove the device
        # edit below.
        # Ensure the vlan config stored in the database is also on
        # the device. This is an idempotent call.
        result = manage_vlans.edit_vlans(config, action="replaced")
        if result:
            app.logger.info("updated vlan config on device from database")
        else:
            app.logger.info("vlan config on device same as that of database")
        return jsonify(config)
    else:
        abort(404, f"vlan resource {vlan_id} not found")
@app.route("/config/vlans", methods=["GET"])
def get_vlans():
    """
    View function which responds to requests to get all vlan details
    from the database, re-syncing the device ("overridden" action) with
    whatever is stored. Returns an empty JSON list when no records exist.
    """
    config = []
    vlan_records = Vlans.query.all()
    app.logger.info("fetched vlan record %s" % vlan_records)
    if vlan_records:
        for vlan_record in vlan_records:
            config.append({"vlan_id": vlan_record.vlan_id, "name": vlan_record.name, "description": vlan_record.description})
        # TODO: Clarify if updating vlan config on device is allowed
        # by other means (like manual login); if not, remove the device
        # edit below.
        # Ensure the vlan config stored in the database is also on
        # the device. This is an idempotent call.
        try:
            result = manage_vlans.edit_vlans(config, action="overridden")
            if result:
                app.logger.info("updated vlans config on device from database")
            else:
                app.logger.info("vlans config on device same as that of database")
            return jsonify(config)
        except Exception as e:
            abort(400, f"Failed to get vlan config from device with error\n{e}")
    else:
        return jsonify([])
@app.route('/config/vlans', methods=['POST'])
def create_vlans():
    """Replace the full vlan configuration (database, then device).

    Expects a JSON list of {"vlan_id", "name", "description"} dicts.
    The database is written first; the device is then overridden to match,
    and the database transaction is rolled back if the device push fails.

    :returns: the accepted config with HTTP 201.
    :raises: 400 on malformed input or a database/device failure.
    """
    if not request.json or not isinstance(request.json, list):
        abort(400, f'invalid json body {request.json}, json body should be of type list')
    config = request.json
    # Validate keys and vlan_id range before touching the database.
    for vlan in config:
        for key in vlan.keys():
            if key not in ['name', 'vlan_id', 'description']:
                abort(400, "invalid key '%s' in config dict %s" % (key, vlan))
            if key == 'vlan_id' and not (0 < vlan[key] <= 1024):
                abort(400, "invalid vlan_id value %s in config dict %s" % (vlan[key], vlan))
    # update the vlan config in database
    try:
        update_vlans_db(config)
        app.logger.info(f"Updated db with vlan config {config}")
    except IntegrityError as e:
        abort(400, f"Failed to update config {config} in db with error\n{e.orig}")
    # Update the vlan config on the device (idempotent call); commit the
    # database transaction only after the device accepted the config.
    try:
        result = manage_vlans.edit_vlans(config, action="overridden")
        if result:
            app.logger.info("overriden vlan config on device")
        else:
            app.logger.info("vlan config same as post request body")
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        app.logger.info(f"device post request failed, rollback database items {config}")
        # Bug fix: this message was a plain string, so "{config}" appeared
        # literally in the response; render both values with an f-string.
        abort(400, f"Failed to update config {config} on device with error\n{e}")
    return jsonify(config), 201
@app.route('/config/vlans/<int:vlan_id>', methods=['PUT'])
def update_task(vlan_id):
    """Create or replace the config of a single vlan (upsert).

    The JSON body must be a dict containing at least ``name`` and a
    ``vlan_id`` that matches the URL. The database is written first and
    only committed once the device accepted the change.

    :returns: the accepted config with HTTP 201.
    :raises: 400 on malformed input or a database/device failure.
    """
    if not request.json or not isinstance(request.json, dict):
        abort(400, f'invalid json body {request.json}, json body should be of type dict')
    config = request.json
    name = config.get('name')
    if not name:
        abort(400, "name key is required")
    if not (0 < vlan_id <= 1024):
        abort(400, "invalid vlan_id value %s in config dict %s" % (vlan_id, config))
    if vlan_id != config.get('vlan_id'):
        abort(400, "vlan_id in url %s should be same as that in body %s" % (vlan_id, config.get('vlan_id')))
    # Upsert: update an existing record, otherwise add a new one.
    update = Vlans.query.get(vlan_id) is not None
    # update the vlan config in database
    try:
        if update:
            update_vlans_db([config], action='update')
            app.logger.info(f"Updated db with vlan config {config}")
        else:
            update_vlans_db([config], action='add')
            app.logger.info(f"Added vlan config to db{config}")
    except IntegrityError as e:
        abort(400, f"Failed to update config {config} in db with error\n{e.orig}")
    # Update the vlan config on the device (idempotent call); commit only
    # after the device accepted the change.
    try:
        result = manage_vlans.edit_vlans(config, action="replaced")
        if result:
            app.logger.info("replaced vlan config on device")
        else:
            app.logger.info("vlan config same as post request body")
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        app.logger.info(f"device post request failed, rollback database items {config}")
        # Bug fix: this message was a plain string, so "{config}" appeared
        # literally in the response; render both values with an f-string.
        abort(400, f"Failed to update config {config} on device with error\n{e}")
    return jsonify(config), 201
@app.route('/config/vlans/<int:vlan_id>/<string:name>', methods=['DELETE'])
def delete_task(vlan_id, name):
    """Delete a vlan from the database and then from the device.

    :returns: ``{"result": true}`` on success.
    :raises: 400 on an invalid id or a database/device failure, 404 when
        the vlan does not exist in the database.
    """
    if not (0 < vlan_id <= 1024):
        abort(400, "invalid vlan_id value %s" % vlan_id)
    delete = Vlans.query.get(vlan_id) is not None
    # update the vlan config in database
    try:
        if delete:
            update_vlans_db([{'vlan_id': vlan_id}], action='delete')
            app.logger.info(f"deleted vlan config with id {vlan_id} from db")
        else:
            app.logger.info(f"vlan_id {vlan_id} record do not exist in db")
            abort(404, f"vlan_id {vlan_id} does not exist")
    except IntegrityError as e:
        abort(400, f"Failed to delete vlan_id {vlan_id} in db with error\n{e.orig}")
    # Remove the vlan on the device (idempotent call); commit only after the
    # device accepted the change.
    try:
        result = manage_vlans.edit_vlans([{'vlan_id': vlan_id, 'name': name}], action="deleted")
        if result:
            app.logger.info(f"deleted vlan_id {vlan_id} on device")
        else:
            # Bug fix: this log line was missing its f prefix (the literal
            # text "{vlan_id}" was logged) and misspelled "exist".
            app.logger.info(f"vlan_id {vlan_id} record do not exist in db")
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        app.logger.info(f"device delete request failed, rollback database items")
        abort(400, "Failed to delete vlan config on device with error\n%s" % str(e))
    return jsonify({'result': True})
@app.errorhandler(400)
def not_found(error):
    """Render 400 errors as a JSON body instead of Flask's HTML page."""
    payload = jsonify({'error': error.get_description()})
    return make_response(payload, 400)
if __name__ == "__main__":
    # Start Flask app. The "host" (bind-all) and "debug" options are both
    # security concerns; acceptable here for testing only, hence the
    # "nosec" marker for bandit.
    app.run(
        host="0.0.0.0", debug=True, use_reloader=False #nosec
    )
| StarcoderdataPython |
6562224 | import os
import time
def acquire_lock(db_path: str):
    """Block until the sidecar lock file for *db_path* can be created.

    Bug fix (TOCTOU race): the original checked ``os.path.exists`` and then
    created the file in a second step, so two processes could both pass the
    check and both "acquire" the lock. ``os.open`` with
    ``O_CREAT | O_EXCL`` makes creation atomic: exactly one caller wins;
    the others keep polling.
    """
    lock_path = db_path + '.lock'
    while True:
        try:
            fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            return
        except FileExistsError:
            # Lock held by someone else; retry shortly.
            time.sleep(0.0001)
def release_lock(db_path: str):
    """Delete the sidecar lock file for *db_path*; a no-op if absent."""
    lock_path = db_path + '.lock'
    try:
        os.remove(lock_path)
    except FileNotFoundError:
        pass
| StarcoderdataPython |
12800143 | import click
from terran.face import face_detection
from terran.io import open_video, write_video
from terran.vis import vis_faces
@click.command(name='find-video')
@click.argument('video-path')
@click.argument('output-path')
@click.option('--threshold', type=float, default=0.5)
@click.option('--batch-size', default=32)
@click.option('--duration', '-d', default=None, type=int)
@click.option('--framerate', '-f', default=None, type=int)
@click.option('--start-time', '-ss', default=None, type=str)
def find_video(
    video_path, output_path, threshold, batch_size, duration, framerate,
    start_time
):
    """Run face detection over a video and write an annotated copy.

    NOTE(review): the ``--threshold`` option is accepted but never passed
    to ``face_detection`` below — confirm whether it should be forwarded.
    """
    # Open video to search in.
    video = open_video(
        video_path,
        batch_size=batch_size,
        read_for=duration,
        start_time=start_time,
        framerate=framerate,
    )
    # Create the video writer, copying the format options such as framerate
    # from `video`.
    writer = write_video(output_path, copy_format_from=video)
    # Iterate over batches of video frames, drawing a progress bar.
    with click.progressbar(video, length=len(video)) as bar:
        for frames in bar:
            faces_per_frame = face_detection(frames)
            for frame, faces in zip(frames, faces_per_frame):
                # If you don't call `vis_faces` directly, the rendering will be
                # done in the writing thread, thus not blocking the main
                # program while drawing.
                writer.write_frame(vis_faces, frame, faces)
    writer.close()
writer.close()
if __name__ == '__main__':
find_video()
| StarcoderdataPython |
1806121 | from rest_framework import viewsets, permissions
from .models import Article, Tag
from .serializers import ArticleSerializer, TagSerializer
class ArticleViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Article objects; authenticated users only."""

    serializer_class = ArticleSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        """Expose every article; override here to scope results later."""
        queryset = Article.objects.all()
        return queryset
class TagViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for Tag objects; authenticated users only."""

    serializer_class = TagSerializer
    permission_classes = [permissions.IsAuthenticated]

    def get_queryset(self):
        """Expose every tag; override here to scope results later."""
        queryset = Tag.objects.all()
        return queryset
| StarcoderdataPython |
41593 | from usefull import read_db
def test_read_csv_db_simple():
    """Sanity-check parsing of the two-row sample database.

    Layout of db_simple.csv:
        page  msg                 parent  choice  end
        1     1. Mi sembra che    0       False   False
        2     ...se ti trovassi   1       True    False

    Performance/clarity fix: the original parsed the CSV four times (one
    ``read_db`` call per assertion); parse once and reuse the result.
    """
    rows = read_db('db_simple.csv')
    assert rows[0]['page'] == 1
    assert rows[0]['msg'][-3:] == 'che'
    assert rows[1]['msg'][:9] == '...se ti '
    assert rows[0]['end'] == False
def test_read_real_db():
    """The full database must contain exactly 167 rows."""
    records = read_db('db.csv')
    assert len(records) == 167
| StarcoderdataPython |
3365001 | <reponame>HugoYZ/panel.residentes.proyectos
# Generated by Django 3.0.2 on 2020-02-06 22:30
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.2; avoid editing once applied to any
    # database.

    dependencies = [
        ('banco_proyectos', '0005_datosresidente_usuario'),
    ]

    operations = [
        # Drops the DatosResidente.usuario field added in migration 0005.
        migrations.RemoveField(
            model_name='datosresidente',
            name='usuario',
        ),
    ]
| StarcoderdataPython |
3468838 | """
lab7
"""
#3.1
i = 0
while i <=5:
if i != 3:
print(i)
i += 1
#3.2
i = 5
result = 1
while 0 < i <= 5:
result *= i
i -= 1
print(result)
#3.3
i = 1
result = 0
while 1 <= i <= 5:
result += i
i += 1
print(result)
#3.4
i = 3
result = 1
while 3 <= i <= 8:
result *= i
i += 1
print(result)
#3.5
i = 4
result = 1
while i <= 8:
result *= i
i += 1
print(result)
#3.6
num_list = [12, 32, 43, 35]
while num_list:
num_list.remove(num_list[0])
print(num_list) | StarcoderdataPython |
6550222 | # MIT License
# (C) Copyright 2021 Hewlett Packard Enterprise Development LP.
#
# customApplianceTags : Custom Appliance Tags
def get_custom_appliance_tags(
    self,
    ne_id: str,
    cached: bool,
) -> dict:
    """Get user-defined appliance tags

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - customApplianceTags
          - GET
          - /customApplianceTags/{neId}?cached={cached}

    :param ne_id: Network Primary Key (nePk) of existing appliance,
        e.g. ``3.NE``
    :type ne_id: str
    :param cached: ``True`` retrieves last known value to Orchestrator,
        ``False`` retrieves values directly from Appliance
    :type cached: bool
    :return: Returns user-defined appliance tags
    :rtype: dict
    """
    endpoint = f"/customApplianceTags/{ne_id}?cached={cached}"
    return self._get(endpoint)
| StarcoderdataPython |
3489736 | from .analytics import cluster, cluster_spatial, ModelResults, predict_labels
from .dynamics import sequence, transition
from .incs import linc
__all__ = ['linc', 'sequence', 'transition', 'cluster', 'cluster_spatial']
| StarcoderdataPython |
3497548 | <reponame>patrickhart/jaxdl
"""Temperature functions"""
from typing import Tuple, Any
import functools
import jax
import numpy as np
from jaxdl.utils.commons import InfoDict, TrainState
@functools.partial(jax.jit)
def update_temperature(temperature_net: TrainState, entropy: float,
  target_entropy: float) -> Tuple[TrainState, InfoDict]:
  """Updates the temperature (alpha) value

  Args:
    temperature_net (TrainState): Temperature network
    entropy (float): Externally passed entropy value
      (NOTE(review): annotated ``float`` but ``.mean()`` is called on the
      difference below, which implies an array -- confirm the caller)
    target_entropy (float): Target entropy value

  Returns:
    Tuple[TrainState, InfoDict]: Updated network
  """
  # temperature loss: alpha * (H - H_target). Its gradient pushes alpha up
  # when entropy is below the target and down when above (SAC-style tuning).
  def temperature_loss_fn(temperature_params):
    temperature = temperature_net.apply_fn(temperature_params)
    temperature_loss = temperature * (entropy - target_entropy).mean()
    return temperature_loss, {
      'temperature': temperature,
      'temperature_loss': temperature_loss
    }
  # value_and_grad with has_aux=True yields ((loss, info), grads).
  loss_info, grads = jax.value_and_grad(temperature_loss_fn, has_aux=True)(
    temperature_net.params)
  new_temperature_net = temperature_net.apply_gradients(grads=grads)
  # loss_info[1] is the aux info dict produced by temperature_loss_fn.
  return new_temperature_net, loss_info[1]
8005631 | #%%
import tensorflow as tf
import tensorflow_hub as hub
import umap
from tqdm import tqdm
#%%
class Embedding:
    """Wraps a Keras model that maps image batches to feature vectors."""
    def __init__(self, model) -> None:
        self.model = model
    @classmethod
    def create_from_hub(cls,
        model_path="https://tfhub.dev/google/imagenet/efficientnet_v2_imagenet1k_m/feature_vector/2",
        width=480,
        height=480,
        depth=3
        ):
        """Build an Embedding from a TF-Hub feature-vector model (weights frozen)."""
        model = tf.keras.Sequential([
            hub.KerasLayer(model_path, trainable=False)
        ])
        model.build([None, width, height, depth])
        return cls(model)
    def embed(self, x):
        """Run the model on one batch; returns None if inference fails."""
        try:
            y = self.model(x)
        except Exception as e:
            # Best-effort: log and continue so one bad batch doesn't abort a run.
            print(e)
            y = None
        return y
    def transform(self, images):
        """Embed every batch in *images*.

        Assumes ``images.data`` is a tf.data.Dataset yielding
        (image_batch, path_batch) pairs -- TODO confirm against the caller.
        NOTE(review): a None returned by embed() would break tf.concat below.
        """
        features = []
        paths = []
        for batch, ps in tqdm(images.data, total=images.data.cardinality().numpy()):
            embedding = self.embed(batch)
            # Add metadata
            paths.append(ps)
            features.append(embedding)
        # Flatten the per-batch results into one (N, dim) array / flat path list.
        features = tf.concat(features, axis=0).numpy()
        paths = tf.concat(paths, axis=0).numpy().tolist()
        return features, paths
# %%
class Reduction:
    """Reduces high-dimensional feature vectors to a 2-D UMAP embedding."""

    def __init__(self) -> None:
        pass

    def reduce(self, features):
        """Project *features* (N x dim array) to a 2-D embedding.

        Fix: the computed embedding was previously assigned to a local and
        discarded; it is now returned (previously the method returned None).
        """
        embedding = umap.UMAP().fit_transform(features)
        return embedding
| StarcoderdataPython |
9679209 | #!/usr/bin/python
import Skype4Py
import sys
import json
import os
def on_message(message, status):
    """Forward each newly received Skype message to stdout as one JSON line."""
    if status == Skype4Py.cmsReceived:
        json_string = json.dumps({
            'user': message.Sender.Handle,
            'message': message.Body,
            'room': message.Chat.Name,
        })
        # One JSON object per line; flush so the consuming process (hubot)
        # sees the message immediately.
        sys.stdout.write(json_string + '\n')
        sys.stdout.flush()
# Attach to the local Skype client; on Linux the IPC transport is
# configurable via HUBOT_SKYPE_TRANSPORT (default 'x11').
if sys.platform.startswith('linux'):
    s = Skype4Py.Skype(Transport=os.environ.get('HUBOT_SKYPE_TRANSPORT', 'x11'))
else:
    s = Skype4Py.Skype()
s.OnMessageStatus = on_message
s.Attach()
# Relay loop: read one JSON object per stdin line and send its 'message'
# to the chat identified by 'room'.
while True:
    line = sys.stdin.readline()
    try:
        decoded = json.loads(line)
        c = s.Chat(decoded['room'])
        c.SendMessage(decoded['message'])
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit --
        # presumably deliberate keep-alive behaviour; consider narrowing.
        continue
| StarcoderdataPython |
3530694 | #!/usr/bin/env python3
"""
Merges several csv files (the first file serves as base)
Assumes that they have the same set of columns,
but the columns do not have to be in the same order
"""
import csv
import sys
def main():
    """Merge the rows of CSV files 2..N (sys.argv[2:]) into the first file.

    All files must share the same set of columns; the column order may
    differ, since rows are remapped to the first file's header on write.
    """
    if len(sys.argv) < 3:
        print("Wrong number of arguments: specify at least two files to merge!")
        # Fix: use sys.exit -- the 'exit' builtin is only guaranteed in
        # interactive sessions (site module).
        sys.exit(1)
    # get header: the base file defines the output column order.
    # Fix: per the csv module docs, csv file objects should be opened with
    # newline='' to avoid newline translation issues (blank rows on Windows).
    with open(sys.argv[1], "r", newline="") as merge_to:
        header = csv.DictReader(merge_to).fieldnames
    # Append rows from every other file, remapped to the base header.
    with open(sys.argv[1], "a", newline="") as merge_to:
        writer = csv.DictWriter(merge_to, fieldnames=header)
        for i in range(2, len(sys.argv)):
            with open(sys.argv[i], "r", newline="") as merge_from:
                reader = csv.DictReader(merge_from)
                for row in reader:
                    writer.writerow(row)
if __name__ == '__main__':
main()
| StarcoderdataPython |
306941 | from .constants import COLOR, ANNOTATIONS, TYPE
from .struct.hetnet import HetNet
from .struct.multihetnet import MultiHetNet
__all__ = ['HetNet', 'MultiHetNet', 'hgnc', 'mi', 'up']
__version__ = '0.1.0'
__title__ = 'hetnetana'
__description__ = 'A Python package for integrating data and performing topological footprint analysis'
__url__ = 'https://github.com/cthoyt/hetnetana'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'All Rights Reserved.'
__copyright__ = 'Copyright (c) 2016-2018 <NAME>'
class ColorFormatter:
    """Identifier formatter that doubles as a color carrier.

    An instance is callable -- it applies the stored template to the given
    index -- while ``str(instance)`` yields the associated color code.
    """

    def __init__(self, fmt, color):
        self.fmt = fmt
        self.color = color

    def __call__(self, i):
        return self.fmt.format(i)

    def __str__(self):
        return self.color
hgnc = ColorFormatter('hgnc{}', 'g')
mi = ColorFormatter('MI{:07}', 'm')
up = ColorFormatter('UP{:04}', 'p')
snp = ColorFormatter('rs{:07}', 's')
| StarcoderdataPython |
12823077 | import struct
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from fido2.attestation import Attestation
from fido2.ctap2 import CTAP2, CredentialManagement
from fido2.hid import CTAPHID
from fido2.utils import hmac_sha256
from fido2.webauthn import PublicKeyCredentialCreationOptions, PublicKeyCredentialDescriptor
import fido2.ctap2.base
from solo import helpers
# Base class
# Currently some methods are implemented here since they are the same in both devices.
class SoloClient:
    """Base class for Solo security-key clients.

    Implements the shared CTAPHID/CTAP2 helpers; transport discovery and
    device-specific operations are stubs to be overridden by subclasses.
    """

    def __init__(self):
        self.origin = "https://example.org"
        self.host = "example.org"
        self.user_id = b"they"
        self.do_reboot = True

    def set_reboot(self, val):
        """option to reboot after programming"""
        self.do_reboot = val

    def reboot(self):
        pass

    def find_device(self, dev=None, solo_serial=None):
        pass

    def get_current_hid_device(self):
        """Return current device class for CTAPHID interface if available."""
        pass

    def get_current_fido_client(self):
        """Return current fido2 client if available."""
        pass

    def send_data_hid(self, cmd, data):
        """Send *data* over CTAPHID with a 1-second timeout."""
        # Accept str payloads by packing each character as one raw byte.
        if not isinstance(data, bytes):
            data = struct.pack("%dB" % len(data), *[ord(x) for x in data])
        with helpers.Timeout(1.0) as event:
            return self.get_current_hid_device().call(cmd, data, event)

    def bootloader_version(self):
        pass

    def solo_version(self):
        pass

    def get_rng(self, num=0):
        pass

    def wink(self):
        self.send_data_hid(CTAPHID.WINK, b"")

    def ping(self, data="pong"):
        return self.send_data_hid(CTAPHID.PING, data)

    def reset(self):
        CTAP2(self.get_current_hid_device()).reset()

    def change_pin(self, old_pin, new_pin):
        client = self.get_current_fido_client()
        client.client_pin.change_pin(old_pin, new_pin)

    def set_pin(self, new_pin):
        client = self.get_current_fido_client()
        client.client_pin.set_pin(new_pin)

    def make_credential(self, pin=None):
        """Register a test credential and return its attestation certificate."""
        client = self.get_current_fido_client()
        rp = {"id": self.host, "name": "example site"}
        user = {"id": self.user_id, "name": "example user"}
        challenge = b"Y2hhbGxlbmdl"
        options = PublicKeyCredentialCreationOptions(
            rp,
            user,
            challenge,
            [{"type": "public-key", "alg": -8}, {"type": "public-key", "alg": -7}],
        )
        result = client.make_credential(options, pin=pin)
        attest = result.attestation_object
        data = result.client_data
        try:
            attest.verify(data.hash)
        except AttributeError:
            # Older python-fido2 attestation objects lack .verify(); fall
            # back to the format-specific verifier.
            verifier = Attestation.for_type(attest.fmt)
            verifier().verify(attest.att_statement, attest.auth_data, data.hash)
        print("Register valid")
        x5c = attest.att_statement["x5c"][0]
        cert = x509.load_der_x509_certificate(x5c, default_backend())
        return cert

    def cred_mgmt(self, pin):
        """Return a CredentialManagement helper authenticated with *pin*."""
        client = self.get_current_fido_client()
        token = client.client_pin.get_pin_token(pin)
        ctap2 = CTAP2(self.get_current_hid_device())
        return CredentialManagement(ctap2, client.client_pin.protocol, token)

    def enter_solo_bootloader(self):
        """
        If solo is configured as solo hacker or something similar,
        this command will tell the token to boot directly to the bootloader
        so it can be reprogrammed
        """
        pass

    def enter_bootloader_or_die(self):
        pass

    def is_solo_bootloader(self):
        """For now, solo bootloader could be the NXP bootrom on Solo v2."""
        pass

    def program_kbd(self, cmd):
        ctap2 = CTAP2(self.get_current_hid_device())
        return ctap2.send_cbor(0x51, cmd)

    def sign_hash(self, credential_id, dgst, pin, rp_id, trusted_comment=None):
        """Sign digest *dgst* with a resident credential (vendor command 0x50)."""
        ctap2 = CTAP2(self.get_current_hid_device())
        client = self.get_current_fido_client()
        pin_auth = None
        if pin:
            pin_token = client.client_pin.get_pin_token(pin)
            # Fix: the identifier was corrupted in the source
            # ("hmac_<PASSWORD>56", a NameError at runtime); restore the
            # hmac_sha256 call imported from fido2.utils at the top of the file.
            pin_auth = hmac_sha256(pin_token, dgst)[:16]
        return ctap2.send_cbor(
            0x50,
            fido2.ctap2.base.args(
                dgst,
                PublicKeyCredentialDescriptor("public-key", credential_id),
                pin_auth,
                trusted_comment,
                rp_id,
            ),
        )

    def program_file(self, name):
        pass
| StarcoderdataPython |
1728533 | from typing import List
from pydantic import BaseModel
from aos_sw_api.globel_models import CollectionResult, MacAddress
class MacTableEntry(BaseModel):
    """One learned entry from a switch's MAC address table."""
    mac_address: str  # learned MAC address
    vlan_id: int  # VLAN on which the address was learned
    port_id: str  # port the address was learned on
class MacTableEntryList(BaseModel):
    """Collection wrapper for MAC-table entries plus result metadata."""
    collection_result: CollectionResult
    mac_table_entry_element: List[MacTableEntry]
| StarcoderdataPython |
4894664 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
class Wait:
    """Thin convenience wrapper around Selenium explicit waits."""
    def __init__(self, driver, time=20):
        # Keep both the configured wait (to block until presence) and the raw
        # driver (to fetch the located element/s afterwards).
        self.__wait = WebDriverWait(driver, time)
        self.__driver = driver
    def get_by_xpath(self, xpath):
        """Wait for an element matching *xpath*, then return it."""
        self.__wait.until(presence_of_element_located((By.XPATH, xpath)))
        return self.__driver.find_element_by_xpath(xpath)
    def get_multiple_by_xpath(self, xpath):
        """Wait for at least one match of *xpath*, then return all matches."""
        self.__wait.until(presence_of_element_located((By.XPATH, xpath)))
        return self.__driver.find_elements_by_xpath(xpath)
| StarcoderdataPython |
4851703 | <reponame>Enkya/ims_beta
from flask import request, jsonify, g, url_for
from flask_restplus import abort, Resource, fields, Namespace, marshal_with
from flask_restplus import marshal
from sqlalchemy import desc
from app.models.company import Company
from app.models.employee import Employee
from app.models.uniqueId import UniqueId
from app.models.resource import ResourceMeta
from app.models.typeapproval import Typeapproval
from app.utils.utilities import auth
from instance.config import Config
from datetime import datetime
typeapproval_api = Namespace(
'typeapproval', description='A typeapproval creation namespace')
typeapproval_fields = typeapproval_api.model(
'Typeapproval',
{
'id': fields.Integer(),
'taUniqueId': fields.String(
required=True,
attribute='ta_unique_id.value'),
'equipmentCategory': fields.String(
required=False,
attribute='equipment_category'),
'equipmentModel': fields.String(
required=False,
attribute='equipment_model'),
'equipmentName': fields.String(
required=False,
attribute='equipment_name'),
'equipmentDesc': fields.String(
required=False,
attribute='equipment_desc'),
'statusApproved': fields.Boolean(attribute='status_approved'),
'applicableStandards': fields.String(
required=False, attribute='applicable_standards'),
'approvalRejectionDate': fields.DateTime(
required=False,
attribute='approval_rejection_date'),
'applicant': fields.String(required=False, attribute='applicant.name'),
'assessedBy': fields.String(
required=False,
attribute='assessed_by.contact_person.person.full_name'),
'report': fields.String(required=False, attribute='report.full_name'),
'taCertificate': fields.String(
required=False,
attribute='ta_certificate.full_name'),
'date_created': fields.DateTime(
required=False,
attribute='date_created'),
'date_modified': fields.DateTime(
required=False,
attribute='date_modified'),
}
)
@typeapproval_api.route('', endpoint='typeapproval')
class TypeapprovalEndPoint(Resource):
    """Collection endpoint: list typeapproval records and create new ones."""

    @typeapproval_api.response(
        200,
        'Successful Retrieval of Typeapproval records')
    @typeapproval_api.response(200, 'No typeapproval records found')
    def get(self):
        ''' Retrieve typeapproval records'''
        search_term = request.args.get('q') or None
        limit = request.args.get('limit') or Config.MAX_PAGE_SIZE
        page_limit = 100 if int(limit) > 100 else int(limit)
        # Fix: query-string values are strings; coerce before numeric
        # comparisons (previously `page < 1` raised TypeError when the
        # client supplied ?page=...).
        page = int(request.args.get('page') or 1)
        if page_limit < 1 or page < 1:
            return abort(400, 'Page or Limit cannot be negative values')
        typeapproval = Typeapproval.query.filter_by(active=True).\
            order_by(desc(Typeapproval.date_created))
        if typeapproval.all():
            typeapproval_records = typeapproval
            if search_term:
                # Fix: previously referenced the undefined name
                # `typeapproval_data`, raising NameError on every search.
                typeapproval_records = typeapproval_records.filter(
                    Typeapproval.ta_unique_id.ilike('%'+search_term+'%')
                )
            typeapproval_paged = typeapproval_records.paginate(
                page=page, per_page=page_limit, error_out=True
            )
            results = dict(data=marshal(
                typeapproval_paged.items,
                typeapproval_fields))
            # Pagination metadata; prev/next links are relative URLs.
            pages = {
                'page': page,
                'per_page': page_limit,
                'total_data': typeapproval_paged.total,
                'pages': typeapproval_paged.pages
            }
            if page == 1:
                pages['prev_page'] = url_for('api.typeapproval') + \
                    '?limit={}'.format(page_limit)
            if page > 1:
                pages['prev_page'] = url_for('api.typeapproval') + \
                    '?limit={}&page={}'.format(page_limit, page-1)
            if page < typeapproval_paged.pages:
                pages['next_page'] = url_for('api.typeapproval') + \
                    '?limit={}&page={}'.format(page_limit, page+1)
            results.update(pages)
            return results, 200
        return abort(404, message='No Typeapproval found for specified user')

    @typeapproval_api.response(201, 'Typeapproval created successfully!')
    @typeapproval_api.response(409, 'Typeapproval already exists!')
    @typeapproval_api.response(500, 'Internal Server Error')
    @typeapproval_api.doc(model='Typeapproval', body=typeapproval_fields)
    def post(self):
        ''' Create a typeapproval resource'''
        arguments = request.get_json(force=True)
        # NOTE(review): .strip() on a missing field raises AttributeError
        # before the try-block below catches anything -- confirm every field
        # is mandatory in the API contract.
        ta_unique_id = arguments.get('taUniqueId').strip() or None
        status_approved = arguments.get('statusApproved') or False
        equipment_category = arguments.get('equipmentCategory').strip() or None
        equipment_name = arguments.get('equipmentName').strip() or None
        equipment_model = arguments.get('equipmentModel').strip() or None
        equipment_desc = arguments.get('equipmentDesc').strip() or None
        applicable_standards = arguments.get(
            'applicableStandards').strip() or None
        approval_rejection_date = arguments.get(
            'approvalRejectionDate').strip() or None
        approval_rejection_date = datetime.strptime(
            approval_rejection_date, '%d-%m-%y'
        )
        ta_certificate_id = arguments.get('taCertificateID').strip() or None
        assessed_by_id = arguments.get('assessedBy').strip() or None
        applicant_id = int(arguments.get('applicant').strip()) or None
        report_url = arguments.get('report').strip() or None
        try:
            # Reuse an existing report resource when one matches the URL.
            report = ResourceMeta.query.filter_by(full_name=report_url).first()
            if not report:
                report = ResourceMeta(
                    version=1,
                    name=report_url.split('/')[-1],
                    location=report_url.split('/')[:-1])
            if not applicant_id:
                return abort(400, message='Applicant needed to process data')
            applicant = Company.query.filter_by(
                id=applicant_id,
                active=True).first()
            assessed_by = Employee.query.filter_by(id=assessed_by_id).first()
            ta_certificate = ResourceMeta.query.filter_by(
                id=ta_certificate_id).first()
            typeapproval = Typeapproval(
                ta_unique_id=ta_unique_id,
                status_approved=status_approved,
                equipment_category=equipment_category,
                equipment_name=equipment_name,
                equipment_model=equipment_model,
                equipment_desc=equipment_desc,
                applicable_standards=applicable_standards,
                approval_rejection_date=approval_rejection_date,
                assessed_by=assessed_by,
                ta_certificate=ta_certificate,
                applicant=applicant,
                report=report
            )
            if typeapproval.save_typeapproval():
                return {
                    'message': 'Typeapproval record created successfully!'
                }, 201
            return abort(409, message='Typeapproval already exists!')
        except Exception as e:
            abort(
                400,
                message='Failed to create new typeapproval -> {}'.format(e))
@typeapproval_api.route(
    '/<int:typeapproval_id>',
    endpoint='single_typeapproval')
class SingleTypeapprovalEndpoint(Resource):
    """Item endpoint: fetch, update or soft-delete one typeapproval record."""
    @typeapproval_api.header('x-access-token', 'Access Token', required=True)
    @marshal_with(typeapproval_fields)
    @typeapproval_api.response(200, 'Successful retrieval of typeapproval')
    @typeapproval_api.response(400, 'No typeapproval found with specified ID')
    def get(self, typeapproval_id):
        ''' Retrieve individual typeapproval with given typeapproval_id '''
        typeapproval = Typeapproval.query.filter_by(
            id=typeapproval_id, active=True).first()
        if typeapproval:
            return typeapproval, 200
        abort(404, message='No typeapproval found with specified ID')
    @typeapproval_api.header('x-access-token', 'Access Token', required=True)
    @typeapproval_api.response(200, 'Successfully Updated Typeapproval')
    @typeapproval_api.response(
        400,
        'Typeapproval with id {} not found or not yours.')
    @typeapproval_api.marshal_with(typeapproval_fields)
    def put(self, typeapproval_id):
        ''' Update typeapproval with given typeapproval_id

        NOTE(review): this updates a `name` attribute which does not appear
        in `typeapproval_fields`, and calls `typeapproval.save()` while the
        model elsewhere exposes `save_typeapproval()` -- confirm both against
        the Typeapproval model.
        '''
        arguments = request.get_json(force=True)
        name = arguments.get('name').strip()
        typeapproval = Typeapproval.query.filter_by(
            id=typeapproval_id, active=True).first()
        if typeapproval:
            if name:
                typeapproval.name = name
                typeapproval.save()
            return typeapproval, 200
        else:
            abort(
                404,
                message='Typeapproval with id {} not found'.format(
                    typeapproval_id))
    @typeapproval_api.header('x-access-token', 'Access Token', required=True)
    @auth.login_required
    @typeapproval_api.response(
        200, 'Typeapproval with id {} successfully deleted.')
    @typeapproval_api.response(
        400,
        'Typeapproval with id {} not found or not yours.')
    def delete(self, typeapproval_id):
        ''' Delete typeapproval with typeapproval_id as given '''
        typeapproval = Typeapproval.query.filter_by(
            id=typeapproval_id, active=True).first()
        if typeapproval:
            # delete_typeapproval() presumably performs a soft delete
            # (records are filtered on active=True above) -- confirm.
            if typeapproval.delete_typeapproval():
                response = {
                    'message': 'Typeapproval with id {} deleted.'.format(
                        typeapproval_id)
                }
                return response, 200
        else:
            abort(
                404,
                message='Typeapproval with id {} not found.'.format(
                    typeapproval_id)
            )
| StarcoderdataPython |
1846489 | from django.contrib import admin
from django import forms
from django.forms.models import BaseInlineFormSet
from .models import Doctor, Patient, Order, OrderType, OrderStatus, OrderColor, OrderTypeEntry
# Register your models here.
"""
class OrderTypeEntryInline(admin.StackedInline):
model = OrderTypeEntry
# Using extra to display and exact amount of extra field forms for the OrderTypeEntry in the creation form of a model.
extra = 1
"""
class OrderTypeEntryInline(admin.TabularInline):
    """Tabular inline so entries can be edited directly on the Order form."""
    model = OrderTypeEntry
    # Using extra to display and exact amount of extra field forms for the OrderTypeEntry in the creation form of a model.
    extra = 1
    fields = [
        'order', 'color', 'type', 'status', 'unitCount', 'redo', 'paid', 'warranty'
    ]
class DoctorAdmin(admin.ModelAdmin):
    """Admin configuration for Doctor records with audit-user stamping."""
    fields = [
        'firstName', 'lastName', 'cabinet', 'phone'
    ]
    list_display = [
        'id', 'fullName', 'firstName', 'lastName', 'cabinet', 'phone', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'firstName', 'lastName', 'cabinet', 'phone', 'createdBy__email'
    ]
    def save_model(self, request, obj, form, change):
        # Stamp both audit fields with the acting admin user.
        # NOTE(review): createdBy is overwritten on edits too -- confirm intended.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class PatientAdmin(admin.ModelAdmin):
    """Admin configuration for Patient records with audit-user stamping."""
    fields = [
        'firstName', 'lastName', 'phone', 'details'
    ]
    list_display = [
        'id', 'fullName', 'firstName', 'lastName', 'phone', 'details', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'firstName', 'lastName', 'phone', 'createdBy__username'
    ]
    def save_model(self, request, obj, form, change):
        # Stamp both audit fields with the acting admin user.
        # NOTE(review): createdBy is overwritten on edits too -- confirm intended.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order with inline editing of its OrderTypeEntry rows."""
    fields = [
        'doctor', 'patient'
    ]
    list_display = [
        'id', 'doctor', 'patient', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'type__type', 'doctor__firstName', 'doctor__lastName', 'patient__firstName', 'patient__lastName', 'createdBy__username'
    ]
    inlines = [OrderTypeEntryInline]
    def save_formset(self, request, form, formset, change):
        """Stamp audit users on inline instances before saving them.

        Fixes: removed leftover debug prints ("da"/"nu"), the unused
        ``pre_instance`` variable, and the duplicated if/else branches that
        performed identical assignments regardless of ``change``.
        """
        instances = formset.save(commit=False)
        for obj in formset.deleted_objects:
            obj.delete()
        for instance in instances:
            if request.method == "POST":
                # NOTE(review): as in the original, createdBy is overwritten
                # on edits as well -- confirm intended.
                instance.createdBy = request.user
                instance.updatedBy = request.user
            instance.save()
        formset.save_m2m()
    def getDoctor(self, obj):
        # NOTE(review): not referenced in list_display; kept for compatibility.
        return obj.fullName()
    def getPatient(self, obj):
        # NOTE(review): not referenced in list_display; kept for compatibility.
        return obj.fullName()
    def save_model(self, request, obj, form, change):
        # Stamp audit fields with the acting admin user.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class OrderTypeAdmin(admin.ModelAdmin):
    """Admin for OrderType; displays price-per-unit under a friendly label."""
    fields = [
        'type', 'ppu'
    ]
    list_display = [
        'id', 'type', 'getPricePerUnit', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'type', 'ppu', 'createdBy__email'
    ]
    def getOrderId(self, obj):
        # NOTE(review): not referenced in list_display, and OrderType's model
        # fields above show no 'order' relation -- looks like dead code; confirm.
        return obj.order.id
    def getPricePerUnit(self, obj):
        return obj.ppu
    getPricePerUnit.admin_order_field = 'ppu'
    getPricePerUnit.short_description = 'Price Per Unit'
    def save_model(self, request, obj, form, change):
        # Stamp audit fields with the acting admin user.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class OrderStatusAdmin(admin.ModelAdmin):
    """Admin configuration for the OrderStatus lookup table."""
    fields = [
        'status'
    ]
    list_display = [
        'id', 'status', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'status', 'createdBy__email'
    ]
    def save_model(self, request, obj, form, change):
        # Stamp audit fields with the acting admin user.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class OrderColorAdmin(admin.ModelAdmin):
    """Admin configuration for the OrderColor lookup table."""
    fields = [
        'color'
    ]
    list_display = [
        'id', 'color', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'color', 'createdBy__email'
    ]
    def save_model(self, request, obj, form, change):
        # Stamp audit fields with the acting admin user.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
class OrderTypeEntryAdmin(admin.ModelAdmin):
    """Admin for OrderTypeEntry; surfaces the parent order's id/doctor/patient."""
    fields = [
        'order', 'color', 'type', 'status', 'unitCount', 'redo', 'paid', 'warranty'
    ]
    list_display = [
        'id', 'getOrderId', 'getOrderDoctor', 'getOrderPatient', 'color', 'type', 'status', 'unitCount', 'redo', 'paid', 'warranty', 'createdBy', 'createdAt', 'updatedBy', 'updatedAt'
    ]
    search_fields = [
        'order', 'type__type', 'order__doctor__firstName', 'order__doctor__lastName', 'order__patient__firstName', 'order__patient__lastName', 'status__status', 'createdBy__email'
    ]
    def getOrderId(self, obj):
        # Display helper: parent order's primary key, sortable by order__id.
        return obj.order.id
    getOrderId.admin_order_field = 'order__id'
    getOrderId.short_description = 'Order Id'
    def getOrderDoctor(self, obj):
        return obj.order.doctor
    getOrderDoctor.admin_order_field = 'order__doctor'
    getOrderDoctor.short_description = 'Doctor'
    def getOrderPatient(self, obj):
        return obj.order.patient
    getOrderPatient.admin_order_field = 'order__patient'
    getOrderPatient.short_description = 'Patient'
    def save_model(self, request, obj, form, change):
        # Stamp audit fields with the acting admin user.
        obj.createdBy = request.user
        obj.updatedBy = request.user
        super().save_model(request, obj, form, change)
# Register each model with its customised ModelAdmin.
admin.site.register(Doctor, DoctorAdmin)
admin.site.register(Patient, PatientAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderType, OrderTypeAdmin)
admin.site.register(OrderStatus, OrderStatusAdmin)
admin.site.register(OrderColor, OrderColorAdmin)
admin.site.register(OrderTypeEntry, OrderTypeEntryAdmin)
| StarcoderdataPython |
8076463 | from enum import Enum, auto
class Types(Enum):
    """Type tags understood by the flex type checker."""
    UNIT = auto()
    BOOL = auto()
    INT = auto()
    SYMB = auto()
    VOID = auto()  # used for an empty parameter list
    FUNC = auto()  # the function arrow '->'
class Sym:
    """Wrapper marking a value as a symbol literal."""
    def __init__(self, val):
        self.val = val
    def __str__(self):
        return self.val
class Helper:
    """Conversions between literal values, surface type names and Types
    tags, plus structural type-list equality."""

    def type_from_value(self, value):
        """Map a literal value to its Types tag."""
        if value == '#u':
            return Types.UNIT
        if value == '->':
            return Types.FUNC
        # Booleans must be tested before int: bool is a subclass of int.
        if value is True or value is False:
            return Types.BOOL
        if isinstance(value, int):
            return Types.INT
        if isinstance(value, Sym):
            return Types.SYMB
        raise ValueError('Could not get Type from:', value)

    def type_from_name(self, name):
        """Map a textual type name to its Types tag."""
        named = {
            'unit': Types.UNIT,
            'bool': Types.BOOL,
            'int': Types.INT,
            'symb': Types.SYMB,
            '->': Types.FUNC,
        }
        if name not in named:
            raise ValueError('No such Type:', name)
        return named[name]

    def normalise_types(self, args):
        """Recursively convert a (possibly nested) list of type names to
        Types tags; an empty list becomes [Types.VOID]."""
        if args == []:
            return [Types.VOID]
        return [
            self.normalise_types(arg) if isinstance(arg, list)
            else self.type_from_name(arg)
            for arg in args
        ]

    def get_type(self, arg):
        """Tag for a parsed argument: [] is VOID, a list carries its type
        name at index 1, anything else is a literal value."""
        if arg == []:
            return Types.VOID
        if isinstance(arg, list):
            return self.type_from_name(arg[1])
        return self.type_from_value(arg)

    def types_are_equal(self, foo, bar):
        """Structural equality over (possibly nested) type lists; leaf
        tags are compared by identity."""
        if isinstance(foo, list) != isinstance(bar, list):
            return False
        if not isinstance(foo, list):
            return foo is bar
        if len(foo) != len(bar):
            return False
        return all(self.types_are_equal(x, y) for x, y in zip(foo, bar))
class Primitive:
    """Arity and type signatures for the built-in (primitive) operators."""
    def __init__(self):
        # Operator name lists used for arity checking in __call__.
        self.primitives_unary = ['not']
        self.primitives_binary = ['+', '-', '*',
                                  '/', '%', '=',
                                  '!=', '<', '<=',
                                  '>', '>=', 'sym=?',
                                  'and', 'or',
                                  'bool=?', ]
        self.all_ops = self.primitives_binary + self.primitives_unary
    def __call__(self, op, args):
        """Validate the arity and argument types of a primitive application;
        returns the operator's signature as ([arg Types], return Type)."""
        argc = len(args)
        # check for argc
        if argc > 2 or argc < 1:
            raise ValueError('wrong-number-of-args')
        elif argc == 1 and op not in self.primitives_unary:
            raise ValueError('wrong-number-of-args')
        elif argc == 2 and op not in self.primitives_binary:
            raise ValueError('wrong-number-of-args')
        helper = Helper()
        types = self.types(op)
        # Tag each actual argument and compare against the declared signature.
        foo = [helper.get_type(x) for x in args]
        if not helper.types_are_equal(foo, types[0]):
            raise ValueError('Prim', op, 'arguments are not valid')
        return types
    def raw_types(self, op):
        """Signature of *op* in surface syntax: ['->', [arg names], ret name]."""
        if op in ['+', '-', '*', '/', '%']:
            return ['->', ['int', 'int'], 'int']
        elif op in ['<', '=', '>', '<=', '>=', '!=']:
            return ['->', ['int', 'int'], 'bool']
        elif op in ['or', 'and', 'bool=?']:
            return ['->', ['bool', 'bool'], 'bool']
        elif op == 'not':
            return ['->', ['bool'], 'bool']
        elif op == 'sym=?':
            return ['->', ['symb', 'symb'], 'bool']
        else:
            raise ValueError('No type for prim op:', op)
    def types(self, op):
        """Signature of *op* as Types tags: ([arg tags], return tag)."""
        helper = Helper()
        _raw = self.raw_types(op)
        args = []
        for arg in _raw[1]:
            args.append(helper.type_from_name(arg))
        return_type = helper.type_from_name(_raw[-1])
        return args, return_type
class TypeFlex:
    """Checker for 'flexk' expressions: verifies an expression tree against
    a declared arrow type."""
    def __init__(self):
        self.prim_type = Primitive()
        self.helper = Helper()
        # Parameter name -> Types tag, filled in while descending the tree.
        self.vars = {}
    def validate(self, exps, types):
        """Normalise the surface type names, then check *exps* against them."""
        normalised_types = self.helper.normalise_types(types)
        return self.check(exps, normalised_types)
    def check(self, _exps, _types):
        """Recursive core: _exps is [params, body], _types is [param tags, result]."""
        # Tag each parameter and record its name; [] (no params) stays out.
        args_types = [self.helper.get_type(x) for x in _exps[0]]
        for x, t in zip(_exps[0], args_types):
            if x == []:
                continue
            self.vars[x[0]] = t
        if args_types != _types[0]:
            return False
        # A non-list body is a literal: its value tag must match the result tag.
        if not isinstance(_exps[1], list):
            return self.helper.type_from_value(_exps[1]) is _types[-1]
        exps = _exps[1]
        types = _types[1]
        if exps[0] == 'abs':
            # Lambda: the declared type must be an arrow; recurse into it.
            if types[0] is not Types.FUNC:
                return False
            return self.check(exps[1:], types[1:])
        elif exps[0] == 'sym':
            return types is Types.SYMB
        elif exps[0] == 'prim':
            # Primitive application: check operator signature and result tag.
            if not isinstance(types, list) or types[0] != Types.FUNC:
                return False
            types = types[1:]
            op, args = exps[1], exps[2:]
            prim_args_types, return_type = self.prim_type(op, args)
            if not self.helper.types_are_equal(prim_args_types, types[0]):
                return False
            return return_type is types[-1]
        elif exps[0] in self.prim_type.all_ops:
            # Bare-operator form: rewrite in place into an explicit 'prim' node
            # and re-check (mutates the caller's expression list).
            _exps[1].insert(0, 'prim')
            return self.check(_exps, _types)
        raise ValueError('Uh, oh', exps, types)
def type_check(_exp, _type):
    """Return True iff *_exp* (a 'flexk' form) conforms to *_type*
    (a '=>' form); type errors are printed and reported as False."""
    if _exp[0] != 'flexk' or _type[0] != '=>':
        return False
    checker = TypeFlex()
    try:
        return checker.validate(_exp[1:], _type[1:])
    except ValueError as err:
        print(str(err))
        return False
| StarcoderdataPython |
4849284 | <reponame>CharaD7/azure-sdk-for-python<filename>unreleased/azure-mgmt-machinelearning/azure/mgmt/machinelearning/models/module_asset_parameter.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ModuleAssetParameter(Model):
    """Parameter definition for a module asset.

    :param name: Parameter name.
    :type name: str
    :param parameter_type: Parameter type.
    :type parameter_type: str
    :param mode_values_info: Definitions for nested interface parameters if
     this is a complex module parameter.
    :type mode_values_info: dict
    """
    # msrest wire mapping: Python attribute -> JSON key and serialized type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameter_type': {'key': 'parameterType', 'type': 'str'},
        'mode_values_info': {'key': 'modeValuesInfo', 'type': '{ModeValueInfo}'},
    }
    def __init__(self, name=None, parameter_type=None, mode_values_info=None):
        self.name = name
        self.parameter_type = parameter_type
        self.mode_values_info = mode_values_info
| StarcoderdataPython |
3584167 |
'''
Dual output E3648A 0-8V / 0-20V 2.5A
http://cp.literature.agilent.com/litweb/pdf/E3646-90001.pdf
'''
import time
import sys
import datetime
import serial
class Timeout(Exception):
    """Serial-communication timeout (defined for callers; not raised in the
    code visible in this module)."""
    pass
def now():
    """Current UTC time as an ISO-8601 formatted string."""
    stamp = datetime.datetime.utcnow()
    return stamp.isoformat()
def dbg(s):
    """Print a timestamped debug line; disabled via the `if 0` switch."""
    if 0:
        # Fix: use the function form of print so this line parses on
        # Python 3 too (a single-argument print(...) is valid in both 2 and 3).
        print('GPIO %s: %s' % (now(), s))
'''
*********************************
Serial
*********************************
Just send commands verbatim
'''
class PUSerial:
    """RS-232 transport: commands are sent verbatim with a trailing newline
    and replies are read one line at a time. (Python 2 module.)"""
    def __init__(self, port="/dev/ttyUSB0", baudrate=9600, timeout=0, verbose=False):
        # NOTE(review): the `timeout` argument is ignored; a fixed 3 s read
        # timeout is passed to serial.Serial instead -- confirm intended.
        self.port = port
        self.verbose = verbose
        self.ser = serial.Serial(port,
                        baudrate=baudrate,
                        bytesize=serial.EIGHTBITS,
                        parity=serial.PARITY_NONE,
                        stopbits=serial.STOPBITS_ONE,
                        rtscts=False,
                        dsrdtr=False,
                        xonxoff=False,
                        timeout=3,
                        writeTimeout=0)
        # Drop any stale bytes left over from a previous session.
        self.ser.flushInput()
        self.ser.flushOutput()
    def interface(self):
        return "RS232"
    def send_str(self, s):
        # Send one command line; a newline terminator is appended.
        if self.verbose:
            print 'DBG: sending "%s"' % (s)
        s += "\n"
        self.ser.write(s)
        self.ser.flush()
    def recv_str(self):
        # Read one reply line, stripped of trailing whitespace.
        s = self.ser.readline()
        s = s.rstrip()
        if self.verbose:
            print 'DBG: received "%s"' % (s)
        return s
    def sendrecv_str(self, s):
        # Send a query and block until its one-line reply arrives.
        if self.verbose:
            print 'DBG: sending "%s"' % (s)
        # send without sleep
        self.ser.write(s + '\n')
        self.ser.flush()
        # wait for response line
        s = self.ser.readline()
        s = s.rstrip()
        if self.verbose:
            print 'DBG: received "%s"' % (s)
        return s
    def version(self):
        # Interface version is not applicable for raw serial.
        return 'N/A'
'''
outp: 1 or 2
Device tracks which is currently enabled
By default commands act on the last selected output
Option argument to per-output commands can switch output if not already selected
'''
class E36:
    """Driver for HP/Agilent E36xx-series SCPI bench power supplies.

    ``io`` is a transport adapter that must provide ``send_str`` (write a
    command), ``sendrecv_str`` (query and read reply), ``interface``
    (description string used in error messages) and ``local`` (release the
    bus-level remote lock).  Dual-output models select the active rail with
    set_outp(); the single-output E3632A is special-cased to skip rail
    selection.  Per-output commands accept an optional ``outp`` argument
    that switches rails first when it differs from the current selection.
    """

    def __init__(self, io, verbose=False):
        """io: transport adapter; verbose: debug flag (currently unused)."""
        self.verbose = verbose
        self.vendor = None
        self.model = None
        # Active rail for commands, unknown at init
        self.outp = None
        self.io = io
        # Make sure simple queries work
        if not self.version():
            raise Exception("Failed init %s" % (io.interface()))

    '''
    *********************************
    MISC
    *********************************
    '''
    def version(self):
        """Return the SCPI version string; doubles as a link sanity check."""
        return self.io.sendrecv_str("SYSTEM:VERSION?")

    def ident(self):
        """Return (vendor, model) only."""
        return self.ident_ex()[0:2]

    def ident_ex(self):
        '''
        Return (vendor, model, serial, firmware) from *IDN?.

        PS ident: ['HEWLETT-PACKARD', 'E3632A', '0', '1.1-5.0-1.0']
        '''
        ret = self.io.sendrecv_str("*IDN?").split(',')
        self.vendor = ret[0]
        self.model = ret[1]
        sn = ret[2]
        fw = ret[3]
        return (self.vendor, self.model, sn, fw)

    def remote(self):
        '''Put into remote mode? Required before running any commands'''
        self.io.send_str("SYSTEM:REMOTE")

    def local(self):
        '''Put into local mode? Evidently displays better'''
        #self.io.send_str("SYSTEM:LOCAL") # to make display updates in real time
        # for some reason you need to issue the GPIB instead of the device local command
        self.io.local()

    def off(self, tsleep=0.2):
        '''Turn off both outputs'''
        self.io.send_str("OUTPUT OFF")
        # Copied from on(); give the supply time to settle.
        time.sleep(tsleep)

    def on(self, tsleep=0.2):
        '''Turn on both outputs'''
        self.io.send_str("OUTPUT ON")
        # 0.1 causes error, 0.15 fine; .15 worked + some margin
        time.sleep(tsleep)

    def set_outp(self, outp, tsleep=0.25):
        '''Force selecting given rail (1 or 2)'''
        if not outp in (1, 2):
            raise Exception('Bad outp %s' % (outp,))
        # FIXME: hack — the E3632A has a single output, so selection is
        # skipped (note: self.outp is left unchanged in that case).
        if self.model == 'E3632A':
            return
        self.io.send_str("INSTRUMENT:SELECT OUTP%d" % outp)
        self.outp = outp
        time.sleep(tsleep)

    def disp_vi(self, outp=None):
        '''display actual currents on front panel'''
        # FIXME: hack — single-output model has no DISP:MODE selection here
        if self.model == 'E3632A':
            return
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("DISP:MODE VI")

    def wait_ready(self):
        '''
        Block until the supply reports "Operation Complete".

        Generally, it is best to use the "Operation Complete" bit (bit
        0) in the Standard Event register to signal when a command
        sequence is completed. This bit is set in the register after an
        *OPC command has been executed.  If too many messages are
        generated before the *OPC command executes (sequentially), the
        output buffer will overload and the power supply will stop
        processing commands.
        '''
        # FIX: the original wrote the query by hand and then read the reply
        # through self.ser — an attribute that was never created (the
        # transport lives in self.io) — and used Python 2 print statements.
        # Poll *OPC? through the transport's query helper instead.
        while True:
            rx = self.io.sendrecv_str("*OPC?").rstrip()
            if rx == "1":
                break

    def apply(self, voltage, current, outp=None):
        '''Set both voltage and current at once'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("APPL %s,%s" % (voltage, current))

    '''
    Errors are retrieved in first-in-first-out (FIFO) order and cleared as
    you read them.  If more than 20 errors have occurred, the last error
    stored in the queue (the most recent error) is replaced with
    -350, "Queue overflow".  If no errors have occurred the power supply
    responds with +0, "No error".  The error queue is cleared by the *CLS
    (clear status) command or when power is cycled; *RST does not clear it.
    '''
    def beep(self):
        '''Call this to annoy your labmates'''
        self.io.send_str("SYSTEM:BEEPER")

    def text(self, s):
        '''Put a short message (max 11 chars) directly on the display'''
        if len(s) > 11:
            raise Exception('string too long')
        self.io.send_str("DISPLAY:TEXT \"%s\"" % (s,))

    def text_clr(self):
        '''Clear any message set with text()'''
        self.io.send_str("DISPlay:TEXT:CLEar")

    def rst(self, tsleep=1.0):
        '''Reset the device except for errors'''
        self.io.send_str("*RST")
        # Device locks up for a bit after reset
        time.sleep(tsleep)

    def clr(self):
        '''Clear error queue'''
        self.io.send_str("*CLS")

    def get_err(self):
        '''Get next error from queue (e.g. '+0,"No error"')'''
        return self.io.sendrecv_str("SYST:ERR?")

    '''
    *********************************
    CURRENT
    *********************************
    '''
    def curr(self, outp=None):
        '''Get current reading'''
        # Consistency fix: honor the outp argument like volt() does
        # (the original silently ignored it here).
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        return float(self.io.sendrecv_str("MEAS:CURR?"))

    def curr_max(self, outp=None):
        '''Get current setpoint as set by set_curr'''
        # Consistency fix: honor the outp argument (was ignored).
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        return float(self.io.sendrecv_str("CURR?"))

    def set_curr(self, current, outp=None):
        '''Set current limit on given output'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("CURR %3.3f" % current)

    '''
    *********************************
    VOLTAGE
    *********************************
    '''
    # 0.185 s over serial
    def volt(self, outp=None):
        '''Get voltage reading'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        return float(self.io.sendrecv_str("MEAS:VOLT?"))

    def volt_max(self, outp=None):
        '''Get voltage setpoint'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        return float(self.io.sendrecv_str("VOLT?"))

    def set_volt(self, volt, outp=None):
        '''Set voltage limit on given output'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("VOLT %3.3f" % (volt,))

    def set_ovp(self, volt, outp=None):
        '''Set over voltage protection limit on given output'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("VOLTAGE:PROT %3.3f" % (volt,))

    def ovp_enb(self, outp=None):
        '''Enable over voltage protection'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("VOLTAGE:PROT:STATE ON")

    def ovp_dis(self, outp=None):
        '''Disable over voltage protection'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("VOLTAGE:PROT:STATE OFF")

    def ovp_clr(self, outp=None):
        '''Clear a voltage-protection fault'''
        if outp is not None and outp != self.outp:
            self.set_outp(outp)
        self.io.send_str("VOLTAGE:PROT:CLEAR")
def print_errors(ps):
    """Drain and print the supply's error queue.

    Reads SYST:ERR? repeatedly until the '+0,"No error"' sentinel, then
    prints each collected error (or ' None').  FIX: the original used
    Python 2-only print statements; the parenthesized single-argument
    forms below produce identical output under both Python 2 and 3.
    """
    print('Errors:')
    errors = []
    while True:
        s = ps.get_err()
        if s == '+0,"No error"':
            break
        errors.append(s)
    if errors:
        for error in errors:
            print(' %s' % error)
    else:
        print(' None')
| StarcoderdataPython |
4887375 | <reponame>antonsuba/ids-test-environment-sdn<filename>test_cases/__init__.py
#!/usr/bin/python
from os.path import dirname, basename, isfile
import glob
EXCLUDE = ['__init__.py', 'test_case.py']
MODULES = glob.glob(dirname(__file__) + '/*.py')
__all__ = [basename(f)[:-3] for f in MODULES if isfile(f) and basename(f) not in EXCLUDE]
| StarcoderdataPython |
1911844 | # <NAME>
# CSC 110
# Fall 2015
def getChoice():
    """Print the search menu and return the user's selection as an int.

    1-7 select the search modes, 8 quits.  NOTE: no validation happens
    here — main() re-prompts on out-of-range values, and a non-numeric
    entry makes int() raise ValueError.
    """
    print("")
    print(" Please specify your search criteria so we may assist you")
    print(" Enter a number that corresponds with one of the choices below:")
    print(" ---------------------------------------------------------")
    print(" 1 - Search based on the name of the AIRLINE")
    print(" 2 - Search based on the CHEAPEST FLIGHT available")
    print(" 3 - Search based on a PRICE RANGE")
    print(" 4 - Search based on the SHORTEST FLIGHT available")
    print(" 5 - Search based on a DEPARTURE TIME RANGE")
    print(" 6 - Search based on the AVERAGE PRICE for the")
    print(" desired airline")
    print(" 7 - Search based on the name of the AIRLINE and")
    print(" the PRICE or DURATION")
    print(" ---------------------------------------------------------")
    print(" If you would like to QUIT >> Enter 8")
    choice = int(input(" Enter your selection: "))
    print("")
    # an integer 1 - 8 to represent the choices of the user
    return choice
def readFile():
    """Load airlines.txt and return five parallel lists of strings.

    Each line of the file is "airline,flight#,depTime,arrTime,price".
    Returns (airlines, flightNumbers, depTimeList, arrTimeList, priceList).

    FIX: the original called line.strip() and discarded the result
    (str.strip returns a new string), so every price field kept its
    trailing newline; int() happened to tolerate it later, but table
    output did not.  Also switched to a with-block so the handle is
    closed on error, and to list.append() instead of quadratic
    list-concatenation.
    """
    airlines = []
    flightNumbers = []
    priceList = []
    depTimeList = []
    arrTimeList = []
    ## File holding the flight data, one comma-separated flight per line
    filename = "airlines.txt"
    with open(filename, 'r') as infile:
        # Loop through the file until each line is read and stored
        for line in infile:
            # FIX: assign the stripped result so the newline is removed
            line = line.strip()
            ## separate the data in the current line based on the commas
            airline, flightNumber, depTime, arrTime, price = line.split(",")
            ## add the split data to each of the designated lists
            airlines.append(airline)
            flightNumbers.append(flightNumber)
            depTimeList.append(depTime)
            arrTimeList.append(arrTime)
            priceList.append(price)
    return airlines, flightNumbers, depTimeList, arrTimeList, priceList
def findAirline(name,airlines, flightNumbers, depList, arrList, prices):
    """Show every flight operated by *name*, re-prompting until the name
    matches an entry in *airlines*, then render the matches via printTable."""
    while name not in airlines:
        print(" ")
        print("Not a valid airline name time. Please try again.")
        name = input("Enter the name of the airline service that you would like to use: ")
    # Collect the row numbers belonging to the requested carrier.
    indexes = [row for row, carrier in enumerate(airlines) if carrier == name]
    count = len(indexes)
    print(" ")
    print(" -- Rhode Island Flight Finder AIRLINE SEARCH -- ")
    print(" ")
    if len(indexes) > 0:
        if count > 1:
            print(" There are",count," flights found for", name)
        else:
            print(" There is 1 flight for", name)
        print(" ")
        ## Render every matched row with its full flight information
        printTable(indexes,airlines, flightNumbers, depList, arrList, prices)
    else:
        # Unreachable in practice: the while-loop above guarantees a match.
        print(" Sorry, there were 0 flights found.")
        print(" Try another search or check the spelling.")
    return
def findCheapestFlight(airlines,flightNumbers,prices,priceList):
    """Print and return the index of the cheapest flight.

    *prices* holds ints for comparison; *priceList* holds the matching
    "$..." display strings.

    FIX: the original compared every element against prices[0] without
    ever updating lowPrice, so it returned the *last* index cheaper than
    the first price rather than the index of the true minimum.
    """
    lowPrice = prices[0]
    lowIndex = 0
    ## Scan the list, remembering both the smallest price and its index
    for i in range(len(prices)):
        if prices[i] < lowPrice:
            lowPrice = prices[i]
            lowIndex = i
    print(" -- Rhode Island Flight Finder CHEAPEST PRICE SEARCH -- ")
    print(" ")
    ## Display the corresponding flight info for the saved index
    print(" ",airlines[lowIndex]," Flight Number",flightNumbers[lowIndex]," at",priceList[lowIndex])
    return lowIndex
def findPrice(priceThreshold,airlines,flightNumbers,depList,arrList,prices,priceList):
    """List every flight priced at or below *priceThreshold*.

    Re-prompts on stdin while the threshold is below the cheapest fare
    (as located by the module-level lowestPrice() helper), then prints a
    table of all qualifying flights.

    NOTE(review): lowestPrice() has a known min-tracking defect, so the
    re-prompt guard may compare against the wrong fare — confirm after
    that helper is fixed.  The missing else-branch after the final `if`
    is harmless: the while-loop guarantees at least one match.
    """
    savedIndex = 0
    indexes = []
    count = 0
    lowIndex = lowestPrice(prices)
    ## Prompt the user for a new price while the threshold is below the
    ## cheapest available fare (otherwise no flight could match)
    while priceThreshold < prices[lowIndex]:
        print(" ")
        print(" Sorry, there were 0 flights found.")
        print(" The price you entered was too low. Try increasing your price.")
        print(" ")
        priceThreshold = int(input("Enter the maximum price of the flight you would like to take: "))
    ## Search for the prices at or below the threshold
    for i in range(len(airlines)):
        if priceThreshold >= prices[i]:
            ## Save the index where the price is less than or equal
            ## to the threshold
            savedIndex = i
            ## Count is incremented each time a there is a price
            ## less than or equal to the threshold
            count += 1
            ## The index is added to the list of indices
            indexes.append(savedIndex)
    print(" ")
    print(" -- Rhode Island Flight Finder PRICE SEARCH -- ")
    print(" ")
    ## Print a table for each index whose price qualifies
    if len(indexes) > 0:
        if count > 1:
            print(" There are",count,"flights found at or below",priceThreshold,"USD")
        else:
            print(" There is 1 flight at or below",priceThreshold,"USD")
        print(" ")
        ## Print the table using the list of the saved indices
        ## from the airlines list and print all of the
        ## corresponding information for those flights
        printTable(indexes,airlines,flightNumbers,depList,arrList,priceList)
    return
def findShortestFlight(airlines,flightNumbers,depList,arrList):
    """Print the flight with the smallest departure-to-arrival duration.

    Relies on the module-level flightDuration() helper for each leg.

    FIX: the original read durations[0] *before* checking that the list
    was non-empty, so an empty schedule crashed with IndexError; it now
    reports "0 flights" and returns instead.  Non-empty behaviour is
    unchanged.
    """
    durations = []
    for i in range(len(depList)):
        duration = flightDuration(depList[i],arrList[i])
        durations = durations + [duration]
    # Robustness fix: nothing to search in an empty schedule.
    if not durations:
        print(" Sorry, there were 0 flights found.")
        return
    shortestFlight = durations[0]
    shortIndex = 0
    ## Scan element by element, remembering the index of the shortest flight
    for i in range(len(durations)):
        if durations[i]<shortestFlight:
            shortestFlight = durations[i]
            shortIndex = i
    print(" -- Rhode Island Flight Finder SHORTEST FLIGHT SEARCH -- ")
    print(" ")
    ## Display the corresponding flight info for the saved index
    print(" ",airlines[shortIndex]," Flight Number",flightNumbers[shortIndex])
    print(" departing at",depList[shortIndex],"and arriving at",arrList[shortIndex])
    print(" Total Flight Duration:",durations[shortIndex],"minutes")
    return
def findDepartureTime(depMin,depMax,depTimes,airlines,flightNumbers,priceList,depList,arrList):
    """List every flight departing within [depMin, depMax] (HH:MM strings).

    *depTimes* holds minutes-past-midnight ints parallel to *depList*.
    When nothing matches, the earliest available departure is suggested.

    FIX: 'earliest' was only assigned when a departure strictly earlier
    than depTimes[0] was seen, so if the first flight was the earliest the
    no-match branch crashed with NameError; it is now seeded from index 0.
    Also guards against an empty schedule (depTimes[0] used to raise
    IndexError).
    """
    savedIndex = 0
    indexes = []
    count = 0
    # Robustness fix: nothing to search in an empty schedule.
    if not depTimes:
        print(" Sorry, there are 0 flights that depart at",depMin)
        return
    minTime = depTimes[0]
    # FIX: seed the suggestion with the first flight so it is always bound.
    earliest = depList[0]
    ## Convert both HH:MM bounds into minutes past midnight
    depHour,depMinute = getFlightTime(depMin)
    minimum = (depHour * 60) + depMinute
    depHour,depMinute = getFlightTime(depMax)
    maximum = (depHour * 60) + depMinute
    ## Collect every departure inside the requested window
    for i in range(len(depTimes)):
        if depTimes[i] >= minimum and depTimes[i] <= maximum:
            savedIndex = i
            count += 1
            indexes.append(savedIndex)
        ## Keep track of the overall earliest departure for the suggestion
        if depTimes[i] < minTime:
            minTime = depTimes[i]
            earliest = depList[i]
    print(" ")
    print(" -- Rhode Island Flight Finder SEARCH BY DEPARTURE -- ")
    print(" ")
    if len(indexes) > 0:
        if count > 1:
            print(" There are",count,"flights found between",depMin,"and",depMax)
        else:
            print(" There is 1 flight found between",depMin,"and",depMax)
        print(" ")
        ## Print a table with full info for every matched flight
        printTable(indexes,airlines,flightNumbers,depList,arrList,priceList)
    ## Otherwise suggest the earliest departure available
    else:
        print(" Sorry, there are 0 flights that depart at",depMin)
        print(" Please increase your mimimum departure time")
        print(" Earliest Departure from PVD >> MCO",earliest)
    return
def findAveragePrice(name,airlines,prices):
    """Print the mean ticket price across every flight run by *name*.

    Re-prompts on stdin until the entered airline exists in *airlines*.
    """
    while name not in airlines:
        print(" ")
        print("Not a valid airline name time. Please try again.")
        name = input("Ente the name of the airline service that you would like to use: ")
    # Gather the fares belonging to the requested carrier, then average.
    matched = [fare for carrier, fare in zip(airlines, prices) if carrier == name]
    averagePrice = sum(matched) / len(matched)
    print(" ")
    print(" -- Rhode Island Flight Finder SEARCH BY AVERAGE PRICE -- ")
    print(" ")
    print(" The average price for",name,"is",averagePrice,"USD.")
    return
def findAirlineSort(name,airlines,flightNumbers,depTimes,arrTimes,prices,depList,arrList,priceList):
    """Show the flights of one airline, optionally ordered by duration.

    Re-prompts until *name* exists in *airlines*, then asks whether to
    sort by price (1) or flight duration (2) and prints the table.

    NOTE(review): the sort==1 branch prints the rows in original file
    order — it never actually sorts by price.  NOTE(review): an invalid
    sort choice re-prompts once but the re-entered value is discarded and
    the function returns without printing anything.  Both look like
    defects to confirm with the author; behaviour is documented as-is.
    """
    while name not in airlines:
        print(" ")
        print("Not a valid airline name time. Please try again.")
        name = input("Enter the name of the airline service that you would like to use: ")
    savedIndex = 0
    savedIndexes = []
    count = 0
    airlineList = []
    sortedPrices = []
    durations = []
    #durationList = []
    ## Searches for the name in the list of airlines
    for i in range(len(airlines)):
        if name == airlines[i]:
            ## Saves the index where the name is a match
            savedIndex = i
            ## Count is incremented each time a match is found
            count += 1
            ## The index is added to the list of indices
            savedIndexes.append(savedIndex)
    ## Asks the user to choose how they would like to sort the flights
    sort = int(input("Enter the 1 if you would like to sort by PRICE and 2 for FLIGHT DURATION:"))
    print(" ")
    print(" -- Rhode Island Flight Finder AIRLINE SEARCH -- ")
    print(" ")
    ## Sorts the lists and prints out the table based on the user input
    if len(savedIndexes) > 0:
        if sort == 1:
            if count > 1:
                print(" There are",count," flights found for", name)
            else:
                print(" There is 1 flight for", name)
            print(" ")
            ## Prints the table using the list of the saved indices
            ## (NOTE(review): unsorted — see docstring)
            printTable(savedIndexes,airlines, flightNumbers, depList, arrList, priceList)
        elif sort == 2:
            ## creates a list of durations based off of the saved indexes
            for k in range(len(savedIndexes)):
                duration = flightDuration(depList[savedIndexes[k]],arrList[savedIndexes[k]])
                durations = durations + [duration]
            sortedDurations,savedIndexes = insertionSort(durations,savedIndexes)
            if count > 1:
                print(" There are",count," flights found for", name)
            else:
                print(" There is 1 flight for", name)
            print(" ")
            ## Prints the table using the duration-sorted index order
            printTable(savedIndexes,airlines, flightNumbers, depList, arrList, priceList)
        else:
            print("Not a valid option, please try again.")
            sort = int(input("Enter the 1 if you would like to sort by PRICE and 2 for FLIGHT DURATION:"))
    else:
        print(" Sorry, there were 0 flights found.")
        print(" Try another search or check the spelling.")
    return
def convertPrices(priceList):
    """Strip the leading '$' from each price string and return a list of ints."""
    return [int(tag[1:]) for tag in priceList]
def convertFlightTimes(depList,arrList):
    """Convert parallel lists of 'HH:MM' stamps into minutes past midnight.

    Returns (depTimes, arrTimes) as lists of ints, using the module-level
    getFlightTime() helper for the parsing.
    """
    depTimes = []
    for stamp in depList:
        hour, minute = getFlightTime(stamp)
        depTimes.append(hour * 60 + minute)
    arrTimes = []
    for stamp in arrList:
        hour, minute = getFlightTime(stamp)
        arrTimes.append(hour * 60 + minute)
    return depTimes, arrTimes
def flightDuration(departures,arrivals):
    """Return the flight length in minutes between two 'HH:MM' stamps.

    FIX: the original took abs(depTime - arrTime), which reports the wrong
    length for overnight flights (23:00 -> 01:00 gave 1320 instead of 120),
    and its day-wrap branch was unreachable because an HH:MM stamp is
    always < 1440 minutes.  Wrapping modulo one day handles both cases;
    same-day flights (arrival after departure) are unchanged.
    """
    ## Number of minutes in a full day
    day = 1440
    ## Convert both stamps into minutes past midnight
    depHour,depMin = getFlightTime(departures)
    arrHour,arrMin = getFlightTime(arrivals)
    depTime = (depHour * 60) + depMin
    arrTime = (arrHour * 60) + arrMin
    # Modular difference so crossing midnight still yields elapsed minutes.
    return (arrTime - depTime) % day
def getFlightTime(flightTime):
    """Split an 'HH:MM' stamp at the colon and return (hour, minute) as ints."""
    hour_text, minute_text = flightTime.split(":")
    return int(hour_text), int(minute_text)
def lowestPrice(prices):
    """Return the index of the smallest price in a non-empty list of numbers.

    FIX: the original never updated lowPrice inside the loop, so it
    returned the index of the *last* element cheaper than prices[0]
    rather than the index of the true minimum.
    """
    lowPrice = prices[0]
    index = 0
    ## Track both the smallest value seen so far and where it lives
    for i in range(len(prices)):
        if prices[i] < lowPrice:
            lowPrice = prices[i]
            index = i
    return index
def insertionSort(theList,savedIndexes):
    """Insertion-sort *theList* ascending, carrying *savedIndexes* along.

    Both lists are mutated in place (and also returned) so that
    savedIndexes[k] still labels the element now sitting at theList[k].

    FIX: the original shifted only theList inside the inner loop, leaving
    savedIndexes misaligned (with duplicated entries); it also left two
    debug print() calls that polluted the program's output.
    """
    for i in range(1, len(theList)):
        pending_value = theList[i]
        pending_index = savedIndexes[i]
        j = i
        while j > 0 and theList[j - 1] > pending_value:
            # Shift BOTH parallel lists right by one slot.
            theList[j] = theList[j - 1]
            savedIndexes[j] = savedIndexes[j - 1]
            j = j - 1
        # Drop the pending pair into its sorted position.
        theList[j] = pending_value
        savedIndexes[j] = pending_index
    return theList,savedIndexes
def printTable(indexes,airlines,flightNumbers,depTimes,arrTimes,prices):
    """Render the flights at the given row numbers as a tab-separated table."""
    print(" AIRLINE","\t","FLT#","\t","DEPT","\t","ARVL","\t","PRICE")
    print(" __________________________________________")
    print(" ")
    # Walk the selected rows directly instead of indexing by position.
    for row in indexes:
        print(" ",airlines[row],"\t",flightNumbers[row],"\t",depTimes[row],"\t",arrTimes[row],"\t ",prices[row])
    return
def main():
    """Interactive driver: load the flight data, then dispatch menu choices.

    Loops on getChoice() until 8 (quit) is entered; out-of-range values
    fall through to the else-branch and re-prompt.  NOTE: several prompts
    below contain the original's "Ente" typos — they are runtime strings
    and are preserved verbatim.
    """
    print(" ---------------------------------------------------------------- ")
    print("| Rhode Island Flight Finder |")
    print(" ---------------------------------------------------------------- ")
    print(" ")
    ## Extracts the data from the file
    airlines,flightNumbers,depList,arrList,priceList = readFile()
    ## Converts the prices to a list of integers for computations
    prices = convertPrices(priceList)
    ## Converts the flight times into minutes (list of integers) for computations
    depTimes,arrTimes = convertFlightTimes(depList,arrList)
    ## Asks the user for their choice
    userChoice = getChoice()
    while userChoice != 8:
        if userChoice == 1:
            name = input("Ente the name of the airline service that you would like to use: ")
            # priceList (display strings) is passed so the table shows "$..."
            findAirline(name,airlines,flightNumbers,depList,arrList,priceList)
            print("")
            userChoice = getChoice()
        elif userChoice == 2:
            findCheapestFlight(airlines,flightNumbers,prices,priceList)
            userChoice = getChoice()
            print("")
        elif userChoice == 3:
            maxPrice = int(input("Ente the maximum price that you would like your flight to be: "))
            findPrice(maxPrice,airlines,flightNumbers,depList,arrList,prices,priceList)
            userChoice = getChoice()
            print("")
        elif userChoice == 4:
            findShortestFlight(airlines,flightNumbers,depList,arrList)
            userChoice = getChoice()
            print("")
        elif userChoice == 5:
            print("Please enter the following in 24:00 time format >>")
            print("")
            ## Prompts the user for a valid departure time until it is entered
            depMin = input("Enter the minimum departure time: ")
            while ':' not in depMin:
                print("Not a valid departure time. Please try again.")
                depMin = input("Enter the minimum departure time: ")
            ## Prompts the user for a valid maximum time until it is entered
            depMax = input("Enter the maximum departure time: ")
            while ':' not in depMax:
                print("Not a valid departure time. Please try again.")
                depMax = input("Enter the minimum departure time: ")
            findDepartureTime(depMin,depMax,depTimes,airlines,flightNumbers,priceList,depList,arrList)
            userChoice = getChoice()
            print("")
        elif userChoice == 6:
            name = input("Ente the name of the airline service: ")
            findAveragePrice(name,airlines,prices)
            userChoice = getChoice()
            print(" ")
        elif userChoice == 7:
            name = input("Ente the name of the airline service that you would like to use: ")
            findAirlineSort(name,airlines,flightNumbers,depTimes,arrTimes,prices,depList,arrList,priceList)
            userChoice = getChoice()
            print("")
        else:
            ## Out-of-range selection: re-prompt without redisplaying the menu
            print("Please enter in a valid value that\nrepresents one of the previous values.")
            userChoice = int(input(" Enter your selection: "))
            print("")
    print("Thank you for using the Rhode Island Flight Finder >>")
# Run the interactive program when the module is executed.
main()
| StarcoderdataPython |
20942 | <reponame>willogy-team/insights--tensorflow<gh_stars>0
import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from visualizations.manual_plot_by_matplotlib import plot_filters_of_a_layer
from visualizations.manual_plot_by_matplotlib import plot_feature_maps_of_a_layer, plot_feature_maps_of_multiple_layers
from visualizations.automatic_plot_by_tf_keras_vis import plot_activation_maximization_of_a_layer
from visualizations.automatic_plot_by_tf_keras_vis import plot_vanilla_saliency_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_smoothgrad_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_plusplus_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_scorecam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_faster_scorecam_of_a_model
# Command-line interface: both arguments are mandatory.
ap = argparse.ArgumentParser()
ap.add_argument("-trd", "--train_dir", required=True, help="Path to dataset train directory")
ap.add_argument("-mdp", "--model_path", required=True, help="Path to the folder for saving checkpoints")
# Parse at import time; args is a plain dict of option values.
args = vars(ap.parse_args())
def create_model():
    """Build and compile a small 3-class CNN classifier.

    Three stacked Conv2D feature extractors feed a Dense softmax head;
    the model is built for 128x128 RGB inputs and compiled with
    categorical cross-entropy and Adam(1e-4).
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(8, 7, activation='relu'),
        tf.keras.layers.Conv2D(8, 5, activation='relu'),
        tf.keras.layers.Conv2D(8, 3, activation='relu'),
        # NOTE(review): input_shape=(32, 32, 3) on a mid-stack Flatten looks
        # like a leftover — the model is built below with (None, 128, 128, 3),
        # which is what actually fixes the input size.  Confirm and remove.
        tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(3, activation='softmax')
    ])
    # Explicit build so load_weights (in the script below) can restore
    # into fully-defined layer shapes.
    input_shape = (None, 128, 128, 3)
    model.build(input_shape)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=1e-4), metrics=['accuracy'])
    return model
# Restore the trained weights from the checkpoint directory given on the CLI.
model = create_model()
checkpoint_path = os.path.join(args["model_path"], 'models')
model.load_weights(checkpoint_path)
# === Visualize the learned convolution filters, layer by layer ===
for idx, layer in enumerate(model.layers):
    print('[*] layer: ', layer)
    if 'conv' not in layer.name:
        print('No')
        continue
    filters_weights, biases_weights = layer.get_weights()
    print('[**] id: {}, layer.name: {}, filters_weights.shape: {}, biases_weights.shape: {}'.format(idx, layer.name, filters_weights.shape, biases_weights.shape))
    print('[**] layer.output.shape: {}'.format(layer.output.shape))
    # Min-max normalize the filter weights into [0, 1] for display.
    filters_max, filters_min = filters_weights.max(), filters_weights.min()
    filters_weights = (filters_weights - filters_min)/(filters_max - filters_min)
    print('[**] filters_weights: ', filters_weights)
    plot_filters_of_a_layer(filters_weights, 3)
# === Output feature maps from a single layer ===
# A PIL object
img = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
# Convert to numpy array and add a leading batch dimension
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
# img = model.preprocess_input(img)
# Scale pixel values to [0, 1] (same preprocessing the model was trained with)
img = img/255
model_1 = Model(inputs=model.inputs, outputs=model.layers[0].output)
feature_maps_1 = model_1.predict(img)
print('[*] feature_maps_1.shape: ', feature_maps_1.shape)
plot_feature_maps_of_a_layer(feature_maps_1)
# === Output feature maps from multiple layers ===
# Tap the outputs of the first three layers simultaneously.
list_of_outputs = [model.layers[idx].output for idx in range(3)]
model_2 = Model(inputs=model.inputs, outputs=list_of_outputs)
model_2.summary()
feature_maps_2 = model_2.predict(img)
for feature_map in feature_maps_2:
    print('[*] feature_map.shape: ', feature_map.shape)
plot_feature_maps_of_multiple_layers(feature_maps_2)
# === Output activation maximization from a single layer ===
plot_activation_maximization_of_a_layer(model, 2)
# === GradCam++ from a single layer ===
# plot_gradcam_plusplus_of_a_layer(model, 2)
# === Attentions ===
# One sample image per class, preprocessed the same way as above.
image_titles = ['Chihuahua', 'Japanese_spaniel', 'Maltese_dog']
img1 = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
img2 = load_img(os.path.join(args["train_dir"], 'n02085782-Japanese_spaniel', 'n02085782_2874.jpg'), target_size=(128, 128))
img3 = load_img(os.path.join(args["train_dir"], 'n02085936-Maltese_dog', 'n02085936_4245.jpg'), target_size=(128, 128))
img1 = np.asarray(img1)
img2 = np.asarray(img2)
img3 = np.asarray(img3)
images = np.asarray([img1, img2, img3])
X = images/255
## Vanilla saliency
print('[*] Vanilla saliency')
plot_vanilla_saliency_of_a_model(model, X, image_titles)
## SmoothGrad
print('[*] SmoothGrad')
plot_smoothgrad_of_a_model(model, X, image_titles)
## GradCAM
print('[*] GradCAM')
plot_gradcam_of_a_model(model, X, image_titles, images)
## GradCAM++
print('[*] GradCAM++')
plot_gradcam_plusplus_of_a_model(model, X, image_titles, images)
## ScoreCAM
print('[*] ScoreCam')
plot_scorecam_of_a_model(model, X, image_titles, images)
## Faster-ScoreCAM
print('[*] Faster-ScoreCAM')
plot_faster_scorecam_of_a_model(model, X, image_titles, images)
6646064 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Functions for dealing with the Stagger model photospheres. """
from __future__ import division, absolute_import, print_function
__author__ = "<NAME> <<EMAIL>>"
import logging
import numpy as np
from .interpolator import BaseInterpolator
logger = logging.getLogger(__name__)
class Interpolator(BaseInterpolator):
    """Stagger-grid photosphere interpolator using the log-tau opacity scale."""

    # Depth coordinate handed to the base interpolator.
    opacity_scale = "logtau"

    def __init__(self, filename, **kwargs):
        # FIX: the original used super(self.__class__, self), which recurses
        # forever as soon as this class is subclassed; name the class
        # explicitly (Python 2-compatible form, matching the file's
        # __future__ imports).
        super(Interpolator, self).__init__(filename, **kwargs)
def pickle_from_tsv_file(filename, depth_scale="optical", skiprows=72,
    delimiter=";"):
    """
    Pickle the Stagger-grid models from TSV-formatted filename.

    :param filename:
        The path of the TSV-formatted file.
    :type filename:
        str

    :param depth_scale: [optional, optical assumed]
        Which horizontal averaging method to use. Available options are:
        optical, mass density, Rosseland, or geometric height
    :type depth_scale:
        str

    :param skiprows: [optional]
        The number of rows at the top of the file before the header information.
    :type skiprows:
        int

    :param delimiter: [optional]
        The delimiting character between columns.
    :type delimiter:
        str

    Returns (parameters, photospheres, names, meta) where parameters is a
    record array of (effective_temperature, surface_gravity, metallicity).

    FIX: the two map() results below were assigned straight into numpy
    slices, which only works on Python 2 (Python 3's map returns an
    iterator); they are now list comprehensions, matching the module's
    declared 2/3 intent.  Also switched from the private
    np.core.records.fromarrays to the public np.rec.fromarrays alias.
    """
    depth_scale_hint = depth_scale.lower()[0] # work it out from first letter
    if depth_scale_hint not in ("o", "m", "r", "z", "g", "h"): # zgh are same
        raise ValueError(
            "depth scale expected to be 'optical', 'mass density', "
            "Rosseland, or geometric height")

    if depth_scale_hint in ("g", "h"):
        depth_scale_hint = "z"
    elif depth_scale_hint == "r":
        depth_scale_hint = "R"

    # NOTE: "Rossland" typo preserved verbatim — it is written into the
    # returned metadata and downstream consumers may match on it.
    depth_scale = {
        "o": "optical",
        "m": "mass density",
        "R": "Rossland opacity",
        "z": "geometric height",
    }[depth_scale_hint]

    with open(filename, "r") as fp:
        contents = fp.readlines()[skiprows + 1:]

    # Drop a trailing blank line if present (guarded against empty input).
    if contents and contents[-1] == "\n":
        contents.pop(-1)

    # Number of leading bookkeeping columns in each row (teff, logg, feh, id).
    n = 4

    # First three lines are for headers
    names = contents[0].strip().split(delimiter)
    units = contents[1].strip().split(delimiter)
    contents = contents[3:]

    # One model per distinct value in the model-identifier column.
    num_models = len(set([row.split(delimiter)[n - 1] for row in contents]))
    parameters = np.nan * np.ones((num_models, n - 1))

    # Assume they all have the same number of depth points.
    assert (len(contents) % num_models) == 0
    num_depth_points = len(contents) // num_models

    num_photospheric_quantitites = len(names) - n
    photospheres = np.nan * np.ones(
        (num_models, num_depth_points, num_photospheric_quantitites))

    for i in range(num_models):
        # FIX (Py3): materialize the floats — a bare map() cannot be
        # broadcast into a numpy slice.
        parameters[i, :] = [float(value) for value in
            contents[i * num_depth_points].split(delimiter)[:n - 1]]
        photospheres[i, :, :] = np.array(
            [[float(value.strip()) for value in row.split(delimiter)[n:]]
                for row in contents[i*num_depth_points:(i + 1)*num_depth_points]])

    names, units = names[n:], units[n:]

    # Keep only the first photospheric column plus the columns matching the
    # requested horizontal-averaging suffix, e.g. "(o)" for optical.
    indices = np.array([0] + [i for i, name in enumerate(names) \
        if name.endswith("({})".format(depth_scale_hint))])
    names = [names[0]] + [names[i][:-3] for i in indices[1:]]
    # Replace dimensionless "[-]" unit markers with "" for astropy.
    units = [units[i].replace("[-]", "") for i in indices]
    photospheres = photospheres[:, :, indices]

    meta = {
        "kind": "Stagger",
        "source_path": filename,
        "horizontal_averaging": depth_scale,
        "photospheric_units": units
    }

    assert np.all(np.isfinite(parameters))
    assert np.all(np.isfinite(photospheres))

    parameters = np.rec.fromarrays(parameters.T,
        names=("effective_temperature", "surface_gravity", "metallicity"))

    return (parameters, photospheres, names, meta)
| StarcoderdataPython |
5139748 | <filename>tutorial.py<gh_stars>0
"""Tutorial script: load a CSV export of device energy data and (via the
commented-out examples below) write sample points to an InfluxDB bucket."""
from influxdb_client import InfluxDBClient, Point
from influxdb_client.client.write_api import SYNCHRONOUS
import pandas as pd
import random
import time

bucket = "WattTest"

# NOTE(review): the token is redacted ("<KEY>") — fill in a real API token
# before running. The original line was missing the token's closing quote and
# the comma before `org`, which made this file a syntax error; fixed here.
client = InfluxDBClient(
    url="http://localhost:8086",
    token="<KEY>",
    org="4d4e5bd125ac30b2",
)
write_api = client.write_api(write_options=SYNCHRONOUS)
query_api = client.query_api()

# ---------- reading csv data -----------
# NOTE(review): delim_whitespace=True parses whitespace-separated columns;
# confirm the export really is whitespace- rather than comma-delimited.
df = pd.read_csv("data_export_device_single_point_energy.csv", delim_whitespace=True)
print(df)

# ---------- convert the csv data into line protocol and insert into influxDB with time limiter 1, so that you can have a nice graph of static data
# "measurementlevel","levelIdentification/campusId","levelIdentification/buildingId","levelIdentification/floorId","levelIdentification/areaId","levelIdentification/deviceId","measurements/groupBy","measurements/aggregation/func","measurements/aggregation/metric","measurements/time_from","measurements/time_to","measurements/monitoredObject/group","measurements/monitoredObject/values/metric","measurements/monitoredObject/values/value"

# Example 1: write stock-style rows from the dataframe, one per second.
# i = 0
# while i < 1000:
#     p = Point("my_measurement").tag("Stock", "Ford").tag("Ticker","F").field("Open", df["Open"][i]).field("High", df["High"][i]).field("Low", df["Low"][i]).field("Close",df["Close"][i])
#     write_api.write(bucket=bucket, record=p)
#     print(i, " opit ", df["Volume"][i])
#     time.sleep(1)
#     i += 1

# Example 2: write random temperature readings for two locations every 5 s.
# i = 1
# while i < 1000:
#     randomNum = random.randrange(20,30) + random.uniform(0,1)
#     p = Point("my_measurement").tag("location", "Prague").field("temperature", randomNum)
#     write_api.write(bucket=bucket, record=p)
#     print(i, " opit ", randomNum)
#     randomNum = random.randrange(20,30) + random.uniform(0,1)
#     p = Point("my_measurement").tag("location", "Varna").field("temperature", randomNum)
#     write_api.write(bucket=bucket, record=p)
#     print(i, " opit ", randomNum)
#     time.sleep(5)
#     i += 1
| StarcoderdataPython |
9772473 | <reponame>subramp-prep/leetcode
import heapq
class Solution(object):
    def kthSmallest(self, matrix, k):
        """Return the k-th smallest element (1-indexed) of *matrix*.

        Each row of *matrix* is assumed to be sorted ascending (LeetCode 378).
        heapq.merge lazily merges the sorted rows; islice keeps only the first
        k merged values instead of materializing the whole merged sequence,
        so this takes O(k log n) time for n rows rather than O(N log n) for
        all N elements.
        """
        from itertools import islice
        # list(...)[-1] (rather than next()) preserves the original
        # IndexError behaviour when k exceeds the number of elements.
        return list(islice(heapq.merge(*matrix), k))[-1]
6644241 | <reponame>pi-top/pi-top-Python-SDK
from pitop import Camera, DriveController, Pitop
from pitop.labs import WebController
# Assemble the robot: a Pitop base with a drive controller and a camera.
robot = Pitop()
robot.add_component(DriveController())
robot.add_component(Camera())
# Current drive speed (fraction of full speed); mutated at runtime by the
# ArrowUp/ArrowDown branches in key_down below.
speed = 0.2
def key_down(data, send):
    """Handle a key-press message from the web controller.

    WASD keys start a drive action at the current module-level speed;
    ArrowUp/ArrowDown adjust the speed in 0.2 steps (clamped to [0, 1])
    and report the new value back to the client via *send*.
    """
    global speed
    pressed = data.get("key")
    if pressed in ("ArrowUp", "ArrowDown"):
        step = 0.2 if pressed == "ArrowUp" else -0.2
        speed = min(1, max(0, speed + step))
        send({"type": "speed", "data": speed})
        return
    if pressed == "w":
        # hold=True keeps driving until an explicit stop on key release.
        robot.drive.forward(speed, hold=True)
    elif pressed == "s":
        robot.drive.backward(speed, hold=True)
    elif pressed == "d":
        robot.drive.right(speed)
    elif pressed == "a":
        robot.drive.left(speed)
def key_up(data):
    """Handle a key-release message: stop the action that key started."""
    released = data.get("key")
    if released in ("w", "s"):
        # Forward/backward were started with hold=True, so releasing the
        # key must issue an explicit stop.
        robot.drive.stop()
    elif released == "d":
        robot.drive.right(0)
    elif released == "a":
        robot.drive.left(0)
# Serve the browser-based controller: stream camera frames to the page and
# dispatch its key_down/key_up messages to the handlers defined above.
controller = WebController(
    get_frame=robot.camera.get_frame,
    message_handlers={"key_down": key_down, "key_up": key_up},
)
controller.serve_forever()
| StarcoderdataPython |
155107 | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
from fn_aws_utilities.util.aws_sns_api import AwsSns
class TestSendSmsViaSns:
    """Tests for the send_sms_via_sns function."""

    def test_success(self):
        """Send one sample SMS through SNS and check a message id is returned.

        The credentials and destination number below must be filled in by
        hand before this test can run against a live AWS account.
        """
        access_key_id = ""      # Fill with access key id
        secret_access_key = ""  # Fill with secret access key
        region = "us-east-1"    # Fill with region name
        recipient = ""          # Fill with number to send a test message to (include international code)

        client = AwsSns(access_key_id, secret_access_key, region)
        response = client.message_members("best message", [recipient])
        assert response.get("MessageId") is not None
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.