id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
136873 | import typing
import dataclasses as dc
from .raw_client import RawClient
T = typing.TypeVar('T')
@dc.dataclass
class ApiStorage:
    """Lazily instantiates and caches API wrapper objects, keyed by type.

    Each API class is constructed at most once per storage, sharing the
    same underlying raw client.
    """

    _client: RawClient
    _apis: typing.Dict[typing.Type[typing.Any], typing.Any] = dc.field(default_factory=dict)

    def get_api(self, api_type: typing.Type[T]) -> T:
        """Return the cached instance of ``api_type``, creating it on first use.

        Raises ``ValueError`` if ``api_type`` is not an ``ApiBase`` subclass.
        """
        if not issubclass(api_type, ApiBase):
            raise ValueError('Invalid api type')
        try:
            cached = self._apis[api_type]
        except KeyError:
            cached = api_type(client=self._client, storage=self)
            self._apis[api_type] = cached
        result: T = cached
        return result
@dc.dataclass
class ApiBase:
    """Base class for API wrappers managed by :class:`ApiStorage`.

    Subclasses are instantiated by ``ApiStorage.get_api`` with keyword
    arguments ``client`` and ``storage``.
    """

    # low-level transport client shared by all API instances
    client: RawClient
    # the storage that created (and caches) this instance
    storage: ApiStorage
| StarcoderdataPython |
1791470 | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
# URL namespace, e.g. reverse('user:signin')
app_name = 'user'

urlpatterns = [
    # account creation and e-mail activation
    path('signup', views.signup, name='signup'),
    path('sent', views.activation_sent, name='activation_sent'),
    path('activate/<slug:uidb64>/<slug:token>/', views.activate, name='activate'),
    # session management
    path('signin', views.signin, name='signin'),
    path('signout', views.signout, name='signout'),
    # password reset flow (custom views + built-in completion view)
    path('password_reset', views.password_reset, name='password_reset'),
    path('reset/<uidb64>/<token>/', views.PasswordResetConfirm.as_view(), name='password_reset_confirm'),
    path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    # user profile and administration
    path('informations', views.informations, name='informations'),
    path('admin', views.admin, name='admin'),
    # NOTE(review): '<id>' uses the default str converter — confirm whether
    # '<int:id>' was intended for numeric primary keys.
    path('admin/<id>', views.edit_user, name='edit_user'),
]
| StarcoderdataPython |
1612225 | <reponame>ChaseKnowlden/airflow
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import contextmanager
from typing import List, Optional
import pytest
from airflow.models import Connection
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.utils import db
from tests.test_utils import AIRFLOW_MAIN_FOLDER
from tests.test_utils.logging_command_executor import get_executor
from tests.test_utils.system_tests_class import SystemTest
# Location of the AWS example DAGs shipped with the Amazon provider
AWS_DAG_FOLDER = os.path.join(AIRFLOW_MAIN_FOLDER, "airflow", "providers", "amazon", "aws", "example_dags")
# File name for EKS credentials/config — usage not visible in this module
AWS_EKS_KEY = "aws_eks.json"
@contextmanager
def provide_aws_context(key_file_path: Optional[str] = None):
    """
    Authenticates the context to be able to use aws resources.

    Falls back to awscli default authentication methods via the ``.aws``
    folder.

    :param key_file_path: optional path to a credentials key file;
        currently unused — presumably reserved for a future explicit
        authentication method (see TODO below).
    """
    # TODO: Implement more authentication methods
    yield
@contextmanager
def provide_aws_s3_bucket(name):
    """
    Context manager that creates the S3 bucket ``name`` on entry and
    deletes it on exit.

    :param name: name of the S3 bucket to provide
    """
    AmazonSystemTest.create_aws_s3_bucket(name)
    try:
        yield
    finally:
        # Delete the bucket even if the wrapped code raised; without the
        # try/finally a failing test would leak the bucket, because the
        # exception is re-raised at the yield point and the cleanup line
        # would be skipped.
        AmazonSystemTest.delete_aws_s3_bucket(name)
@pytest.mark.system("amazon")
class AmazonSystemTest(SystemTest):
    """Base helpers for Amazon (AWS) system tests.

    Wraps the awscli (via :meth:`execute_with_ctx`) and boto3 (via
    :class:`AwsBaseHook`) to create and tear down the AWS resources used
    by the example DAGs. Runtime configuration is read from environment
    variables.
    """

    @staticmethod
    def _region_name():
        # AWS region the tests run against
        return os.environ.get("REGION_NAME")

    @staticmethod
    def _registry_id():
        # AWS account id that owns the ECR registry
        return os.environ.get("REGISTRY_ID")

    @staticmethod
    def _image():
        # full image URI used by the ECS task definition
        return os.environ.get("IMAGE")

    @staticmethod
    def _execution_role_arn():
        # IAM role ARN the ECS container agent assumes
        return os.environ.get("EXECUTION_ROLE_ARN")

    @staticmethod
    def _remove_resources():
        # remove all created/existing resources flag
        # NOTE(review): when REMOVE_RESOURCES is set, this returns the raw
        # string value (e.g. "False"), which is truthy — confirm callers
        # only rely on it being set/unset.
        return os.environ.get("REMOVE_RESOURCES", False)

    @classmethod
    def execute_with_ctx(cls, cmd: List[str]):
        """
        Executes command with context created by provide_aws_context.
        """
        executor = get_executor()
        with provide_aws_context():
            executor.execute_cmd(cmd=cmd)

    @staticmethod
    def create_connection(aws_conn_id: str, region: str) -> None:
        """
        Create aws connection with region

        :param aws_conn_id: id of the aws connection to create
        :type aws_conn_id: str
        :param region: aws region name to use in extra field of the aws connection
        :type region: str
        """
        db.merge_conn(
            Connection(
                conn_id=aws_conn_id,
                conn_type="aws",
                extra=f'{{"region_name": "{region}"}}',
            ),
        )

    @classmethod
    def create_aws_s3_bucket(cls, name: str) -> None:
        """
        Creates the aws bucket with the given name.

        :param name: name of the bucket
        """
        cmd = ["aws", "s3api", "create-bucket", "--bucket", name]
        cls.execute_with_ctx(cmd)

    @classmethod
    def delete_aws_s3_bucket(cls, name: str) -> None:
        """
        Deletes the aws bucket with the given name. It needs to empty the bucket before it can be deleted.

        :param name: name of the bucket
        """
        # a bucket must be emptied before "delete-bucket" can succeed
        cmd = ["aws", "s3", "rm", f"s3://{name}", "--recursive"]
        cls.execute_with_ctx(cmd)
        cmd = ["aws", "s3api", "delete-bucket", "--bucket", name]
        cls.execute_with_ctx(cmd)

    @classmethod
    def create_emr_default_roles(cls) -> None:
        """Create EMR Default roles for running system test

        This will create the default IAM roles:
        - `EMR_EC2_DefaultRole`
        - `EMR_DefaultRole`
        """
        cmd = ["aws", "emr", "create-default-roles"]
        cls.execute_with_ctx(cmd)

    @staticmethod
    def create_ecs_cluster(aws_conn_id: str, cluster_name: str) -> None:
        """
        Create ecs cluster with given name

        If specified cluster exists, it doesn't change and new cluster will not be created.

        :param aws_conn_id: id of the aws connection to use when creating boto3 client/resource
        :type aws_conn_id: str
        :param cluster_name: name of the cluster to create in aws ecs
        :type cluster_name: str
        """
        hook = AwsBaseHook(
            aws_conn_id=aws_conn_id,
            client_type="ecs",
        )
        # both Fargate capacity providers are attached with equal weight
        hook.conn.create_cluster(
            clusterName=cluster_name,
            capacityProviders=[
                "FARGATE_SPOT",
                "FARGATE",
            ],
            defaultCapacityProviderStrategy=[
                {
                    "capacityProvider": "FARGATE_SPOT",
                    "weight": 1,
                    "base": 0,
                },
                {
                    "capacityProvider": "FARGATE",
                    "weight": 1,
                    "base": 0,
                },
            ],
        )

    @staticmethod
    def delete_ecs_cluster(aws_conn_id: str, cluster_name: str) -> None:
        """
        Delete ecs cluster with given short name or full Amazon Resource Name (ARN)

        :param aws_conn_id: id of the aws connection to use when creating boto3 client/resource
        :type aws_conn_id: str
        :param cluster_name: name of the cluster to delete in aws ecs
        :type cluster_name: str
        """
        hook = AwsBaseHook(
            aws_conn_id=aws_conn_id,
            client_type="ecs",
        )
        hook.conn.delete_cluster(
            cluster=cluster_name,
        )

    @staticmethod
    def create_ecs_task_definition(
        aws_conn_id: str,
        task_definition: str,
        container: str,
        image: str,
        execution_role_arn: str,
        awslogs_group: str,
        awslogs_region: str,
        awslogs_stream_prefix: str,
    ) -> None:
        """
        Create ecs task definition with given name

        :param aws_conn_id: id of the aws connection to use when creating boto3 client/resource
        :type aws_conn_id: str
        :param task_definition: family name for task definition to create in aws ecs
        :type task_definition: str
        :param container: name of the container
        :type container: str
        :param image: image used to start a container,
            format: `registry_id`.dkr.ecr.`region`.amazonaws.com/`repository_name`:`tag`
        :type image: str
        :param execution_role_arn: task execution role that the Amazon ECS container agent can assume,
            format: arn:aws:iam::`registry_id`:role/`role_name`
        :type execution_role_arn: str
        :param awslogs_group: awslogs group option in log configuration
        :type awslogs_group: str
        :param awslogs_region: awslogs region option in log configuration
        :type awslogs_region: str
        :param awslogs_stream_prefix: awslogs stream prefix option in log configuration
        :type awslogs_stream_prefix: str
        """
        hook = AwsBaseHook(
            aws_conn_id=aws_conn_id,
            client_type="ecs",
        )
        hook.conn.register_task_definition(
            family=task_definition,
            executionRoleArn=execution_role_arn,
            networkMode="awsvpc",
            containerDefinitions=[
                {
                    "name": container,
                    "image": image,
                    "cpu": 256,
                    "memory": 512,  # hard limit
                    "memoryReservation": 512,  # soft limit
                    "logConfiguration": {
                        "logDriver": "awslogs",
                        "options": {
                            "awslogs-group": awslogs_group,
                            "awslogs-region": awslogs_region,
                            "awslogs-stream-prefix": awslogs_stream_prefix,
                        },
                    },
                },
            ],
            requiresCompatibilities=[
                "FARGATE",
            ],
            cpu="256",  # task cpu limit (total of all containers)
            memory="512",  # task memory limit (total of all containers)
        )

    @staticmethod
    def delete_ecs_task_definition(aws_conn_id: str, task_definition: str) -> None:
        """
        Delete all revisions of given ecs task definition

        :param aws_conn_id: id of the aws connection to use when creating boto3 client/resource
        :type aws_conn_id: str
        :param task_definition: family prefix for task definition to delete in aws ecs
        :type task_definition: str
        """
        hook = AwsBaseHook(
            aws_conn_id=aws_conn_id,
            client_type="ecs",
        )
        # NOTE(review): no pagination here — families with more than 100
        # active revisions would only be partially deregistered; confirm
        # whether that can occur in these tests.
        response = hook.conn.list_task_definitions(
            familyPrefix=task_definition,
            status="ACTIVE",
            sort="ASC",
            maxResults=100,
        )
        # ARNs end with ":<revision>"; deregister each revision one by one
        revisions = [arn.split(":")[-1] for arn in response["taskDefinitionArns"]]
        for revision in revisions:
            hook.conn.deregister_task_definition(
                taskDefinition=f"{task_definition}:{revision}",
            )

    @staticmethod
    def is_ecs_task_definition_exists(aws_conn_id: str, task_definition: str) -> bool:
        """
        Check whether given task definition exists in ecs

        :param aws_conn_id: id of the aws connection to use when creating boto3 client/resource
        :type aws_conn_id: str
        :param task_definition: family prefix for task definition to check in aws ecs
        :type task_definition: str
        """
        hook = AwsBaseHook(
            aws_conn_id=aws_conn_id,
            client_type="ecs",
        )
        response = hook.conn.list_task_definition_families(
            familyPrefix=task_definition,
            status="ACTIVE",
            maxResults=100,
        )
        return task_definition in response["families"]
| StarcoderdataPython |
""" Implementation of the 'original' volume/area scaling glacier model from
Marzeion et al. 2012, see http://www.the-cryosphere.net/6/1295/2012/.

While the mass balance model is comparable to OGGM's past mass balance model,
the 'dynamic' part does not include any ice physics but works with area/volume
and length/volume scaling instead.

Author: <NAME>
"""
# Built ins
import os
import logging
import datetime
from time import gmtime, strftime
# External libs
import numpy as np
import pandas as pd
import xarray as xr
import netCDF4
from scipy.optimize import minimize_scalar
# import OGGM modules
import oggm
import oggm.cfg as cfg
from oggm.cfg import SEC_IN_YEAR, SEC_IN_MONTH
from oggm import __version__
from oggm import utils, entity_task, global_task
from oggm.utils import floatyear_to_date, ncDataset
from oggm.exceptions import InvalidParamsError, MassBalanceCalibrationError
from oggm.core import climate
from oggm.core.massbalance import MassBalanceModel
# Module logger
log = logging.getLogger(__name__)
def _compute_temp_terminus(temp, temp_grad, ref_hgt,
terminus_hgt, temp_anomaly=0):
"""Computes the (monthly) mean temperature at the glacier terminus,
following section 2.1.2 of Marzeion et. al., 2012. The input temperature
is scaled by the given temperature gradient and the elevation difference
between reference altitude and the glacier terminus elevation.
Parameters
----------
temp : netCDF4 variable
monthly mean climatological temperature (degC)
temp_grad : netCDF4 variable or float
temperature lapse rate [degC per m of elevation change]
ref_hgt : float
reference elevation for climatological temperature [m asl.]
terminus_hgt : float
elevation of the glacier terminus (m asl.)
temp_anomaly : netCDF4 variable or float, optional
monthly mean temperature anomaly, default 0
Returns
-------
netCDF4 variable
monthly mean temperature at the glacier terminus [degC]
"""
temp_terminus = temp + temp_grad * (terminus_hgt - ref_hgt) + temp_anomaly
return temp_terminus
def _compute_solid_prcp(prcp, prcp_factor, ref_hgt, min_hgt, max_hgt,
temp_terminus, temp_all_solid, temp_grad,
prcp_grad=0, prcp_anomaly=0):
"""Compute the (monthly) amount of solid precipitation onto the glacier
surface, following section 2.1.1 of Marzeion et. al., 2012. The fraction of
solid precipitation depends mainly on the terminus temperature and the
temperature thresholds for solid and liquid precipitation. It is possible
to scale the precipitation amount from the reference elevation to the
average glacier surface elevation given a gradient (zero per default).
Parameters
----------
prcp : netCDF4 variable
monthly mean climatological precipitation [kg/m2]
prcp_factor : float
precipitation scaling factor []
ref_hgt : float
reference elevation for climatological precipitation [m asl.]
min_hgt : float
minimum glacier elevation [m asl.]
max_hgt : float
maximum glacier elevation [m asl.]
temp_terminus : netCDF4 variable
monthly mean temperature at the glacier terminus [degC]
temp_all_solid : float
temperature threshold below which all precipitation is solid [degC]
temp_grad : netCDF4 variable or float
temperature lapse rate [degC per m of elevation change]
prcp_grad : netCDF4 variable or float, optional
precipitation lapse rate [kg/m2 per m of elevation change], default = 0
prcp_anomaly : netCDF4 variable or float, optional
monthly mean precipitation anomaly [kg/m2], default = 0
Returns
-------
netCDF4 variable
monthly mean solid precipitation [kg/m2]
"""
# compute fraction of solid precipitation
if max_hgt == min_hgt:
# prevent division by zero if max_hgt equals min_hgt
f_solid = (temp_terminus <= temp_all_solid).astype(int)
else:
# use scaling defined in paper
f_solid = (1 + (temp_terminus - temp_all_solid)
/ (temp_grad * (max_hgt - min_hgt)))
f_solid = np.clip(f_solid, 0, 1)
# compute mean elevation
mean_hgt = 0.5 * (min_hgt + max_hgt)
# apply precipitation scaling factor
prcp_solid = (prcp_factor * prcp + prcp_anomaly)
# compute solid precipitation
prcp_solid *= (1 + prcp_grad * (mean_hgt - ref_hgt)) * f_solid
return prcp_solid
def get_min_max_elevation(gdir):
    """Compute the minimal and maximal glacier surface elevation.

    Reads the gridded DEM and restricts it to the (RGI) glacier outline
    via the glacier mask.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`

    Returns
    -------
    [float, float]
        minimal and maximal glacier surface elevation [m asl.]
    """
    # read DEM and glacier mask from the gridded data file
    fpath = gdir.get_filepath('gridded_data')
    with ncDataset(fpath) as nc:
        mask = nc.variables['glacier_mask'][:]
        topo = nc.variables['topo'][:]
    # keep only cells inside the glacier outline
    glacier_topo = topo[mask == 1]
    return np.min(glacier_topo), np.max(glacier_topo)
def get_yearly_mb_temp_prcp(gdir, time_range=None, year_range=None):
    """Read climate file and compute mass balance relevant climate parameters.

    Those are the positive melting temperature at glacier terminus elevation
    as energy input and the amount of solid precipitation onto the glacier
    surface as mass input. Both parameters are computed as yearly sums.

    Default is to read all data, but it is possible to specify a time range by
    giving two (included) datetime bounds. Similarly, the year range limits the
    returned data to the given bounds of (hydrological) years.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
    time_range : datetime tuple, optional
        [t0, t1] time bounds, default = None
    year_range : float tuple, optional
        [y0, y1] year range, default = None

    Returns
    -------
    [float array, float array, float array]
        hydrological years (index), melting temperature [degC],
        solid precipitation [kg/m2]
    """
    # convert hydrological year range into time range and recurse with
    # explicit datetime bounds
    if year_range is not None:
        sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
        em = sm - 1 if (sm > 1) else 12
        t0 = datetime.datetime(year_range[0]-1, sm, 1)
        t1 = datetime.datetime(year_range[1], em, 1)
        return get_yearly_mb_temp_prcp(gdir, time_range=[t0, t1])

    # get needed parameters
    temp_all_solid = cfg.PARAMS['temp_all_solid']
    temp_melt = cfg.PARAMS['temp_melt']
    prcp_fac = cfg.PARAMS['prcp_scaling_factor']
    default_grad = cfg.PARAMS['temp_default_gradient']
    g_minmax = cfg.PARAMS['temp_local_gradient_bounds']

    # Marzeion et. al., 2012 used a precipitation lapse rate of 3%/100m.
    # But the prcp gradient is omitted for now.
    # prcp_grad = 3e-4
    prcp_grad = 0

    # read the climate file
    igrad = None
    with utils.ncDataset(gdir.get_filepath('climate_monthly'), mode='r') as nc:
        # time
        time = nc.variables['time']
        time = netCDF4.num2date(time[:], time.units)

        # limit data to given time range and
        # raise errors if bounds are outside available data
        if time_range is not None:
            p0 = np.where(time == time_range[0])[0]
            try:
                p0 = p0[0]
            except IndexError:
                raise climate.MassBalanceCalibrationError('time_range[0] '
                                                          'not found in file')
            p1 = np.where(time == time_range[1])[0]
            try:
                p1 = p1[0]
            except IndexError:
                raise climate.MassBalanceCalibrationError('time_range[1] not '
                                                          'found in file')
        else:
            p0 = 0
            p1 = len(time)-1

        time = time[p0:p1+1]

        # read time series of temperature and precipitation
        itemp = nc.variables['temp'][p0:p1+1]
        iprcp = nc.variables['prcp'][p0:p1+1]

        # read time series of temperature lapse rate
        if 'gradient' in nc.variables:
            igrad = nc.variables['gradient'][p0:p1+1]
            # Security for stuff that can happen with local gradients
            igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
            igrad = np.clip(igrad, g_minmax[0], g_minmax[1])

        # read climate data reference elevation
        ref_hgt = nc.ref_hgt

    # use the default gradient if no gradient is supplied by the climate file
    if igrad is None:
        igrad = itemp * 0 + default_grad

    # Up to this point, the code is mainly copy and paste from the
    # corresponding OGGM routine, with some minor adaptions.
    # What follows is my code: So abandon all hope, you who enter here!

    # get relevant elevation information
    min_hgt, max_hgt = get_min_max_elevation(gdir)

    # get temperature at glacier terminus (terminus = minimum elevation)
    temp_terminus = _compute_temp_terminus(itemp, igrad, ref_hgt, min_hgt)
    # compute positive 'melting' temperature/energy input
    temp = np.clip(temp_terminus - temp_melt, a_min=0, a_max=None)
    # get solid precipitation
    prcp_solid = _compute_solid_prcp(iprcp, prcp_fac, ref_hgt,
                                     min_hgt, max_hgt,
                                     temp_terminus, temp_all_solid,
                                     igrad, prcp_grad)

    # check if climate data includes all 12 month of all years
    ny, r = divmod(len(time), 12)
    if r != 0:
        raise ValueError('Climate data should be N full years exclusively')
    # last year gives the tone of the hydro year
    years = np.arange(time[-1].year - ny + 1, time[-1].year + 1, 1)

    # compute sums over each hydrological year (12 consecutive months)
    temp_yr = np.zeros(len(years))
    prcp_yr = np.zeros(len(years))
    for i, y in enumerate(years):
        temp_yr[i] = np.sum(temp[i * 12:(i + 1) * 12])
        prcp_yr[i] = np.sum(prcp_solid[i * 12:(i + 1) * 12])

    return years, temp_yr, prcp_yr
def _fallback_local_t_star(gdir):
"""A Fallback function if vascaling.local_t_star raises an Error.
This function will still write a `vascaling_mustar.json`, filled with NANs,
if vascaling.local_t_star fails and cfg.PARAMS['continue_on_error'] = True.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
the glacier directory to process
"""
# Scalars in a small dict for later
df = dict()
df['rgi_id'] = gdir.rgi_id
df['t_star'] = np.nan
df['bias'] = np.nan
df['mu_star_glacierwide'] = np.nan
gdir.write_json(df, 'vascaling_mustar')
@entity_task(log, writes=['vascaling_mustar'], fallback=_fallback_local_t_star)
def local_t_star(gdir, ref_df=None, tstar=None, bias=None):
    """Compute the local t* and associated glacier-wide mu*.

    If `tstar` and `bias` are not provided, they will be interpolated from the
    reference t* list.

    The mass balance calibration parameters (i.e. temperature lapse rate,
    temperature thresholds for melting, solid and liquid precipitation,
    precipitation scaling factor) are written to the climate_info.pkl file.
    The results of the calibration process (i.e. t*, mu*, bias) are stored in
    the `vascaling_mustar.json` file, to be used later by other tasks.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
    ref_df : :py:class:`pandas.Dataframe`, optional
        replace the default calibration list with a custom one
    tstar : int, optional
        the year when the glacier should be in equilibrium, default = None
    bias : float, optional
        the associated reference bias, default = None
    """
    # specify relevant mass balance parameters
    params = ['temp_default_gradient', 'temp_all_solid', 'temp_all_liq',
              'temp_melt', 'prcp_scaling_factor']

    if tstar is None or bias is None:
        # Do our own interpolation of t_start for given glacier
        if ref_df is None:
            if not cfg.PARAMS['run_mb_calibration']:
                # Make some checks and use the default one
                climate_info = gdir.read_json('climate_info')
                source = climate_info['baseline_climate_source']
                ok_source = ['CRU TS4.01', 'CRU TS3.23', 'HISTALP']
                # NOTE(review): np.any() on a generator returns the truth
                # value of the generator object (always True), so this
                # check can never trigger — confirm and consider
                # any(s in source.upper() for s in ok_source).
                if not np.any(s in source.upper() for s in ok_source):
                    msg = ('If you are using a custom climate file you should '
                           'run your own MB calibration.')
                    raise MassBalanceCalibrationError(msg)
                # major RGI version relevant
                v = gdir.rgi_version[0]
                # baseline climate
                str_s = 'cru4' if 'CRU' in source else 'histalp'
                vn = 'vas_ref_tstars_rgi{}_{}_calib_params'.format(v, str_s)
                # refuse pre-calibrated t* list if current MB params differ
                for k in params:
                    if cfg.PARAMS[k] != cfg.PARAMS[vn][k]:
                        msg = ('The reference t* you are trying to use was '
                               'calibrated with different MB parameters. You '
                               'might have to run the calibration manually.')
                        raise MassBalanceCalibrationError(msg)
                ref_df = cfg.PARAMS['vas_ref_tstars_rgi{}_{}'.format(v, str_s)]
            else:
                # Use the local calibration
                fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
                ref_df = pd.read_csv(fp)

        # Compute the distance to each glacier
        distances = utils.haversine(gdir.cenlon, gdir.cenlat,
                                    ref_df.lon, ref_df.lat)

        # Take the 10 closest
        # NOTE(review): [0:9] selects only 9 entries — confirm whether
        # [0:10] was intended to match the comment.
        aso = np.argsort(distances)[0:9]
        amin = ref_df.iloc[aso]
        distances = distances[aso]**2

        # If really close no need to divide, else weighted average
        if distances.iloc[0] <= 0.1:
            tstar = amin.tstar.iloc[0]
            bias = amin.bias.iloc[0]
        else:
            tstar = int(np.average(amin.tstar, weights=1./distances))
            bias = np.average(amin.bias, weights=1./distances)

    # Add the climate related params to the GlacierDir to make sure
    # other tools cannot fool around without re-calibration
    out = gdir.read_json('climate_info')
    out['mb_calib_params'] = {k: cfg.PARAMS[k] for k in params}
    gdir.write_json(out, 'climate_info')

    # We compute the overall mu* here but this is mostly for testing
    # Climate period
    mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
    yr = [tstar - mu_hp, tstar + mu_hp]

    # get monthly climatological values
    # of terminus temperature and solid precipitation
    years, temp, prcp = get_yearly_mb_temp_prcp(gdir, year_range=yr)

    # solve mass balance equation for mu*
    # note: calving is not considered
    mustar = np.mean(prcp) / np.mean(temp)

    # check for a finite result
    if not np.isfinite(mustar):
        raise climate.MassBalanceCalibrationError('{} has a non finite '
                                                  'mu'.format(gdir.rgi_id))

    # Clip the mu
    if not (cfg.PARAMS['min_mu_star'] < mustar < cfg.PARAMS['max_mu_star']):
        raise climate.MassBalanceCalibrationError('mu* out of '
                                                  'specified bounds.')

    # Scalars in a small dict for later
    df = dict()
    df['rgi_id'] = gdir.rgi_id
    df['t_star'] = int(tstar)
    df['bias'] = bias
    df['mu_star'] = mustar
    gdir.write_json(df, 'vascaling_mustar')
@entity_task(log, writes=['climate_info'])
def t_star_from_refmb(gdir, mbdf=None):
    """Computes the reference year t* for the given glacier and mass balance
    measurements.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
    mbdf : :py:class:`pd.Series`
        observed MB data indexed by year. If None, read automatically from the
        reference data, default = None

    Returns
    -------
    dict
        A dictionary {'t_star': [], 'bias': []} containing t* and the
        corresponding mass balance bias
    """
    # make sure we have no marine terminating glacier
    assert gdir.terminus_type == 'Land-terminating'

    # get reference time series of mass balance measurements
    if mbdf is None:
        mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']

    # compute average observed mass-balance
    ref_mb = np.mean(mbdf)

    # Compute one mu candidate per year and the associated statistics
    # Only get the years where we consider looking for t*
    y0, y1 = cfg.PARAMS['tstar_search_window']
    ci = gdir.read_json('climate_info')
    y0 = y0 or ci['baseline_hydro_yr_0']
    y1 = y1 or ci['baseline_hydro_yr_1']
    years = np.arange(y0, y1+1)

    ny = len(years)
    mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
    mb_per_mu = pd.Series(index=years)

    # get mass balance relevant climate parameters
    years, temp, prcp = get_yearly_mb_temp_prcp(gdir, year_range=[y0, y1])

    # get climate parameters, but only for years with mass balance measurements
    selind = np.searchsorted(years, mbdf.index)
    sel_temp = temp[selind]
    sel_prcp = prcp[selind]
    sel_temp = np.mean(sel_temp)
    sel_prcp = np.mean(sel_prcp)

    # for each year in the climatic period around t* (ignoring the first and
    # last mu_hp years), compute a mu-candidate by solving the mass balance
    # equation for mu. afterwards compute the average (modeled) mass balance
    # over all years with mass balance measurements using the mu-candidate
    for i, y in enumerate(years):
        # ignore begin and end, where the climate window is incomplete
        if ((i - mu_hp) < 0) or ((i + mu_hp) >= ny):
            continue

        # compute average melting temperature
        t_avg = np.mean(temp[i - mu_hp:i + mu_hp + 1])
        # skip if too cold, i.e. no melt occurs (division by zero)
        if t_avg < 1e-3:
            continue

        # compute the mu candidate for the current year, by solving the mass
        # balance equation for mu*
        mu = np.mean(prcp[i - mu_hp:i + mu_hp + 1]) / t_avg
        # compute mass balance using the calculated mu and the average climate
        # conditions over the years with mass balance records
        mb_per_mu[y] = np.mean(sel_prcp - mu * sel_temp)

    # compute differences between computed mass balance and reference value
    diff = (mb_per_mu - ref_mb).dropna()
    # raise error if no mu could be calculated for any year
    if len(diff) == 0:
        raise MassBalanceCalibrationError('No single valid mu candidate for '
                                          'this glacier!')

    # choose mu* as the mu candidate with the smallest absolute bias
    amin = np.abs(diff).idxmin()

    # write results to the `climate_info.pkl`
    d = gdir.read_json('climate_info')
    d['t_star'] = amin
    d['bias'] = diff[amin]
    gdir.write_json(d, 'climate_info')

    return {'t_star': amin, 'bias': diff[amin],
            'avg_mb_per_mu': mb_per_mu, 'avg_ref_mb': ref_mb}
@global_task
def compute_ref_t_stars(gdirs):
    """Detects the best t* for the reference glaciers and writes them to disk

    This task will be needed for mass balance calibration of custom climate
    data. For CRU and HISTALP baseline climate a pre-calibrated list is
    available and should be used instead.

    Parameters
    ----------
    gdirs : list of :py:class:`oggm.GlacierDirectory` objects
        will be filtered for reference glaciers
    """
    if not cfg.PARAMS['run_mb_calibration']:
        raise InvalidParamsError('Are you sure you want to calibrate the '
                                 'reference t*? There is a pre-calibrated '
                                 'version available. If you know what you are '
                                 'doing and still want to calibrate, set the '
                                 '`run_mb_calibration` parameter to `True`.')

    # Reference glaciers only if in the list and period is good
    ref_gdirs = utils.get_ref_mb_glaciers(gdirs)

    # Run t* detection for each reference glacier
    # (deferred import to avoid a circular module dependency)
    from oggm.workflow import execute_entity_task
    out = execute_entity_task(t_star_from_refmb, ref_gdirs)

    # collect per-glacier results into one dataframe, indexed by RGI id
    df = pd.DataFrame()
    for gdir, res in zip(ref_gdirs, out):
        rid = gdir.rgi_id
        df.loc[rid, 'lon'] = gdir.cenlon
        df.loc[rid, 'lat'] = gdir.cenlat
        df.loc[rid, 'n_mb_years'] = len(gdir.get_ref_mb_data())
        df.loc[rid, 'tstar'] = res['t_star']
        df.loc[rid, 'bias'] = res['bias']

    # Write out (loc-assignment upcasts to float, so cast back to int)
    df['tstar'] = df['tstar'].astype(int)
    df['n_mb_years'] = df['n_mb_years'].astype(int)
    file = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')
    df.sort_index().to_csv(file)
@entity_task(log)
def find_start_area(gdir, year_start=1851):
    """This task finds the start area for the given glacier, which results in
    the best results after the model integration (i.e., modeled glacier surface
    closest to measured RGI surface in 2003).

    All necessary prepro tasks (gis, centerline, climate) must be executed
    beforehand, as well as the local_t_star() task.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
    year_start : int, optional
        year at the beginning of the model integration, default = 1851
        (best choice for working with HISTALP data)

    Returns
    -------
    :py:class:`scipy.optimize.OptimizeResult`
    """
    # instance the mass balance model
    mbmod = VAScalingMassBalance(gdir)

    # get reference area and year from RGI
    a_rgi = gdir.rgi_area_m2
    rgi_df = utils.get_rgi_glacier_entities([gdir.rgi_id])
    y_rgi = int(rgi_df.BgnDate.values[0][:4])
    # get min and max glacier surface elevation
    h_min, h_max = get_min_max_elevation(gdir)

    # set up the glacier model with the reference values (from RGI)
    model_ref = VAScalingModel(year_0=y_rgi, area_m2_0=a_rgi,
                               min_hgt=h_min, max_hgt=h_max,
                               mb_model=mbmod)

    def _to_minimize(area_m2_start, ref, year_start=year_start):
        """Initialize VAS glacier model as copy of the reference model (ref)
        and adjust the model to the given starting area (area_m2_start) and
        starting year (1851). Let the model evolve to the same year as the
        reference model. Compute and return the relative absolute area error.

        Parameters
        ----------
        area_m2_start : float
        ref : :py:class:`oggm.VAScalingModel`
        year_start : float, optional
            the default value is inherited from the surrounding task

        Returns
        -------
        float
            relative absolute area estimate error
        """
        # define model as a copy of the reference model
        model_tmp = VAScalingModel(year_0=ref.year_0,
                                   area_m2_0=ref.area_m2_0,
                                   min_hgt=ref.min_hgt_0,
                                   max_hgt=ref.max_hgt,
                                   mb_model=ref.mb_model)
        # scale to desired starting size
        model_tmp.create_start_glacier(area_m2_start, year_start=year_start)
        # run and compare, return relative error
        return np.abs(model_tmp.run_and_compare(ref))

    # define bounds - between 100m2 and two times the reference size
    area_m2_bounds = [100, 2 * model_ref.area_m2_0]
    # run the minimization; note `args` must be a 1-tuple — the original
    # `args=(model_ref)` (no trailing comma) passed the model object itself
    minimization_res = minimize_scalar(_to_minimize, args=(model_ref,),
                                       bounds=area_m2_bounds,
                                       method='bounded')

    return minimization_res
class VAScalingMassBalance(MassBalanceModel):
"""Original mass balance model, used in Marzeion et al., 2012.
The general concept is similar to the oggm.PastMassBalance model.
Thereby the main difference is that the Volume/Area Scaling mass balance
model returns only one glacier wide mass balance value per month or year.
"""
def __init__(self, gdir, mu_star=None, bias=None,
filename='climate_monthly', input_filesuffix='',
repeat=False, ys=None, ye=None, check_calib_params=True):
"""Initialize.
Parameters
----------
gdir : :py:class:`oggm.GlacierDirectory`
mu_star : float, optional
set to the alternative value of mu* you want to use, while
the default is to use the calibrated value
bias : float, optional
set to the alternative value of the calibration bias [mm we yr-1]
you want to use (the default is to use the calibrated value)
Note that this bias is *substracted* from the computed MB. Indeed:
BIAS = MODEL_MB - REFERENCE_MB
filename : str, optional
set to a different BASENAME if you want to use alternative climate
data
input_filesuffix : str, optional
the file suffix of the input climate file, no suffix as default
repeat : bool
Whether the climate period given by [ys, ye] should be repeated
indefinitely in a circular way, default=False
ys : int
The start of the climate period where the MB model is valid
(default: the period with available data)
ye : int
The end of the climate period where the MB model is valid
(default: the period with available data)
check_calib_params : bool
OGGM will try hard not to use wrongly calibrated mu* by checking
the parameters used during calibration and the ones you are
using at run time. If they don't match, it will raise an error.
Set to False to suppress this check.
"""
# initalize of oggm.MassBalanceModel
super(VAScalingMassBalance, self).__init__()
# read mass balance parameters from file
if mu_star is None:
df = gdir.read_json('vascaling_mustar')
mu_star = df['mu_star']
if bias is None:
if cfg.PARAMS['use_bias_for_run']:
df = gdir.read_json('vascaling_mustar')
bias = df['bias']
else:
bias = 0.
# set mass balance parameters
self.mu_star = mu_star
self.bias = bias
# set mass balance calibration parameters
self.t_solid = cfg.PARAMS['temp_all_solid']
self.t_liq = cfg.PARAMS['temp_all_liq']
self.t_melt = cfg.PARAMS['temp_melt']
prcp_fac = cfg.PARAMS['prcp_scaling_factor']
default_grad = cfg.PARAMS['temp_default_gradient']
# Check the climate related params to the GlacierDir to make sure
if check_calib_params:
mb_calib = gdir.read_json('climate_info')['mb_calib_params']
for k, v in mb_calib.items():
if v != cfg.PARAMS[k]:
raise RuntimeError('You seem to use different mass-'
'balance parameters than used for the '
'calibration. '
'Set `check_calib_params=False` '
'to ignore this warning.')
# set public attributes
self.temp_bias = 0.
self.prcp_bias = 1.
self.repeat = repeat
self.hemisphere = gdir.hemisphere
# read climate file
fpath = gdir.get_filepath(filename, filesuffix=input_filesuffix)
with ncDataset(fpath, mode='r') as nc:
# time
time = nc.variables['time']
time = netCDF4.num2date(time[:], time.units)
ny, r = divmod(len(time), 12)
if r != 0:
raise ValueError('Climate data should be N full years')
# This is where we switch to hydro float year format
# Last year gives the tone of the hydro year
self.years = np.repeat(np.arange(time[-1].year-ny+1,
time[-1].year+1), 12)
self.months = np.tile(np.arange(1, 13), ny)
# Read timeseries
self.temp = nc.variables['temp'][:]
self.prcp = nc.variables['prcp'][:] * prcp_fac
if 'gradient' in nc.variables:
grad = nc.variables['gradient'][:]
# Security for stuff that can happen with local gradients
g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
grad = np.where(~np.isfinite(grad), default_grad, grad)
grad = np.clip(grad, g_minmax[0], g_minmax[1])
else:
grad = self.prcp * 0 + default_grad
self.grad = grad
self.ref_hgt = nc.ref_hgt
self.ys = self.years[0] if ys is None else ys
self.ye = self.years[-1] if ye is None else ye
# compute climatological precipitation around t*
# needed later to estimate the volume/lenght scaling parameter
t_star = gdir.read_json('vascaling_mustar')['t_star']
mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
yr = [t_star - mu_hp, t_star + mu_hp]
_, _, prcp_clim = get_yearly_mb_temp_prcp(gdir, year_range=yr)
# convert from [mm we. yr-1] into SI units [m we. yr-1]
prcp_clim = prcp_clim * 1e-3
self.prcp_clim = np.mean(prcp_clim)
def get_monthly_climate(self, min_hgt, max_hgt, year):
"""Compute and return monthly positive terminus temperature
and solid precipitation amount for given month.
Parameters
----------
min_hgt : float
glacier terminus elevation [m asl.]
max_hgt : float
maximal glacier surface elevation [m asl.]
year : float
floating year, following the hydrological year convention
Returns
-------
[float, float]
(temp_for_melt) positive terminus temperature [degC] and
(prcp_solid) solid precipitation amount [kg/m^2]
"""
# process given time index
y, m = floatyear_to_date(year)
if self.repeat:
y = self.ys + (y - self.ys) % (self.ye - self.ys + 1)
if y < self.ys or y > self.ye:
raise ValueError('year {} out of the valid time bounds: '
'[{}, {}]'.format(y, self.ys, self.ye))
pok = np.where((self.years == y) & (self.months == m))[0][0]
# Read timeseries
itemp = self.temp[pok] + self.temp_bias
iprcp = self.prcp[pok] * self.prcp_bias
igrad = self.grad[pok]
# compute terminus temperature
temp_terminus = _compute_temp_terminus(itemp, igrad,
self.ref_hgt, min_hgt)
# compute positive 'melting' temperature/energy input
temp_for_melt = np.clip(temp_terminus - self.t_melt,
a_min=0, a_max=None)
# compute solid precipitation
prcp_solid = _compute_solid_prcp(iprcp, 1,
self.ref_hgt, min_hgt, max_hgt,
temp_terminus, self.t_solid, igrad)
return temp_for_melt, prcp_solid
def get_monthly_mb(self, min_hgt, max_hgt, year):
"""Compute and return the glacier wide mass balance
for the given year/month combination.
Possible mb bias is applied...
Parameters
----------
min_hgt : float
glacier terminus elevation [m asl.]
max_hgt : float
maximal glacier (surface) elevation [m asl.]
year : float
floating year and month, following the hydrological year convention
Returns
-------
float
average glacier wide mass balance [m/s]
"""
# get melting temperature and solid precipitation
temp_for_melt, prcp_solid = self.get_monthly_climate(min_hgt,
max_hgt,
year=year)
# compute mass balance
mb_month = prcp_solid - self.mu_star * temp_for_melt
# apply mass balance bias
mb_month -= self.bias / SEC_IN_YEAR * SEC_IN_MONTH
# convert into SI units [m_ice/s]
return mb_month / SEC_IN_MONTH / self.rho
def get_annual_climate(self, min_hgt, max_hgt, year):
"""Compute and return monthly positive terminus temperature
and solid precipitation amount for all months
of the given (hydrological) year.
Parameters
----------
min_hgt : float
glacier terminus elevation [m asl.]
max_hgt : float
maximal glacier (surface) elevation [m asl.]
year : float
floating year, following the hydrological year convention
Returns
-------
[float array, float array]
(temp_for_melt) monthly positive terminus temperature [degC] and
(prcp_solid) monthly solid precipitation amount [kg/m2]
"""
# process given time index
year = np.floor(year)
if self.repeat:
year = self.ys + (year - self.ys) % (self.ye - self.ys + 1)
if year < self.ys or year > self.ye:
raise ValueError('year {} out of the valid time bounds: '
'[{}, {}]'.format(year, self.ys, self.ye))
pok = np.where(self.years == year)[0]
if len(pok) < 1:
raise ValueError('Year {} not in record'.format(int(year)))
# Read timeseries
itemp = self.temp[pok] + self.temp_bias
iprcp = self.prcp[pok] * self.prcp_bias
igrad = self.grad[pok]
# compute terminus temperature
temp_terminus = _compute_temp_terminus(itemp, igrad,
self.ref_hgt, min_hgt)
# compute positive 'melting' temperature/energy input
temp_for_melt = np.clip(temp_terminus - self.t_melt,
a_min=0, a_max=None)
# compute solid precipitation
# prcp factor is set to 1 since it the time series is already corrected
prcp_solid = _compute_solid_prcp(iprcp, 1,
self.ref_hgt, min_hgt, max_hgt,
temp_terminus, self.t_solid, igrad)
return temp_for_melt, prcp_solid
def get_annual_mb(self, min_hgt, max_hgt, year):
"""Compute and return the annual glacier wide mass balance for the
given year. Possible mb bias is applied.
Parameters
----------
min_hgt : float
glacier terminus elevation
max_hgt : float
maximal glacier (surface) elevation
year : float
floating year, following the hydrological year convention
Returns
-------
float
average glacier wide mass balance [m/s]
"""
# get annual mass balance climate
temp_for_melt, prcp_solid = self.get_annual_climate(min_hgt,
max_hgt,
year)
# compute mass balance
mb_annual = np.sum(prcp_solid - self.mu_star * temp_for_melt)
# apply bias and convert into SI units
return (mb_annual - self.bias) / SEC_IN_YEAR / self.rho
def get_specific_mb(self, min_hgt, max_hgt, year):
"""Compute and return the annual specific mass balance
for the given year. Possible mb bias is applied.
Parameters
----------
min_hgt : float
glacier terminus elevation
max_hgt : float
maximal glacier (surface) elevation
year : float
float year, using the hydrological year convention
Returns
-------
float
glacier wide average mass balance, units of millimeter water
equivalent per year [mm w.e./yr]
"""
# get annual mass balance climate
temp_for_melt, prcp_solid = self.get_annual_climate(min_hgt,
max_hgt,
year)
# compute mass balance
mb_annual = np.sum(prcp_solid - self.mu_star * temp_for_melt)
# apply bias
return mb_annual - self.bias
def get_monthly_specific_mb(self, min_hgt=None, max_hgt=None, year=None):
"""Compute and return the monthly specific mass balance
for the given month. Possible mb bias is applied.
Parameters
----------
min_hgt : float, optional
glacier terminus elevation [m asl.], default = None
max_hgt : float, optional
maximal glacier (surface) elevation [m asl.], default = None
year : float, optional
float year and month, using the hydrological year convention,
default = None
Returns
-------
float
glacier wide average mass balance, units of millimeter water
equivalent per months [mm w.e./yr]
"""
# get annual mass balance climate
temp_for_melt, prcp_solid = self.get_monthly_climate(min_hgt,
max_hgt,
year)
# compute mass balance
mb_monthly = np.sum(prcp_solid - self.mu_star * temp_for_melt)
# apply bias and return
return mb_monthly - (self.bias / SEC_IN_YEAR * SEC_IN_MONTH)
def get_ela(self, year=None):
"""The ELA can not be calculated using this mass balance model.
Parameters
----------
year : float, optional
Raises
-------
NotImplementedError
"""
raise NotImplementedError('The equilibrium line altitude can not be ' +
'computed for the `VAScalingMassBalance` ' +
'model.')
class RandomVASMassBalance(MassBalanceModel):
    """Random shuffle of all MB years within a given time period.
    This is useful for finding a possible past glacier state or for sensitivity
    experiments.
    Note that this is going to be sensitive to extreme years in certain
    periods, but it is by far more physically reasonable than other
    approaches based on gaussian assumptions.
    """
    def __init__(self, gdir, mu_star=None, bias=None,
                 y0=None, halfsize=15, seed=None, filename='climate_monthly',
                 input_filesuffix='', all_years=False,
                 unique_samples=False):
        """Initialize.
        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mu* you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *substracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        y0 : int, optional, default: tstar
            the year at the center of the period of interest. The default
            is to use tstar as center.
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        seed : int, optional
            Random seed used to initialize the pseudo-random number generator.
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        all_years : bool
            if True, overwrites ``y0`` and ``halfsize`` to use all available
            years.
        unique_samples: bool
            if true, chosen random mass-balance years will only be available
            once per random climate period-length
            if false, every model year will be chosen from the random climate
            period with the same probability
        """
        super(RandomVASMassBalance, self).__init__()
        # initialize the VAS equivalent of the PastMassBalance model over the
        # whole available climate period
        self.mbmod = VAScalingMassBalance(gdir, mu_star=mu_star, bias=bias,
                                          filename=filename,
                                          input_filesuffix=input_filesuffix)
        # get mb model parameters
        self.prcp_clim = self.mbmod.prcp_clim
        # define years of climate period
        if all_years:
            # use full climate period
            # NOTE(review): `self.mbmod.years` is the *monthly repeated* year
            # array (12 entries per year) — verify this is intended for
            # `yr_range`/sampling below
            self.years = self.mbmod.years
        else:
            if y0 is None:
                # choose t* as center of climate period
                df = gdir.read_json('vascaling_mustar')
                self.y0 = df['t_star']
            else:
                # set y0 as attribute
                self.y0 = y0
            # use 31-year period around given year `y0`
            self.years = np.arange(self.y0-halfsize, self.y0+halfsize+1)
        # define year range and number of years
        self.yr_range = (self.years[0], self.years[-1]+1)
        self.ny = len(self.years)
        self.hemisphere = gdir.hemisphere
        # define random state
        self.rng = np.random.RandomState(seed)
        # cache: maps each requested model year to its random climate year
        self._state_yr = dict()
        # whether or not to sample with or without replacement
        self.unique_samples = unique_samples
        if self.unique_samples:
            # pool of not-yet-drawn years for no-replacement sampling
            self.sampling_years = self.years
    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.mbmod.temp_bias
    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # invalidate cached interpolants (if any) before changing the bias
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.temp_bias = value
    @property
    def prcp_bias(self):
        """Precipitation factor to apply to the original series."""
        return self.mbmod.prcp_bias
    @prcp_bias.setter
    def prcp_bias(self, value):
        """Precipitation factor to apply to the original series."""
        # invalidate cached interpolants (if any) before changing the bias
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.prcp_bias = value
    @property
    def bias(self):
        """Residual bias to apply to the original series."""
        return self.mbmod.bias
    @bias.setter
    def bias(self, value):
        """Residual bias to apply to the original series."""
        self.mbmod.bias = value
    def get_state_yr(self, year=None):
        """For a given year, get the random year associated to it."""
        year = int(year)
        if year not in self._state_yr:
            if self.unique_samples:
                # --- Sampling without replacement ---
                if self.sampling_years.size == 0:
                    # refill sample pool when all years were picked once
                    self.sampling_years = self.years
                # choose one year which was not used in the current period
                _sample = self.rng.choice(self.sampling_years)
                # write chosen year to dictionary
                self._state_yr[year] = _sample
                # update sample pool: remove the chosen year from it
                # (np.delete returns a new array, `self.years` stays intact)
                self.sampling_years = np.delete(
                    self.sampling_years,
                    np.where(self.sampling_years == _sample))
            else:
                # --- Sampling with replacement ---
                self._state_yr[year] = self.rng.randint(*self.yr_range)
        return self._state_yr[year]
    def get_monthly_mb(self, min_hgt, max_hgt, year=None):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the glacier wide mass balance
        for the given year/month combination.
        Possible mb bias is applied...
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation [m asl.]
        max_hgt : float
            maximal glacier (surface) elevation [m asl.]
        year : float
            floating year and month, following the hydrological year convention
        Returns
        -------
        float
            average glacier wide mass balance [m/s]
        """
        # keep the month, swap the year for its random counterpart
        ryr, m = floatyear_to_date(year)
        ryr = utils.date_to_floatyear(self.get_state_yr(ryr), m)
        return self.mbmod.get_monthly_mb(min_hgt, max_hgt, year=ryr)
    def get_annual_mb(self, min_hgt, max_hgt, year=None):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the annual glacier wide mass balance for the given
        year. Possible mb bias is applied.
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation
        max_hgt : float
            maximal glacier (surface) elevation
        year : float
            floating year, following the hydrological year convention
        Returns
        -------
        float
            average glacier wide mass balance [m/s]
        """
        ryr = self.get_state_yr(int(year))
        return self.mbmod.get_annual_mb(min_hgt, max_hgt, year=ryr)
    def get_specific_mb(self, min_hgt, max_hgt, year):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the annual specific mass balance for the given year.
        Possible mb bias is applied.
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation
        max_hgt : float
            maximal glacier (surface) elevation
        year : float
            float year, using the hydrological year convention
        Returns
        -------
        float
            glacier wide average mass balance, units of millimeter water
            equivalent per year [mm w.e./yr]
        """
        ryr = self.get_state_yr(int(year))
        return self.mbmod.get_specific_mb(min_hgt, max_hgt, year=ryr)
    def get_ela(self, year=None):
        """The ELA can not be calculated using this mass balance model.
        Parameters
        ----------
        year : float, optional
        Raises
        -------
        NotImplementedError
        """
        # delegates to the underlying model, which raises NotImplementedError
        ryr = self.get_state_yr(int(year))
        return self.mbmod.get_ela(year=ryr)
@entity_task(log)
def run_random_climate(gdir, nyears=1000, y0=None, halfsize=15,
                       bias=None, seed=None, temperature_bias=None,
                       climate_filename='climate_monthly',
                       climate_input_filesuffix='', output_filesuffix='',
                       init_area_m2=None, unique_samples=False):
    """Runs the random mass balance model for a given number of years.
    This initializes a :py:class:`oggm.core.vascaling.RandomVASMassBalance`,
    and runs and stores a :py:class:`oggm.core.vascaling.VAScalingModel` with
    the given mass balance model.
    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    nyears : int, optional
        length of the simulation, default = 1000
    y0 : int, optional
        central year of the random climate period. The default is to be
        centred on t*. Default = None
    halfsize : int, optional
        the half-size of the time window (window size = 2 * halfsize + 1),
        default = 15
    bias : float, optional
        bias of the mb model. Default is to use the calibrated one, which
        is often a better idea. For t* experiments it can be useful to set it
        to zero. Default = None
    seed : int
        seed for the random generator. If you ignore this, the runs will be
        different each time. Setting it to a fixed seed accross glaciers can
        be usefull if you want to have the same climate years for all of them
    temperature_bias : float, optional
        add a bias to the temperature timeseries, default = None
    climate_filename : str, optional
        name of the climate file, e.g. 'climate_monthly' (default) or
        'gcm_data'
    climate_input_filesuffix: str, optional
        filesuffix for the input climate file
    output_filesuffix : str, optional
        this add a suffix to the output file (useful to avoid overwriting
        previous experiments)
    init_area_m2: float, optional
        glacier area with which the model is initialized, default is RGI value
    unique_samples: bool, optional
        if true, chosen random mass-balance years will only be available once
        per random climate period-length
        if false, every model year will be chosen from the random climate
        period with the same probability (default)
    Returns
    -------
    :py:class:`oggm.core.vascaling.VAScalingModel`
    """
    # instance mass balance model
    mb_mod = RandomVASMassBalance(gdir, y0=y0, halfsize=halfsize, bias=bias,
                                  seed=seed, filename=climate_filename,
                                  input_filesuffix=climate_input_filesuffix,
                                  unique_samples=unique_samples)
    if temperature_bias is not None:
        # add given temperature bias to mass balance model
        mb_mod.temp_bias = temperature_bias
    # instance the model with the start geometry
    min_hgt, max_hgt = get_min_max_elevation(gdir)
    if init_area_m2 is None:
        init_area_m2 = gdir.rgi_area_m2
    model = VAScalingModel(year_0=0, area_m2_0=init_area_m2,
                           min_hgt=min_hgt, max_hgt=max_hgt,
                           mb_model=mb_mod)
    # specify path where to store model diagnostics; `delete=True` removes a
    # stale file from a previous run with the same suffix
    # (a duplicate assignment with a hard-coded 'vas' suffix was removed
    # here: it was immediately overwritten and deleted an unrelated file)
    diag_path = gdir.get_filepath('model_diagnostics',
                                  filesuffix=output_filesuffix,
                                  delete=True)
    # run model and store the diagnostics to file
    model.run_until_and_store(year_end=nyears, diag_path=diag_path)
    return model
class ConstantVASMassBalance(MassBalanceModel):
    """Constant mass-balance during a chosen period.
    This is useful for equilibrium experiments.
    The mass balance of a given year/month is the average mass balance over
    a (2 * halfsize + 1)-year climate period centred on `y0` (default: t*).
    """
    def __init__(self, gdir, mu_star=None, bias=None,
                 y0=None, halfsize=15, filename='climate_monthly',
                 input_filesuffix=''):
        """Initialize.
        Parameters
        ----------
        gdir : GlacierDirectory
            the glacier directory
        mu_star : float, optional
            set to the alternative value of mu* you want to use
            (the default is to use the calibrated value)
        bias : float, optional
            set to the alternative value of the calibration bias [mm we yr-1]
            you want to use (the default is to use the calibrated value)
            Note that this bias is *substracted* from the computed MB. Indeed:
            BIAS = MODEL_MB - REFERENCE_MB.
        y0 : int, optional, default: tstar
            the year at the center of the period of interest. The default
            is to use tstar as center.
        halfsize : int, optional
            the half-size of the time window (window size = 2 * halfsize + 1)
        filename : str, optional
            set to a different BASENAME if you want to use alternative climate
            data.
        input_filesuffix : str
            the file suffix of the input climate file
        """
        super(ConstantVASMassBalance, self).__init__()
        # initialize the VAS equivalent of the PastMassBalance model over the
        # whole available climate period
        self.mbmod = VAScalingMassBalance(gdir, mu_star=mu_star, bias=bias,
                                          filename=filename,
                                          input_filesuffix=input_filesuffix)
        # use t* as the center of the climatological period if not given
        if y0 is None:
            df = gdir.read_json('vascaling_mustar')
            y0 = df['t_star']
        # set model properties
        self.prcp_clim = self.mbmod.prcp_clim
        self.y0 = y0
        self.halfsize = halfsize
        self.years = np.arange(y0 - halfsize, y0 + halfsize + 1)
        self.hemisphere = gdir.hemisphere
    @property
    def temp_bias(self):
        """Temperature bias to add to the original series."""
        return self.mbmod.temp_bias
    @temp_bias.setter
    def temp_bias(self, value):
        """Temperature bias to add to the original series."""
        # invalidate cached interpolants (if any) before changing the bias
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.temp_bias = value
    @property
    def prcp_bias(self):
        """Precipitation factor to apply to the original series."""
        return self.mbmod.prcp_bias
    @prcp_bias.setter
    def prcp_bias(self, value):
        """Precipitation factor to apply to the original series."""
        # invalidate cached interpolants (if any) before changing the bias
        for attr_name in ['_lazy_interp_yr', '_lazy_interp_m']:
            if hasattr(self, attr_name):
                delattr(self, attr_name)
        self.mbmod.prcp_bias = value
    @property
    def bias(self):
        """Residual bias to apply to the original series."""
        return self.mbmod.bias
    @bias.setter
    def bias(self, value):
        """Residual bias to apply to the original series."""
        self.mbmod.bias = value
    def get_climate(self, min_hgt, max_hgt, year=None):
        """Average mass balance climate information for given glacier.
        Note that prcp is corrected with the precipitation factor and that
        all other biases (precipitation, temp) are applied.
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation [m asl.]
        max_hgt : float
            maximal glacier (surface) elevation [m asl.]
        year : float, optional
            unused, kept for interface compatibility
        Returns
        -------
        [float, float]
            (temp_for_melt) positive terminus temperature [degC] and
            (prcp_solid) solid precipitation amount [kg/m^2]
        """
        # create monthly timeseries over whole climate period
        yrs = utils.monthly_timeseries(self.years[0], self.years[-1],
                                       include_last_year=True)
        # create empty containers
        temp = list()
        prcp = list()
        # iterate over all months of the climate period
        for yr in yrs:
            # get positive melting temperature and solid precipitaion
            t, p = self.mbmod.get_monthly_climate(min_hgt, max_hgt, year=yr)
            temp.append(t)
            prcp.append(p)
        # Note that we do not weight for number of days per month - bad
        return (np.mean(temp, axis=0),
                np.mean(prcp, axis=0))
    def get_monthly_mb(self, min_hgt, max_hgt, year=None):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the glacier wide mass balance for the given month,
        averaged over all years of the constant climate period.
        Possible mb bias is applied...
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation [m asl.]
        max_hgt : float
            maximal glacier (surface) elevation [m asl.]
        year : float
            floating year and month, following the hydrological year convention
        Returns
        -------
        float
            average glacier wide mass balance [m/s]
        """
        # extract month from the given floating year
        # (BUG FIX: `year` was previously not passed, raising a TypeError)
        _, m = floatyear_to_date(year)
        # average the mass balance of this month over all years of the
        # climate period
        # (BUG FIX: previously called `get_annual_mb`, which floors the
        # floating year and therefore ignored the month)
        years = [utils.date_to_floatyear(yr, m) for yr in self.years]
        mb = [self.mbmod.get_monthly_mb(min_hgt, max_hgt, year=yr)
              for yr in years]
        # return average value
        return np.average(mb)
    def get_annual_mb(self, min_hgt, max_hgt, year=None):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the annual glacier wide mass balance, averaged
        over all years of the constant climate period.
        Possible mb bias is applied.
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation
        max_hgt : float
            maximal glacier (surface) elevation
        year : float
            unused, kept for interface compatibility
        Returns
        -------
        float
            average glacier wide mass balance [m/s]
        """
        # sum up the mass balance over all years in climate period
        mb = [self.mbmod.get_annual_mb(min_hgt, max_hgt, year=yr)
              for yr in self.years]
        # return average value
        return np.average(mb)
    def get_specific_mb(self, min_hgt, max_hgt, year=None):
        """ Wrapper around the class intern mass balance model function.
        Compute and return the annual specific mass balance, averaged over
        all years of the constant climate period.
        Possible mb bias is applied.
        Parameters
        ----------
        min_hgt : float
            glacier terminus elevation
        max_hgt : float
            maximal glacier (surface) elevation
        year : float
            unused, kept for interface compatibility
        Returns
        -------
        float
            glacier wide average mass balance, units of millimeter water
            equivalent per year [mm w.e./yr]
        """
        mb = [self.mbmod.get_specific_mb(min_hgt, max_hgt, year=yr)
              for yr in self.years]
        # return average value
        return np.average(mb)
    def get_ela(self, year=None):
        """The ELA can not be calculated using this mass balance model.
        Parameters
        ----------
        year : float, optional
        Raises
        -------
        NotImplementedError
        """
        # delegates to the underlying model, which raises NotImplementedError
        return self.mbmod.get_ela(year=self.y0)
@entity_task(log)
def run_constant_climate(gdir, nyears=1000, y0=None, halfsize=15,
                         bias=None, temperature_bias=None,
                         climate_filename='climate_monthly',
                         climate_input_filesuffix='', output_filesuffix='',
                         init_area_m2=None):
    """Run the volume/area scaling model under a constant climate.
    This sets up a :py:class:`oggm.core.vascaling.ConstantVASMassBalance`
    (constant climate over a window centred on `y0`, by default t*) and
    evolves a :py:class:`oggm.core.vascaling.VAScalingModel` with it,
    storing the model diagnostics to file.
    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    nyears : int, optional
        length of the simulation, default = 1000
    y0 : int, optional
        central year of the climate period. The default is to be
        centred on t*. Default = None
    halfsize : int, optional
        the half-size of the time window (window size = 2 * halfsize + 1),
        default = 15
    bias : float, optional
        bias of the mb model. Default is to use the calibrated one, which
        is often a better idea. For t* experiments it can be useful to set it
        to zero. Default = None
    temperature_bias : float, optional
        add a bias to the temperature timeseries, default = None
    climate_filename : str, optional
        name of the climate file, e.g. 'climate_monthly' (default) or
        'gcm_data'
    climate_input_filesuffix: str, optional
        filesuffix for the input climate file
    output_filesuffix : str, optional
        this add a suffix to the output file (useful to avoid overwriting
        previous experiments)
    init_area_m2: float, optional
        glacier area with which the model is initialized, default is RGI value
    Returns
    -------
    :py:class:`oggm.core.vascaling.VAScalingModel`
    """
    # mass balance model: constant climate centred on `y0` (default t*)
    mbmod = ConstantVASMassBalance(gdir, mu_star=None, bias=bias, y0=y0,
                                   halfsize=halfsize,
                                   filename=climate_filename,
                                   input_filesuffix=climate_input_filesuffix)
    if temperature_bias is not None:
        # apply the given bias to the temperature timeseries
        mbmod.temp_bias = temperature_bias
    # start geometry: elevation range and (RGI or user supplied) area
    min_hgt, max_hgt = get_min_max_elevation(gdir)
    start_area = gdir.rgi_area_m2 if init_area_m2 is None else init_area_m2
    # set up the evolution model
    model = VAScalingModel(year_0=0, area_m2_0=start_area,
                           min_hgt=min_hgt, max_hgt=max_hgt,
                           mb_model=mbmod)
    # output file for the model diagnostics (overwrites a stale one)
    diag_path = gdir.get_filepath('model_diagnostics',
                                  filesuffix=output_filesuffix,
                                  delete=True)
    # run the model and store the diagnostics
    model.run_until_and_store(year_end=nyears, diag_path=diag_path)
    return model
class VAScalingModel(object):
"""The volume area scaling glacier model following Marzeion et. al., 2012.
@TODO: finish DocString
All used parameters are in SI units (even the climatological precipitation
(attribute of the mass balance model) is given in [m. we yr-1]).
Parameters
----------
"""
def __repr__(self):
"""Object representation."""
return "{}: {}".format(self.__class__, self.__dict__)
def __str__(self):
"""String representation of the dynamic model, includes current
year, area, volume, length and terminus elevation."""
return "{}\nyear: {}\n".format(self.__class__, self.year) \
+ "area [km2]: {:.2f}\n".format(self.area_m2 / 1e6) \
+ "volume [km3]: {:.3f}\n".format(self.volume_m3 / 1e9) \
+ "length [km]: {:.2f}\n".format(self.length_m / 1e3) \
+ "min elev [m asl.]: {:.0f}\n".format(self.min_hgt) \
+ "spec mb [mm w.e. yr-1]: {:.2f}".format(self.spec_mb)
    def __init__(self, year_0, area_m2_0, min_hgt, max_hgt, mb_model):
        """Instance new glacier model.
        Parameters
        ----------
        year_0: float
            year when the simulation starts
        area_m2_0: float
            starting area at year_0 [m2]
        min_hgt: float
            glacier terminus elevation at year_0 [m asl.]
        max_hgt: float
            maximal glacier surface elevation at year_0 [m asl.]
        mb_model: :py:class:`oggm.core.vascaling.VAScalingMassBalance`
            instance of mass balance model
        """
        # get constants from cfg.PARAMS
        self.rho = cfg.PARAMS['ice_density']
        # get scaling constants
        self.cl = cfg.PARAMS['vas_c_length_m']
        self.ca = cfg.PARAMS['vas_c_area_m2']
        # get scaling exponents
        self.ql = cfg.PARAMS['vas_q_length']
        self.gamma = cfg.PARAMS['vas_gamma_area']
        # define temporal index (`*_0` attributes keep the start state)
        self.year_0 = year_0
        self.year = year_0
        # define geometrical/spatial parameters
        self.area_m2_0 = area_m2_0
        self.area_m2 = area_m2_0
        self.min_hgt = min_hgt
        self.min_hgt_0 = min_hgt
        self.max_hgt = max_hgt
        # compute volume (m3) and length (m) from area (using scaling laws)
        # V = c_a * A ** gamma
        self.volume_m3_0 = self.ca * self.area_m2_0 ** self.gamma
        self.volume_m3 = self.volume_m3_0
        # L = (V / c_l) ** (1 / q_l), the inverted volume/length scaling law
        self.length_m_0 = (self.volume_m3 / self.cl) ** (1 / self.ql)
        self.length_m = self.length_m_0
        # define mass balance model and spec mb
        self.mb_model = mb_model
        self.spec_mb = self.mb_model.get_specific_mb(self.min_hgt,
                                                     self.max_hgt,
                                                     self.year)
        # create geometry change parameters
        self.dL = 0
        self.dA = 0
        self.dV = 0
        # create time scale parameters
        self.tau_a = 1
        self.tau_l = 1
def _get_specific_mb(self):
"""Invoke `get_specific_mb()` from mass balance model for current year
and glacier terminus elevation."""
self.spec_mb = self.mb_model.get_specific_mb(self.min_hgt,
self.max_hgt,
self.year)
def _compute_time_scales(self):
"""Compute the time scales for glacier length `tau_l`
and glacier surface area `tau_a` for current time step."""
self.tau_l = self.volume_m3 / (self.mb_model.prcp_clim * self.area_m2)
self.tau_a = self.tau_l * self.area_m2 / self.length_m ** 2
def reset(self):
"""Set model attributes back to starting values."""
self.year = self.year_0
self.length_m = self.length_m_0
self.area_m2 = self.area_m2_0
self.volume_m3 = self.volume_m3_0
self.min_hgt = self.min_hgt_0
# define mass balance model and spec mb
self._get_specific_mb()
# reset geometry change parameters
self.dL = 0
self.dA = 0
self.dV = 0
# create time scale parameters
self.tau_a = 1
self.tau_l = 1
pass
    def step(self):
        """Advance model glacier by one year. This includes the following:
        - computing time scales
        - computing the specific mass balance
        - computing volume change and new volume
        - computing area change and new area
        - computing length change and new length
        - computing new terminus elevation
        NOTE: the statement order matters — the volume change uses the
        current area, while the area and length changes use the already
        updated volume.
        """
        # compute time scales
        self._compute_time_scales()
        # get specific mass balance B(t) [mm w.e. yr-1]
        self._get_specific_mb()
        # compute volume change dV(t): mass change (spec_mb times area)
        # converted into ice volume via the ice density
        self.dV = self.area_m2 * self.spec_mb / self.rho
        # compute new volume V(t+1)
        self.volume_m3 += self.dV
        # compute area change dA(t): relaxation towards the area implied by
        # the volume/area scaling law, over time scale tau_a
        self.dA = ((self.volume_m3 / self.ca) ** (1 / self.gamma)
                   - self.area_m2) / self.tau_a
        # compute new area A(t+1)
        self.area_m2 += self.dA
        # compute length change dL(t): relaxation towards the length implied
        # by the volume/length scaling law, over time scale tau_l
        self.dL = ((self.volume_m3 / self.cl) ** (1 / self.ql)
                   - self.length_m) / self.tau_l
        # compute new length L(t+1)
        self.length_m += self.dL
        # compute new terminus elevation min_hgt(t+1): scale the initial
        # elevation range with the relative length change
        self.min_hgt = self.max_hgt + (self.length_m / self.length_m_0
                                       * (self.min_hgt_0 - self.max_hgt))
        # increment year
        self.year += 1
def run_until(self, year_end, reset=False):
"""Runs the model till the specified year.
Returns all geometric parameters (i.e. length, area, volume, terminus
elevation and specific mass balance) at the end of the model evolution.
Parameters
----------
year_end : float
end of modeling period
reset : bool, optional
If `True`, the model will start from `year_0`, otherwise from its
current position in time (default).
Returns
-------
[float, float, float, float, float, float]
the geometric glacier parameters at the end of the model evolution:
year, length [m], area [m2], volume [m3], terminus elevation
[m asl.], specific mass balance [mm w.e.]
"""
# reset parameters to starting values
if reset:
self.reset()
# check validity of end year
if year_end < self.year:
# raise warning if model year already past given year, and don't
# run the model - return current parameters
raise Warning('Cannot run until {}, already at year {}'.format(
year_end, self.year))
else:
# iterate over all years
while self.year < year_end:
# run model for one year
self.step()
# return metrics
return (self.year, self.length_m, self.area_m2,
self.volume_m3, self.min_hgt, self.spec_mb)
    def run_until_and_store(self, year_end, diag_path=None, reset=False):
        """Runs the model till the specified year. Returns all relevant
        parameters (i.e. length, area, volume, terminus elevation and
        specific mass balance) for each time step as a xarray.Dataset.
        If a file path is given, the dataset is also written to that file
        as netCDF.

        Parameters
        ----------
        year_end : float
            end of modeling period
        diag_path : str, optional
            path where to store glacier diagnostics, default = None
        reset : bool, optional
            If `True`, the model will start from `year_0`, otherwise from its
            current position in time (default).

        Returns
        -------
        :py:class:`xarray.Dataset`
            model parameters for each time step (year)

        Raises
        ------
        ValueError
            if `year_end` lies before the model's current year
        InvalidParamsError
            if the mass balance model has no hemisphere set
        """
        # reset parameters to starting values
        # TODO: is this OGGM compatible
        if reset:
            self.reset()
        # check validity of end year
        # TODO: find out how OGGM handles this
        if year_end < self.year:
            raise ValueError('Cannot run until {}, already at year {}'.format(
                year_end, self.year))

        # hemisphere is needed below to pick the hydrological calendar
        if not self.mb_model.hemisphere:
            raise InvalidParamsError('run_until_and_store needs a '
                                     'mass-balance model with an unambiguous '
                                     'hemisphere.')
        # define different temporal indices
        yearly_time = np.arange(np.floor(self.year), np.floor(year_end) + 1)

        # TODO: include `store_monthly_step` in parameter list or remove IF:
        store_monthly_step = False
        if store_monthly_step:
            # get monthly time index
            monthly_time = utils.monthly_timeseries(self.year, year_end)
        else:
            # monthly time index equals the yearly one (annual steps only)
            monthly_time = yearly_time.copy()
        # get years and month for hydrological year and calender year
        yrs, months = utils.floatyear_to_date(monthly_time)
        # first month of the hydrological year for this hemisphere
        sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
        cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
                                                        start_month=sm)

        # get number of temporal indices
        ny = len(yearly_time)
        nm = len(monthly_time)
        # deal with one dimensional temporal indices
        if ny == 1:
            yrs = [yrs]
            cyrs = [cyrs]
            months = [months]
            cmonths = [cmonths]

        # initialize diagnostics output file
        diag_ds = xr.Dataset()

        # Global attributes
        diag_ds.attrs['description'] = 'VAS model output'
        diag_ds.attrs['oggm_version'] = __version__
        diag_ds.attrs['calendar'] = '365-day no leap'
        diag_ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
                                                  gmtime())
        diag_ds.attrs['hemisphere'] = self.mb_model.hemisphere

        # Coordinates
        diag_ds.coords['time'] = ('time', monthly_time)
        diag_ds.coords['hydro_year'] = ('time', yrs)
        diag_ds.coords['hydro_month'] = ('time', months)
        diag_ds.coords['calendar_year'] = ('time', cyrs)
        diag_ds.coords['calendar_month'] = ('time', cmonths)

        # add description as attribute to coordinates
        diag_ds['time'].attrs['description'] = 'Floating hydrological year'
        diag_ds['hydro_year'].attrs['description'] = 'Hydrological year'
        diag_ds['hydro_month'].attrs['description'] = 'Hydrological month'
        diag_ds['calendar_year'].attrs['description'] = 'Calendar year'
        diag_ds['calendar_month'].attrs['description'] = 'Calendar month'

        # create empty variables and attributes
        # NOTE(review): np.NaN was removed in NumPy 2.0; np.nan is the
        # portable spelling.
        diag_ds['volume_m3'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['volume_m3'].attrs['description'] = 'Total glacier volume'
        diag_ds['volume_m3'].attrs['unit'] = 'm 3'
        diag_ds['area_m2'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['area_m2'].attrs['description'] = 'Total glacier area'
        diag_ds['area_m2'].attrs['unit'] = 'm 2'
        diag_ds['length_m'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['length_m'].attrs['description'] = 'Glacier length'
        # NOTE(review): unit 'm 3' for a length looks like a copy-paste
        # slip — presumably should be 'm'; confirm before changing stored
        # metadata.
        diag_ds['length_m'].attrs['unit'] = 'm 3'
        diag_ds['ela_m'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['ela_m'].attrs['description'] = ('Annual Equilibrium Line '
                                                 'Altitude (ELA)')
        diag_ds['ela_m'].attrs['unit'] = 'm a.s.l'
        diag_ds['spec_mb'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['spec_mb'].attrs['description'] = 'Specific mass balance'
        diag_ds['spec_mb'].attrs['unit'] = 'mm w.e. yr-1'
        diag_ds['min_hgt'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['min_hgt'].attrs['description'] = 'Terminus elevation'
        diag_ds['min_hgt'].attrs['unit'] = 'm asl.'
        diag_ds['tau_l'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['tau_l'].attrs['description'] = 'Length change response time'
        diag_ds['tau_l'].attrs['unit'] = 'years'
        diag_ds['tau_a'] = ('time', np.zeros(nm) * np.NaN)
        diag_ds['tau_a'].attrs['description'] = 'Area change response time'
        diag_ds['tau_a'].attrs['unit'] = 'years'

        # TODO: handel tidewater glaciers

        # run the model year by year, recording diagnostics after each step
        # NOTE(review): the 'ela_m' variable created above is never filled
        # inside this loop — confirm whether it should be populated.
        for i, yr in enumerate(monthly_time):
            self.run_until(yr)
            # store diagnostics
            diag_ds['volume_m3'].data[i] = self.volume_m3
            diag_ds['area_m2'].data[i] = self.area_m2
            diag_ds['length_m'].data[i] = self.length_m
            diag_ds['spec_mb'].data[i] = self.spec_mb
            diag_ds['min_hgt'].data[i] = self.min_hgt
            diag_ds['tau_l'].data[i] = self.tau_l
            diag_ds['tau_a'].data[i] = self.tau_a

        if diag_path is not None:
            # write to file
            diag_ds.to_netcdf(diag_path)

        return diag_ds
def run_until_equilibrium(self, rate=0.001, ystep=5, max_ite=200):
""" Try to run the glacier model until an equilibirum is reached.
Works only with a constant mass balance model.
Parameters
----------
rate: float, optional
rate of volume change for which the glacier is considered to be in
equilibrium, whereby rate = |V0 - V1| / V0. default is 0.1 percent
ystep: int, optional
number of years per iteration step, default is 5
max_ite: int, optional
maximum number of iterations, default is 200
"""
# TODO: isinstance is not working...
# if not isinstance(self.mb_model, ConstantVASMassBalance):
# raise TypeError('The mass balance model must be of type ' +
# 'ConstantVASMassBalance.')
# initialize the iteration counters and the volume change parameter
ite = 0
was_close_zero = 0
t_rate = 1
# model runs for a maximum fixed number of iterations
# loop breaks if an equilibrium is reached (t_rate small enough)
# or the glacier volume is below 1 for a defined number of times
while (t_rate > rate) and (ite <= max_ite) and (was_close_zero < 5):
# increment the iteration counter
ite += 1
# store current volume ('before')
v_bef = self.volume_m3
# run for the given number of years
self.run_until(self.year + ystep)
# store new volume ('after')
v_af = self.volume_m3
#
if np.isclose(v_bef, 0., atol=1):
# avoid division by (values close to) zero
t_rate = 1
was_close_zero += 1
else:
# compute rate of volume change
t_rate = np.abs(v_af - v_bef) / v_bef
# raise RuntimeError if maximum number of iterations is reached
if ite > max_ite:
raise RuntimeError('Did not find equilibrium.')
def create_start_glacier(self, area_m2_start, year_start,
adjust_term_elev=False):
"""Instance model with given starting glacier area, for the iterative
process of seeking the glacier’s surface area at the beginning of the
model integration.
Per default, the terminus elevation is not scaled (i.e. is the same as
for the initial glacier (probably RGI values)). This corresponds to
the code of Marzeion et. al. (2012), but is physically not consistent.
It is possible to scale the corresponding terminus elevation given the
most recent (measured) outline. However, this is not recommended since
the results may be strange. TODO: this should be fixed sometime...
Parameters
----------
area_m2_start : float
starting surface area guess [m2]
year_start : flaot
corresponding starting year
adjust_term_elev : bool, optional, default = False
"""
# compute volume (m3) and length (m) from area (using scaling laws)
volume_m3_start = self.ca * area_m2_start ** self.gamma
length_m_start = (volume_m3_start / self.cl) ** (1 / self.ql)
min_hgt_start = self.min_hgt_0
# compute corresponding terminus elevation
if adjust_term_elev:
min_hgt_start = self.max_hgt + (length_m_start / self.length_m_0
* (self.min_hgt_0 - self.max_hgt))
self.__init__(year_start, area_m2_start, min_hgt_start,
self.max_hgt, self.mb_model)
def run_and_compare(self, model_ref):
"""Let the model glacier evolve to the same year as the reference
model (`model_ref`). Compute and return the relative error in area.
Parameters
----------
model_ref : :py:class:`oggm.vascaling.VAScalingModel`
Returns
-------
float
relative surface area error
"""
# run model and store area
year, _, area, _, _, _ = self.run_until(year_end=model_ref.year,
reset=True)
assert year == model_ref.year
# compute relative difference to reference area
rel_error = 1 - area/model_ref.area_m2
return rel_error
    def start_area_minimization(self):
        """Find the starting surface area that best reproduces the
        reference area after model integration.

        Placeholder — not implemented yet.  `create_start_glacier` and
        `run_and_compare` appear to be the intended building blocks of the
        iterative search.

        Raises
        ------
        NotImplementedError
            always
        """
        raise NotImplementedError
| StarcoderdataPython |
3366996 | <filename>src/softmax_mnist.py
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

DATA_DIR = "/tmp/data"
NUM_STEPS = 1000
MINIBATCH_SIZE = 100

data = input_data.read_data_sets(DATA_DIR, one_hot=True)

# define inputs: flattened 28x28 MNIST images
x = tf.placeholder(tf.float32, [None, 784])
# define weights (softmax regression, no bias term)
W = tf.Variable(tf.zeros([784, 10]))

# define outputs: one-hot labels and the model's logits
y_true = tf.placeholder(tf.float32, [None, 10])
y_pred = tf.matmul(x, W)

# loss: softmax cross-entropy averaged over the batch
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true)
)

# optimizer: plain gradient descent, learning rate 0.5
gd_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

correct_mask = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
accuracy = tf.reduce_mean(tf.cast(correct_mask, tf.float32))

# train the model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(NUM_STEPS):
        batch_xs, batch_ys = data.train.next_batch(MINIBATCH_SIZE)
        sess.run(gd_step, feed_dict={ x: batch_xs, y_true: batch_ys })

    # BUG FIX: accuracy was previously evaluated on the *last training
    # minibatch*, not on held-out data, which overstates performance.
    # Evaluate on the MNIST test set instead.
    ans = sess.run(accuracy,
                   feed_dict={ x: data.test.images, y_true: data.test.labels })

print("Accuracy: {:.4}%".format(ans*100))
| StarcoderdataPython |
16392 | <filename>ui/main_window.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt Designer-generated layout for the application's main window.

    Generated by pyuic5 5.12.2 from 'untitled.ui'.  Do not edit by hand —
    regenerate from the .ui file instead, as manual changes will be lost.
    """

    def setupUi(self, MainWindow):
        """Create, size and position every widget on *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(513, 403)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # action buttons; the first four start disabled and are presumably
        # enabled later by application logic — confirm in the controller
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setEnabled(False)
        self.pushButton.setGeometry(QtCore.QRect(80, 70, 91, 31))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setEnabled(False)
        self.pushButton_2.setGeometry(QtCore.QRect(10, 70, 71, 31))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setEnabled(False)
        self.pushButton_3.setGeometry(QtCore.QRect(194, 20, 311, 31))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setEnabled(False)
        self.pushButton_4.setGeometry(QtCore.QRect(194, 72, 311, 31))
        self.pushButton_4.setObjectName("pushButton_4")
        self.pushButton2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton2.setGeometry(QtCore.QRect(10, 20, 161, 31))
        self.pushButton2.setObjectName("pushButton2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(200, 0, 311, 16))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(200, 50, 281, 16))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(30, 110, 461, 251))
        self.label_3.setText("")
        # NOTE(review): hard-coded absolute path — this pixmap will
        # silently fail to load on any other machine; ship the asset with
        # the application and use a relative/resource path instead.
        self.label_3.setPixmap(QtGui.QPixmap("C:/Users/arsen/Desktop/riemann.jpg"))
        self.label_3.setObjectName("label_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 513, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply the translatable (Ukrainian) display strings to all
        widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Головне Меню"))
        self.pushButton.setText(_translate("MainWindow", "Відобразити все"))
        self.pushButton_2.setText(_translate("MainWindow", "Відобразити"))
        self.pushButton_3.setText(_translate("MainWindow", "Створити"))
        self.pushButton_4.setText(_translate("MainWindow", "Створити"))
        self.pushButton2.setText(_translate("MainWindow", "Задати аналітичну функцію"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#aa0000;\">Мінімальна поверхня з квазіконформною заміною параметра</span></p></body></html>"))
        self.label_2.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#005500;\">Мінімальна поверхня з конформною заміною параметра</span></p></body></html>"))
| StarcoderdataPython |
from picamera import PiCamera
from time import sleep

camera = PiCamera()


def _sweep(attribute, label):
    # Step the given camera attribute from 0 to 99, annotating the live
    # preview with the current value at each step.
    for level in range(100):
        camera.annotate_text = "%s: %s" % (label, level)
        setattr(camera, attribute, level)
        sleep(0.1)


# Demonstrate brightness and contrast adjustment: sweep brightness while
# previewing, restore the default brightness, then sweep contrast.
camera.start_preview()
_sweep("brightness", "Brightness")
camera.stop_preview()

camera.brightness = 50

camera.start_preview()
_sweep("contrast", "Contrast")
camera.stop_preview()
import h5py as h5
import feather
import pandas as pd
import numpy as np
import os

# Convert every feather correlation matrix in correlation_folder/ into a
# chunked float16 HDF5 file, storing the upper-cased gene symbols alongside
# the data under meta/genes.
correlation_files = os.listdir("correlation_folder")
for file_index, file_name in enumerate(correlation_files):
    print(file_index)
    correlation = pd.read_feather("correlation_folder/" + file_name)
    out = h5.File("h5/" + file_name.replace(".f", "") + ".h5", "w")
    # one chunk per column for fast single-gene reads
    dset = out.create_dataset("data/correlation", correlation.shape,
                              dtype=np.float16,
                              chunks=(1, correlation.shape[0]))
    dset[:, :] = correlation
    genemeta = out.create_dataset(
        "meta/genes",
        data=np.array(list(map(str.upper, correlation.columns)), dtype='S10'),
        dtype='S10')
    out.close()
def load_correlation(gene):
    """Return the correlation column for *gene* from h5/correlation_2.h5.

    Looks the gene symbol up in the stored gene list and reads the
    matching column of the correlation matrix as float64.

    Raises
    ------
    ValueError
        if *gene* is not in the stored gene list
    """
    # BUG FIX: use a context manager so the file handle is released even
    # when the lookup raises (previously f.close() was skipped on error).
    with h5.File("h5/correlation_2.h5", "r") as f:
        # BUG FIX: the deprecated alias np.str was removed in NumPy 1.24;
        # the builtin str is the documented equivalent.
        genes = np.array(f["meta/genes"]).astype(str)
        idx = list(genes).index(gene)
        cor = np.array(f["data/correlation"][:, idx]).astype(np.float64)
    return cor
# BUG FIX: `time` was used here but only imported further down in this
# file, so running top-to-bottom raised NameError.  Import it locally.
import time

# time a single local gene lookup
start = time.time()
coco = load_correlation("SOX2")
print(time.time() - start)
import h5py as h5
import s3fs
import numpy as np
import time
from multiprocessing import Process
import random
def loadGenesS3():
    """Read the gene symbol list from the public mssm-prismx S3 bucket.

    Returns
    -------
    numpy.ndarray of str
        gene symbols stored in correlation_0.h5 under meta/genes
    """
    genes = 0
    s3 = s3fs.S3FileSystem(anon=True)
    with h5.File(s3.open("s3://mssm-prismx/correlation_0.h5", 'rb'), 'r', lib_version='latest') as f:
        # BUG FIX: the deprecated alias np.str was removed in NumPy 1.24;
        # the builtin str is the documented equivalent.
        genes = np.array(f["meta/genes"]).astype(str)
    return genes
def load_correlationS3(gene, genes, cormat, results):
    """Fetch the correlation row of *gene* from matrix *cormat* on the
    public mssm-prismx S3 bucket and store it in ``results[cormat]``.

    Parameters
    ----------
    gene : str
        gene symbol to look up
    genes : sequence of str
        gene order of the matrix rows
    cormat : int or str
        index (or name, e.g. "global") of the correlation matrix
    results : mapping
        written in place: results[cormat] = correlation row (float64)
    """
    cor = 0
    s3 = s3fs.S3FileSystem(anon=True)
    key = "s3://mssm-prismx/correlation_" + str(cormat) + ".h5"
    with h5.File(s3.open(key, 'rb'), 'r', lib_version='latest') as f:
        row = list(genes).index(gene)
        cor = np.array(f["data/correlation"][row, :]).astype(np.float64)
    results[cormat] = cor
genes = loadGenesS3()
start = time.time()
coco = load_correlationS3("MAPK1", genes)
print(time.time() - start)
from multiprocessing.pool import ThreadPool as Pool
import pandas as pd
start = time.time()
pool = Pool(1)
cormats = list(range(0,50))
cormats.append("global")
results = pd.DataFrame(np.zeros(shape=(len(genes), len(cormats))), columns=cormats)
for i in cormats:
pool.apply_async(load_correlationS3, ("P53", genes, i, results))
pool.close()
pool.join()
print(time.time() - start)
start = time.time()
pool = Pool(10)
results = pd.DataFrame(np.zeros(shape=(len(genes), 20)), columns=genes[1000:1020])
for gene in genes[1000:1010]:
results[gene] = pool.apply_async(load_correlationS3, (gene, genes,)).get()
pool.close()
pool.join()
print(time.time() - start)
start = time.time()
for gene in genes[2000:2050]:
load_correlationS3(gene, genes, results)
print(time.time() - start)
# Interactive scratch: inspect the index of one gene symbol in
# correlation_0.h5.
# NOTE(review): np.str was removed in NumPy 1.24; use builtin str.
f = h5.File("h5/correlation_0.h5", "r")
genes = np.array(f["meta/genes"]).astype(np.str)
f.close()
idx = list(genes).index("0610009L18")
print(idx)

# same lookup again, result displayed interactively (not stored)
list(genes).index('0610009L18')

f = h5.File("h5/correlation_0.h5", "r")
genes = np.array(f["meta/genes"]).astype(np.str)
f.close()

# Rewrite correlation_0.h5.
# NOTE(review): `correlation` here is the stale loop variable left over
# from the conversion loop above (i.e. the *last* file processed) —
# confirm that is really the intended content for correlation_0.h5.
f = h5.File("h5/correlation_0.h5", "w")
dset = f.create_dataset("data/correlation", correlation.shape, dtype=np.float16, chunks=(1,correlation.shape[0]))
dset[:,:] = correlation
genemeta = f.create_dataset("meta/genes", data=np.array(list(map(str.upper, correlation.columns)), dtype='S10'), dtype='S10')
f.close()
import numpy as np
import matplotlib.pyplot as plt
import numpy as np


def _save_components(low, high, combined, title, filename):
    # Plot the low/high frequency components and their sum, then save the
    # figure and close it.
    plt.plot(t1, low, label="low frequency")
    plt.plot(t1, high, label="high frequency")
    plt.plot(t1, combined, label="combined frequency")
    plt.legend()
    plt.title(title)
    plt.savefig(filename)
    plt.close()


def _save_pair(gene_a, gene_b, title, filename):
    # Plot two gene signals against each other and save the comparison.
    plt.plot(t1, gene_a, label="gene A")
    plt.plot(t1, gene_b, label="gene B")
    plt.legend()
    plt.title(title)
    plt.savefig(filename)
    plt.close()


# Synthetic "gene expression" signals: a low-frequency carrier plus a
# small high-frequency component, shifted to be centered below zero.
t1 = np.arange(0, 8, 0.001)
s1 = np.sin(t1) + 1.5
s2 = np.sin(t1 * 6) / 5
s3 = s1 + s2 - 3.5
g1 = np.sin(t1 + np.pi / 2) + 1.5
g2 = np.sin(t1 * 6) / 5
g3 = g1 + g2 - 3.5

_save_components(s1, s2, s3, "gene A", "genea.png")
_save_components(g1, g2, g3, "gene B", "geneb.png")

_save_pair(s3 + 3.5, g3 + 3.5, "full spectrum gene similarity",
           "fullspectrum.png")
_save_pair(s2, g2, "high frequency spectrum gene similarity",
           "highspectrum.png")

# correlation of the combined signals (value shown interactively only)
np.corrcoef(s3,g3)

# rotate both signals by half their length and compare again
k1 = list(s3[4000:8000])+list(s3[0:4000])
k2 = list(g3[4000:8000])+list(g3[0:4000])
_save_pair(np.array(k1) + 3.5, np.array(k2) + 3.5,
           "shuffled spectrum gene similarity", "shufflespectrum.png")
4817502 | <reponame>bdowning/aiotools
from pprint import pprint
import asyncio
import aiotools
@aiotools.actxmgr
async def mygen(id):
    """Async context manager whose ``as`` value is a string identifying
    this instance by *id*.  (`id` shadows the builtin, but renaming would
    change the public signature.)"""
    yield f'mygen id is {id}'
async def run():
    """Enter ten `mygen` context managers as one group and print the list
    of values they yielded."""
    group = aiotools.actxgroup(mygen(i) for i in range(10))
    async with group as values:
        pprint(values)
if __name__ == '__main__':
    # Run the demo coroutine on a fresh event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(run())
    finally:
        # BUG FIX: the loop was previously only stop()ed, never closed,
        # leaking the event loop's resources; close() performs the full
        # cleanup documented for asyncio event loops.
        loop.close()
124111 | import sys
from cx_Freeze import setup, Executable
build_exe_options = {
'includes': [
'ninfs',
'ninfs.gui',
'ninfs.mount.cci',
'ninfs.mount.cdn',
'ninfs.mount.cia',
'ninfs.mount.exefs',
'ninfs.mount.nandctr',
'ninfs.mount.nandhac',
'ninfs.mount.nandtwl',
'ninfs.mount.nandbb',
'ninfs.mount.ncch',
'ninfs.mount.romfs',
'ninfs.mount.sd',
'ninfs.mount.srl',
'ninfs.mount.threedsx',
'ninfs.main',
'ninfs.reg_shell',
'ninfs.fmt_detect',
'ninfs.fuse',
'pyctr.type.cci',
'pyctr.type.cdn',
'pyctr.type.cia',
'pyctr.type.exefs',
'pyctr.type.ncch',
'pyctr.type.romfs',
'pyctr.type.sd',
'pyctr.type.smdh',
'pyctr.type.tmd',
],
}
build_msi_options = {
'upgrade_code': '{4BC1D604-0C12-428A-AA22-7BB673EC8266}',
'install_icon': 'ninfs/gui/data/windows.ico'
}
executables = [
Executable('ninfs/_frozen_main.py',
target_name='ninfs',
icon='ninfs/gui/data/windows.ico')
]
if sys.platform == 'win32':
executables.append(Executable('ninfs/_frozen_main.py',
base='Win32GUI',
target_name='ninfsw',
icon='ninfs/gui/data/windows.ico'))
executables.append(Executable('ninfs/winpathmodify.py',
target_name='winpathmodify'))
# based on https://github.com/Legrandin/pycryptodome/blob/b3a394d0837ff92919d35d01de9952b8809e802d/setup.py
with open('ninfs/__init__.py', 'r', encoding='utf-8') as f:
for line in f:
if line.startswith('__version__'):
version = eval(line.split('=')[1])
setup(
name='ninfs',
version=version,
description='FUSE filesystem Python scripts for Nintendo console files',
options={'build_exe': build_exe_options},
executables=executables
)
| StarcoderdataPython |
3276104 | # -*- coding: utf-8 -
"""Event driven concurrent framework for Python"""
from .utils.version import get_version

# Version tuple: (major, minor, micro, release level, serial)
VERSION = (2, 0, 2, 'final', 0)

__version__ = version = get_version(VERSION)
__author__ = "<NAME>"

# default TCP port the server listens on
DEFAULT_PORT = 8060
# default timeout for asynchronous operations (None = wait indefinitely)
ASYNC_TIMEOUT = None
SERVER_NAME = 'pulsar'
# The server name in other scripts, decoded from UTF-8 byte literals so
# this module file itself stays ASCII-safe.  (Presumably "pulsar" in
# Japanese katakana, Chinese and Hindi — confirm translations.)
JAPANESE = b'\xe3\x83\x91\xe3\x83\xab\xe3\x82\xb5\xe3\x83\xbc'.decode('utf-8')
CHINESE = b'\xe8\x84\x89\xe5\x86\xb2\xe6\x98\x9f'.decode('utf-8')
HINDI = (b'\xe0\xa4\xaa\xe0\xa4\xb2\xe0\xa5\x8d'
         b'\xe0\xa4\xb8\xe0\xa4\xb0').decode('utf-8')

# "pulsar/<version>" string used e.g. in Server headers
SERVER_SOFTWARE = "{0}/{1}".format(SERVER_NAME, version)
3266984 | import random
NUMBER_OF_TRIALS = 100000 # Constant
numberOfHits = 0
for i in range(NUMBER_OF_TRIALS):
x = random.random() * 2 - 1
y = random.random() * 2 - 1
if x * x + y * y <= 1:
numberOfHits += 1
pi = 4 * numberOfHits / NUMBER_OF_TRIALS
print("PI is", pi)
| StarcoderdataPython |
1692578 | import struct
import socket
def scannerIp(stringIp):
    """Probe the SMB service on *stringIp* for the targeted negotiate
    response signature.

    Connects to port 445, sends a pre-built SMB2 negotiate request and
    inspects bytes 68:72 of the response (presumably dialect revision
    0x0311 plus a negotiate-context field — confirm against MS-SMB2).
    Prints the verdict and returns True when the response matches the
    "vulnerable" signature, False otherwise.  Connection/timeout errors
    propagate to the caller.
    """
    sock = socket.socket(socket.AF_INET)
    sock.settimeout(3)
    # BUG FIX: the socket was previously never closed (resource leak);
    # try/finally guarantees cleanup even when connect/recv raises.
    try:
        sock.connect((stringIp, 445))
        packet = b'\x00\x00\x00\xc0\xfeSMB@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00$\x00\x08\x00\x01\x00\x00\x00\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\x00\x00\x00\x02\x00\x00\x00\x02\x02\x10\x02"\x02$\x02\x00\x03\x02\x03\x10\x03\x11\x03\x00\x00\x00\x00\x01\x00&\x00\x00\x00\x00\x00\x01\x00 \x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\n\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00'
        sock.send(packet)
        # first 4 bytes: big-endian length of the SMB response that follows
        length = sock.recv(4)
        nb, = struct.unpack(">I", length)
        result = sock.recv(nb)
    finally:
        sock.close()
    # NOTE: removed a large block of commented-out alternatives plus dead
    # code after this if/else (both branches return, so it never ran).
    if result[68:72] != b"\x11\x03\x02\x00":
        print("Not Vulnerable")
        return False
    print("vulnerable")
    return True
# scannerIp() | StarcoderdataPython |
1720742 | #! /usr/bin/env python3
import sys
import ftplib
import os
import re
import datetime
def getDirDate(inDir=''):
    """Strip the date from a cdas2 directory name.

    Directories holding cdas2 files are named ``cdas2.YYYYmmdd``.  When
    *inDir* matches that pattern exactly, the date portion is returned as
    a :class:`datetime.datetime`; otherwise ``None`` is returned.
    """
    # only accept the exact cdas2.<8 digits> pattern
    if re.fullmatch(r'cdas2\.\d{8}', inDir):
        return datetime.datetime.strptime(inDir, 'cdas2.%Y%m%d')
    return None
def getFile(ftp='', source='', target=''):
    """Download the ftp file listed in source, and place in target

    ftp must be a ftplib.FTP instance, and logged in.

    This function will check if the target file exists. If it does exist,
    then it will check the size and date stamp. If the file on the
    ftp site is newer, or a different size, then the file will be
    downloaded again. If the size is the same, and the date stamp on
    the ftp site is older, then the download will not be retried.

    This function will return True if successful (or if the file didn't
    need to be downloaded), False otherwise.
    """
    # Default return value
    myRet = False
    # Only proceed when given a usable, logged-in FTP connection
    if isinstance(ftp, ftplib.FTP):
        # Indicates whether the download should be attempted
        doDownload = True
        # Check if the target file exists
        if os.path.isfile(target):
            # Need to get local file size and ctime
            try:
                target_size = os.path.getsize(target)
                target_ctime = datetime.datetime.fromtimestamp(os.path.getctime(target))
            except OSError as err:
                print("WARNING: Unable to get the size or ctime of the target file \"{0}\".".format(target), file=sys.stderr)
                print("WARNING: Retrying the download. ([{0}] {1})".format(err.errno, err.strerror), file=sys.stderr)
            else:
                # Need to get remote size and ctime
                try:
                    source_size = ftp.size(source)
                    source_ctime_str = ftp.sendcmd('MDTM {}'.format(source))
                    source_ctime = datetime.datetime.strptime(source_ctime_str, '213 %Y%m%d%H%M%S')
                except ftplib.all_errors as err:
                    # BUG FIX: this format string used {1} with a single
                    # argument, raising IndexError instead of warning.
                    print("WARNING: Unable to get the size or ctime of the source file \"{0}\".".format(source), file=sys.stderr)
                    print("WARNING: Retrying the download. ({0})".format(err), file=sys.stderr)
                else:
                    # BUG FIX: only compare when both stat calls succeeded;
                    # previously the comparison also ran after a failed
                    # remote stat and raised NameError on source_size.
                    # Files are the _same_ when sizes match and the remote
                    # copy is older than the local one.
                    if source_size == target_size and source_ctime < target_ctime:
                        print("NOTE: File \"{0}\" already retrieved.".format(source), file=sys.stderr)
                        doDownload = False
                    else:
                        print("WARNING: Target \"{0}\" exists, but does not match the source \"{1}\".".format(target, source), file=sys.stderr)
                        print("WARNING: Retrieving.", file=sys.stderr)
        # BUG FIX: doDownload was computed but never checked, so the file
        # was re-downloaded even when already up to date.  Per the
        # docstring, skipping the download still counts as success.
        if doDownload:
            # Now do the download
            try:
                ftp.retrbinary('RETR {}'.format(source), open(target, 'wb').write)
            except ftplib.all_errors as err:
                print("WARNING: Error while attempting to retrieve file \"{0}\". ({1})".format(source, err), file=sys.stderr)
            except OSError as err:
                print("WARNING: Unable to write target file \"{0}\". ([{1}] {2})".format(target, err.errno, err.strerror), file=sys.stderr)
            else:
                myRet = True
        else:
            myRet = True
    return myRet
def main():
    """Download cdas2 files from ftp.ncep.noaa.gov

    This application will download the files from ncep for use in the
    GFDL seasonal prediction.

    TODO: Add real logging (to a file)
    TODO: Add a configuration file
    """
    ftpUrl = "ftp.ncep.noaa.gov"
    ftpPath = "pub/data/nccf/com/cdas2/prod"
    OUTPUT_DIR = "/home/sdu/Development/nmme/pythonFtpTests/ncep_reanal/testData"

    # Make sure OUTPUT_DIR exists.  Exit if it doesn't.
    if not os.path.isdir(OUTPUT_DIR):
        exit("ERROR: Directory \"{0}\" does not exist. Please create, and try again.".format(OUTPUT_DIR))

    # Connect to host, default port
    try:
        ftp = ftplib.FTP(ftpUrl)
    except ftplib.all_errors as err:
        exit("ERROR: Unable to connect to ftp site \"{0}\": ({1}).".format(ftpUrl, re.sub(r'\[.+\]', '', str(err)).strip()))

    # Anonymous login
    try:
        ftp.login()
    except ftplib.all_errors as err:
        # Clean up ftp connection, and exit
        ftp.quit()
        exit("ERROR: Unable to login to ftp site \"{0}\": ({1}).".format(ftpUrl, err))

    # change to the correct directory
    try:
        ftp.cwd(ftpPath)
    except ftplib.all_errors as err:
        # Clean up ftp connection, and exit
        ftp.quit()
        # BUG FIX: the message previously reported ftpUrl instead of the
        # directory that failed.
        exit("ERROR: Unable to change to directory \"{0}\": ({1}).".format(ftpPath, err))

    # Get the names of all directories in the cwd
    try:
        dirs = ftp.nlst()
    except ftplib.all_errors as err:
        exit("ERROR: Unable to list directories: ({0}).".format(err))

    for inDir in dirs:
        # Get the date from the directory name (pattern cdas2.YYYYmmdd)
        dirDate = getDirDate(inDir)
        if not dirDate:
            # BUG FIX: the format string referenced {1} with a single
            # argument, raising IndexError instead of printing the warning.
            print("WARNING: Not able to extract the date from the directory name. Skipping {0} . . .".format(inDir),
                  file=sys.stderr)
            continue

        # Set the output directory to be YYYYmmm where mmm is the
        # lowercase month abbreviation.
        outDir = dirDate.strftime('%Y%b').lower()
        fullOutDir = os.path.join(OUTPUT_DIR, outDir)

        # Need to make sure the output directory exists
        if not os.path.isdir(fullOutDir):
            try:
                os.mkdir(fullOutDir)
            except OSError as err:
                # BUG FIX: placeholders were {3}/{4} for four arguments,
                # raising IndexError; corrected to {2}/{3}.
                print("WARNING: Unable to create directory \"{0}\". Skipping all files in \"{1}\". ([{2}] {3})".format(fullOutDir,
                                                                                                                       inDir,
                                                                                                                       err.errno,
                                                                                                                       err.strerror),
                      file=sys.stderr)
                continue

        print("NOTE: Files from directory \"{0}\" will be placed in \"{1}\".".format(inDir, outDir), file=sys.stderr)

        # Enter the directory
        try:
            ftp.cwd(inDir)
        except ftplib.all_errors as err:
            print("WARNING: Unable to enter ftp directory \"{0}\". Skipping . . .".format(inDir),
                  file=sys.stderr)
            # BUG FIX: the loop previously fell through after a failed cwd
            # and listed/downloaded from the wrong (parent) directory.
            continue

        # Get a list of files in the new directory
        try:
            files = ftp.nlst()
        except ftplib.all_errors as err:
            print("WARNING: Unable to get a list of file in directory \"{0}\". Skipping . . .".format(inDir), file=sys.stderr)
            ftp.cwd("..")
            continue

        for inFile in files:
            # inFile names follow cdas2.t??z.sanl where ?? is the 2-digit
            # hour; the output name becomes sig.anl.YYYYMMDDHH.ieee
            outFile = "sig.anl.{0}{1}.ieee".format(dirDate.strftime('%Y%m%d'), inFile[7:9])
            # Download the file
            getFile(ftp, inFile, os.path.join(fullOutDir, outFile))

        # Return to the parent directory
        ftp.cwd("..")

    ftp.quit()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1752192 | <gh_stars>0
class Something :
def __init__ ( self ):
pass
def do_something(self):
print("class asdf")
def do_something_else(self):
self.do_something()
if __name__ == '__main__':
x = (1, 2)
y = Something()
print('I finished running')
| StarcoderdataPython |
3389436 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
#Exercício Python 28: Escreva um programa que faça o computador “pensar” em um número inteiro entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número escolhido pelo computador. O programa deverá escrever na tela se o usuário venceu ou perdeu.
from random import randint
from time import sleep
pensamento = randint(0,5)
print("---> JOGO DA ADIVINHAÇÃO <---\n")
print("[INFORME UM NÚMERO ENTRE 0-5]")
numero = int(input("Qual o seu palpite? "))
print("PROCESSANDO...")
sleep(3)
if numero == pensamento:
print(f"\nVocê acertou!!\nO computador pensou em {pensamento} e você digitou {numero}")
else:
print("\nTENTE OUTRA VEZ - VOCÊ ERROU!!!")
print("---> FIM JOGO <---\n") | StarcoderdataPython |
1697424 | #!.venv/bin/python -W ignore
from libs.bunq_lib import BunqLib
from libs.share_lib import ShareLib
def main():
all_option = ShareLib.parse_all_option()
environment_type = ShareLib.determine_environment_type_from_all_option(all_option)
ShareLib.print_header()
bunq = BunqLib(environment_type)
account_id = ShareLib.determine_account_id_from_all_option_or_std_in(all_option)
name = ShareLib.determine_name_from_all_option_or_std_in(all_option)
print(f'''
| Updating Name: {name}
| of Account: {account_id}
...
''')
bunq.update_account(name, int(account_id))
print('''
| ✅ Account updated
| ▶ Check your changed overview
''')
bunq.update_context()
if __name__ == '__main__':
main()
| StarcoderdataPython |
67923 | <reponame>geostk/deepSVDD<gh_stars>1-10
from datasets.base import DataLoader
from datasets.preprocessing import center_data, normalize_data, \
rescale_to_unit_interval, global_contrast_normalization, zca_whitening, \
make_unit_norm, extract_norm_and_out, learn_dictionary, pca
from utils.visualization.mosaic_plot import plot_mosaic
from utils.misc import flush_last_line
from config import Configuration as Cfg
import os
import numpy as np
import cPickle as pickle
class CIFAR_10_DataLoader(DataLoader):
    """CIFAR-10 loader for Deep SVDD experiments.

    NOTE(review): this is Python 2 code (file imports cPickle; relies on
    list-returning range() and integer division below).
    """
    def __init__(self):
        """Configure split sizes, seed and class count, then load from disk."""
        DataLoader.__init__(self)
        self.dataset_name = "cifar10"
        self.n_train = 45000
        self.n_val = 5000
        self.n_test = 10000
        self.seed = Cfg.seed
        # Anomaly-detection experiments reduce the task to normal vs outlier.
        if Cfg.ad_experiment:
            self.n_classes = 2
        else:
            self.n_classes = 10
        Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size))
        self.data_path = "../data/cifar-10-batches-py/"
        self.on_memory = True
        Cfg.store_on_gpu = True
        # load data from disk
        self.load_data()
    def load_data(self, original_scale=False):
        """Load the CIFAR-10 pickle batches, build splits and normalize.

        When original_scale is True the raw pixel values are kept; otherwise
        the data is rescaled to [0,1] and optionally GCN/ZCA/PCA-processed
        according to Cfg flags.
        """
        print("Loading data...")
        # load training data
        X, y = [], []
        count = 1
        filename = '%s/data_batch_%i' % (self.data_path, count)
        while os.path.exists(filename):
            with open(filename, 'rb') as f:
                batch = pickle.load(f)
            X.append(batch['data'])
            y.append(batch['labels'])
            count += 1
            filename = '%s/data_batch_%i' % (self.data_path, count)
        # reshape data and cast them properly
        X = np.concatenate(X).reshape(-1, 3, 32, 32).astype(np.float32)
        y = np.concatenate(y).astype(np.int32)
        # load test set
        path = '%s/test_batch' % self.data_path
        with open(path, 'rb') as f:
            batch = pickle.load(f)
        # reshaping and casting for test data
        X_test = batch['data'].reshape(-1, 3, 32, 32).astype(np.float32)
        y_test = np.array(batch['labels'], dtype=np.int32)
        if Cfg.ad_experiment:
            # set normal and anomalous class
            normal = []
            outliers = []
            # NOTE(review): -1 means "all classes except the other one"; this
            # relies on Python 2 where range() returns a list with .remove().
            if Cfg.cifar10_normal == -1:
                normal = range(0, 10)
                normal.remove(Cfg.cifar10_outlier)
            else:
                normal.append(Cfg.cifar10_normal)
            if Cfg.cifar10_outlier == -1:
                outliers = range(0, 10)
                outliers.remove(Cfg.cifar10_normal)
            else:
                outliers.append(Cfg.cifar10_outlier)
            # extract normal and anomalous class
            X_norm, X_out, y_norm, y_out \
                = extract_norm_and_out(X, y, normal=normal, outlier=outliers)
            # reduce outliers in training set to fraction defined
            n_norm = len(y_norm)
            n_out = int(np.ceil(Cfg.out_frac * n_norm / (1 - Cfg.out_frac)))
            np.random.seed(self.seed)
            perm = np.random.permutation(len(y_out))  # shuffle outliers
            self._X_train = np.concatenate((X_norm, X_out[perm[:n_out]]))
            self._y_train = np.append(y_norm, y_out[perm[:n_out]])
            # shuffle data (since batches are extracted block-wise)
            perm_train = np.random.permutation(len(self._y_train))
            self._X_train = self._X_train[perm_train]
            self._y_train = self._y_train[perm_train]
            # Subset train set such that we only get batches of the same size
            # NOTE(review): Python 2 integer division; under Python 3 this
            # would produce a float.
            self.n_train = ((n_norm + n_out) / Cfg.batch_size) * Cfg.batch_size
            subset = np.random.choice(len(self._X_train), self.n_train,
                                      replace=False)
            self._X_train = self._X_train[subset]
            self._y_train = self._y_train[subset]
            # Adjust number of batches
            Cfg.n_batches = int(np.ceil(self.n_train * 1. / Cfg.batch_size))
            # validation data (use test set)
            X_norm, X_out, y_norm, y_out \
                = extract_norm_and_out(X_test, y_test, normal=normal,
                                       outlier=outliers)
            self._X_val = np.concatenate((X_norm, X_out))
            self._y_val = np.append(y_norm, y_out)
            perm_val = np.random.permutation(len(self._y_val))
            self._X_val = self._X_val[perm_val]
            self._y_val = self._y_val[perm_val]
            self.n_val = len(self._y_val)
            # Validation and test sets are identical copies here.
            self._X_test = self._X_val.copy()
            self._y_test = self._y_val.copy()
            self.n_test = len(self._y_val)
        else:
            # split into training and validation sets with stored seed
            np.random.seed(self.seed)
            perm = np.random.permutation(len(X))
            self._X_train = X[perm[self.n_val:]]
            self._y_train = y[perm[self.n_val:]]
            self._X_val = X[perm[:self.n_val]]
            self._y_val = y[perm[:self.n_val]]
            self._X_test = X_test
            self._y_test = y_test
        # normalize data (if original scale should not be preserved)
        if not original_scale:
            # simple rescaling to [0,1]
            normalize_data(self._X_train, self._X_val, self._X_test,
                           scale=np.float32(255))
            # global contrast normalization
            if Cfg.gcn:
                global_contrast_normalization(self._X_train, self._X_val,
                                              self._X_test,
                                              scale=Cfg.unit_norm_used)
            # ZCA whitening
            if Cfg.zca_whitening:
                self._X_train, self._X_val, self._X_test = zca_whitening(
                    self._X_train, self._X_val, self._X_test)
            # PCA
            if Cfg.pca:
                self._X_train, self._X_val, self._X_test = pca(
                    self._X_train,
                    self._X_val,
                    self._X_test,
                    0.95)
            # rescale to [0,1] (w.r.t. min and max in train data)
            if not Cfg.pca:
                rescale_to_unit_interval(self._X_train,
                                         self._X_val,
                                         self._X_test)
        flush_last_line()
        print("Data loaded.")
    def build_architecture(self, nnet):
        """Add the encoder-style CNN (3 conv pairs + dense) and the output
        layer selected by Cfg (SVDD / softmax / SVM) to *nnet*."""
        # NOTE(review): '&' is the bitwise operator; it works on bools here,
        # but 'and' was probably intended.
        if Cfg.weight_dict_init & (not Cfg.pretrain):
            # initialize first layer filters by atoms of a dictionary
            W1_init = learn_dictionary(nnet.data._X_train, 16, 5, n_sample=500)
            plot_mosaic(W1_init, title="First layer filters initialization",
                        canvas="black",
                        export_pdf=(Cfg.xp_path + "/filters_init"))
        nnet.addInputLayer(shape=(None, 3, 32, 32))
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                              pad='same')
        else:
            if Cfg.weight_dict_init:
                nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                                  pad='same', W=W1_init, b=None)
            else:
                nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                                  pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                              pad='same')
        else:
            nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                              pad='same')
        else:
            nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                              pad='same')
        else:
            nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                              pad='same')
        else:
            nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        if Cfg.cifar10_bias:
            nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                              pad='same')
        else:
            nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        if Cfg.cifar10_bias:
            nnet.addDenseLayer(num_units=Cfg.cifar10_rep_dim)
        else:
            nnet.addDenseLayer(num_units=Cfg.cifar10_rep_dim, b=None)
        # Output head depends on the configured loss.
        if Cfg.svdd_loss:
            return
        elif Cfg.softmax_loss:
            if Cfg.ad_experiment:
                nnet.addDenseLayer(num_units=1)
                nnet.addSigmoidLayer()
            else:
                nnet.addSVMLayer()
                nnet.addSoftmaxLayer()
        else:
            nnet.addSVMLayer()
    def check_specific(self):
        """Dataset-specific sanity check: primal variables must sit on GPU."""
        # store primal variables on RAM
        assert Cfg.store_on_gpu
    def build_autoencoder(self, nnet):
        """Add the convolutional autoencoder (mirror of build_architecture's
        encoder, plus an upscaling decoder) to *nnet*."""
        if Cfg.weight_dict_init:
            # initialize first layer filters by atoms of a dictionary
            W1_init = learn_dictionary(nnet.data._X_train, 16, 5, n_sample=500)
            plot_mosaic(W1_init, title="First layer filters initialization",
                        canvas="black",
                        export_pdf=(Cfg.xp_path + "/filters_init"))
        nnet.addInputLayer(shape=(None, 3, 32, 32))
        if Cfg.weight_dict_init:
            nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                              pad='same', W=W1_init, b=None)
        else:
            nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                              pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addMaxPool(pool_size=(2, 2))
        nnet.addDenseLayer(num_units=Cfg.cifar10_rep_dim, b=None)
        # NOTE(review): Python 2 integer division; assumes cifar10_rep_dim is
        # divisible by 4 so the code reshapes to (rep_dim/4, 2, 2).
        nnet.addReshapeLayer(shape=([0], (Cfg.cifar10_rep_dim / 4), 2, 2))
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addUpscale(scale_factor=(2, 2))
        nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=64, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addUpscale(scale_factor=(2, 2))
        nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=32, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addUpscale(scale_factor=(2, 2))
        nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addConvLayer(num_filters=16, filter_size=(5, 5),
                          pad='same', b=None)
        if Cfg.leaky_relu:
            nnet.addLeakyReLU()
        else:
            nnet.addReLU()
        nnet.addUpscale(scale_factor=(2, 2))
        nnet.addConvLayer(num_filters=3,
                          filter_size=(5, 5),
                          pad='same',
                          b=None)
        nnet.addSigmoidLayer()
| StarcoderdataPython |
1633396 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class FAQConfig(AppConfig):
    """Django application configuration for the FAQ app."""

    verbose_name = _("FAQ")  # translatable label shown in the admin
    name = 'faq'             # importable module path of the app


# Let Django locate this config when the app is listed by module path.
default_app_config = 'faq.FAQConfig'
| StarcoderdataPython |
1611715 | import os
import sys
import mock
import pytest
import torch
import random
# mock detection module
sys.modules['torchvision._C'] = mock.Mock()
import segmentation_models_pytorch as smp
def get_encoder():
    """List the encoder names to exercise, dropping the slowest ones on Travis CI."""
    # NOTE: any non-empty TRAVIS value (even "false") counts as CI here.
    on_ci = os.environ.get('TRAVIS', False)
    names = smp.encoders.get_encoder_names()
    if on_ci:
        blacklist = ['senet154']
        names = [n for n in names if n not in blacklist]
    return names
def get_pretrained_weights_name(encoder_name):
    """Return the first pretrained-settings key registered for *encoder_name*."""
    settings = smp.encoders.encoders[encoder_name]['pretrained_settings']
    return list(settings.keys())[0]
ENCODERS = get_encoder()
def _select_names(names, k=2):
is_full = os.environ.get('FULL_TEST', False)
if not is_full:
return random.sample(names, k)
else:
return names
def _test_forward_backward(model_fn, encoder_name):
model = model_fn(encoder_name, encoder_weights=None)
x = torch.ones((1, 3, 64, 64))
y = model.forward(x)
l = y.mean()
l.backward()
def _test_pretrained_model(model_fn, encoder_name, encoder_weights):
model = model_fn(encoder_name, encoder_weights=encoder_weights)
x = torch.ones((1, 3, 64, 64))
y = model.predict(x)
assert x.shape[2:] == y.shape[2:]
@pytest.mark.parametrize('encoder_name', _select_names(ENCODERS, k=1))
def test_unet(encoder_name):
    """Smoke-test Unet: gradient flow plus shape-preserving prediction."""
    weights = get_pretrained_weights_name(encoder_name)
    _test_forward_backward(smp.Unet, encoder_name)
    _test_pretrained_model(smp.Unet, encoder_name, weights)
@pytest.mark.parametrize('encoder_name', _select_names(ENCODERS, k=1))
def test_fpn(encoder_name):
    """Smoke-test FPN: gradient flow plus shape-preserving prediction."""
    weights = get_pretrained_weights_name(encoder_name)
    _test_forward_backward(smp.FPN, encoder_name)
    _test_pretrained_model(smp.FPN, encoder_name, weights)
@pytest.mark.parametrize('encoder_name', _select_names(ENCODERS, k=1))
def test_linknet(encoder_name):
    """Smoke-test Linknet: gradient flow plus shape-preserving prediction."""
    weights = get_pretrained_weights_name(encoder_name)
    _test_forward_backward(smp.Linknet, encoder_name)
    _test_pretrained_model(smp.Linknet, encoder_name, weights)
@pytest.mark.parametrize('encoder_name', _select_names(ENCODERS, k=1))
def test_pspnet(encoder_name):
    """Smoke-test PSPNet: gradient flow plus shape-preserving prediction."""
    weights = get_pretrained_weights_name(encoder_name)
    _test_forward_backward(smp.PSPNet, encoder_name)
    _test_pretrained_model(smp.PSPNet, encoder_name, weights)
if __name__ == '__main__':
    pytest.main([__file__])
| StarcoderdataPython |
4841828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import Tkinter as tk
import threading
# import of the msg_stream_connector connector module
from ComssServiceDevelopment.connectors.tcp.msg_stream_connector import InputMessageConnector
# import of the development (test) service controller class
from ComssServiceDevelopment.development import DevServiceController
import cv2 # OpenCV library
import numpy as np # NumPy library
# create the test controller object; the parameter is the configuration file
# of the service this stub is attached to
service_controller = DevServiceController("src/output_descriptor.json")
# declare the input interface of the msg_stream_connector; note that the
# identifier must match the OUTPUT of the service the stub is attached to
service_controller.declare_connection("videoOutput", InputMessageConnector(service_controller))
# create the input connection; again the identifier must match the OUTPUT
# of the service the stub is attached to
connection = service_controller.get_connection("videoOutput")
print 'starting output script'
# main program loop
while True:
    obj = connection.read() # read data from the input interface
    frame = np.loads(obj) # deserialize the frame into a NumPy array (legacy pickle-based API)
    cv2.imshow('Camera',frame) # display the frame on screen
    cv2.waitKey(1)
3250194 | <gh_stars>0
"""Handle cross-origin resource sharing (CORS) preflight requests. See:
https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS
"""
# Preflight responses may be cached by browsers for up to one year.
_max_age_header = str(86400 * 365)


def tween_factory(handler, registry):
    """Build a Pyramid tween that answers CORS preflights and tags responses."""

    def cors_tween(request):
        if request.method == 'OPTIONS':
            # Preflight: reply directly without invoking the downstream handler.
            preflight = request.response
            headers = preflight.headers
            headers['Access-Control-Allow-Origin'] = '*'
            headers['Access-Control-Allow-Methods'] = (
                'GET, POST, OPTIONS, HEAD, PUT, DELETE')
            headers['Access-Control-Allow-Headers'] = (
                'Authorization,Content-Type')
            headers['Access-Control-Max-Age'] = _max_age_header
            return preflight

        # Normal request: delegate, then decorate the response (if any).
        response = handler(request)
        if response is not None:
            response.headers['Access-Control-Allow-Origin'] = '*'
            if not response.headers.get('Cache-Control'):
                # Add a default cache-control header.
                response.headers['Cache-Control'] = 'no-store'
        return response

    return cors_tween
def includeme(config):
    """Pyramid inclusion hook: install the CORS tween on *config*."""
    config.add_tween('opnreco.cors.tween_factory')
| StarcoderdataPython |
100100 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import luigi
from servicecatalog_factory import aws
from servicecatalog_factory.workflow.portfolios.create_portfolio_task import (
CreatePortfolioTask,
)
from servicecatalog_factory.workflow.portfolios.create_product_task import (
CreateProductTask,
)
from servicecatalog_factory.workflow.tasks import FactoryTask, logger
class AssociateProductWithPortfolioTask(FactoryTask):
    """Luigi task that associates a Service Catalog product with a portfolio
    in one region, once both have been created."""
    region = luigi.Parameter()
    portfolio_args = luigi.DictParameter()
    product_args = luigi.DictParameter()
    def params_for_results_display(self):
        """Human-readable parameters shown in run-result summaries."""
        return {
            "region": self.region,
            "portfolio": f"{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}",
            "product": self.product_args.get("name"),
        }
    def output(self):
        """Marker file whose existence tells Luigi this association is done."""
        return luigi.LocalTarget(
            f"output/AssociateProductWithPortfolioTask/"
            f"{self.region}"
            f"{self.product_args.get('name')}"
            f"_{self.portfolio_args.get('portfolio_group_name')}"
            f"_{self.portfolio_args.get('display_name')}.json"
        )
    def requires(self):
        """Both the portfolio and the product must exist before associating."""
        return {
            "create_portfolio_task": CreatePortfolioTask(**self.portfolio_args),
            "create_product_task": CreateProductTask(**self.product_args),
        }
    def run(self):
        """Read the created portfolio/product ids and ensure the association exists."""
        logger_prefix = f"{self.region}-{self.portfolio_args.get('portfolio_group_name')}-{self.portfolio_args.get('display_name')}"
        # Upstream tasks wrote their results as JSON into their output targets.
        portfolio = json.loads(
            self.input().get("create_portfolio_task").open("r").read()
        )
        portfolio_id = portfolio.get("Id")
        product = json.loads(self.input().get("create_product_task").open("r").read())
        product_id = product.get("ProductId")
        with self.regional_client("servicecatalog") as service_catalog:
            logger.info(f"{logger_prefix}: Searching for existing association")
            # Idempotent: only creates the association if it is missing.
            aws.ensure_portfolio_association_for_product(
                portfolio_id, product_id, service_catalog
            )
        with self.output().open("w") as f:
            logger.info(f"{logger_prefix}: about to write!")
            f.write("{}")
| StarcoderdataPython |
61508 | <filename>examples/gridworld/GridWorldUnitTests/GridFactory.py<gh_stars>1-10
import numpy as np
from examples.gridworld.Grid import Grid
from examples.gridworld.SimpleGridOne import SimpleGridOne
class GridFactory:
    """Factory producing small fixed grid-world layouts for unit tests."""

    # Cell-type aliases re-exported from SimpleGridOne for brevity.
    step = SimpleGridOne.STEP
    fire = SimpleGridOne.FIRE
    blck = SimpleGridOne.BLCK
    goal = SimpleGridOne.GOAL

    @classmethod
    def test_grid_one(cls) -> Grid:
        """5x4 maze with one goal, two fires and a wall of blocked cells."""
        layout = [
            [cls.step, cls.fire, cls.goal, cls.step, cls.step],
            [cls.step, cls.blck, cls.blck, cls.fire, cls.step],
            [cls.step, cls.blck, cls.blck, cls.blck, cls.step],
            [cls.step, cls.step, cls.step, cls.step, cls.step]
        ]
        return SimpleGridOne(3, layout, [3, 0])

    @classmethod
    def test_grid_two(cls) -> Grid:
        """Single corridor ending at the goal."""
        layout = [
            [cls.step, cls.step, cls.step, cls.step, cls.goal]
        ]
        return SimpleGridOne(3, layout, [0, 0])

    @classmethod
    def test_grid_three(cls) -> Grid:
        """Open 5x5 field with the goal in the top-right corner."""
        layout = [
            [cls.step, cls.step, cls.step, cls.step, cls.goal],
            [cls.step, cls.step, cls.step, cls.step, cls.step],
            [cls.step, cls.step, cls.step, cls.step, cls.step],
            [cls.step, cls.step, cls.step, cls.step, cls.step],
            [cls.step, cls.step, cls.step, cls.step, cls.step]
        ]
        return SimpleGridOne(3, layout, [4, 0])

    @classmethod
    def test_grid_four(cls) -> Grid:
        """Open 20x20 field with one goal cell and one fire cell."""
        rows, cols = 20, 20
        layout = np.full((rows, cols), cls.step)
        layout[5][5] = cls.goal
        layout[15][15] = cls.fire
        return SimpleGridOne(4, layout, [rows - 1, cols - 1])

    @classmethod
    def test_grid_five(cls) -> Grid:
        """Open 10x10 field with one goal cell and one fire cell."""
        rows, cols = 10, 10
        layout = np.full((rows, cols), cls.step)
        layout[2][2] = cls.goal
        layout[7][7] = cls.fire
        return SimpleGridOne(4, layout, [rows - 1, cols - 1])
| StarcoderdataPython |
343 | #!/usr/bin/python2.7
"""
Extract unique set of station locations (and names) along with number of obs
RJHD - Exeter - October 2017
"""
# ECMWF import defaults
import traceback
import sys
from eccodes import *
# RJHD imports
import cartopy
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import gc
VERBOSE = 1 # verbose error reporting.
# BUFR attribute names of potential interest (currently unused here).
ATTRS = [
    'code',
    'units',
    'scale',
    'reference',
    'width'
    ]
INTMDI = 2147483647  # integer missing-data indicator
#***************************************************
def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):
    """Scan one year's BUFR file and accumulate unique station locations.

    New (latitude, longitude) pairs are appended to the running arrays;
    locations already seen get their observation count and end year updated.
    Returns the updated arrays (the inputs are rebound, not mutated in place).
    NOTE(review): Python 2 script (print statements, raw_input below).
    """
    infile = open(infilename)
    # the year is encoded in the file name, e.g. mars_era40_1980.bufr
    year = int(infilename.split(".")[0].split("_")[-1])
    cmatch = 0
    counter = 0
    # loop all messages (with stop statement)
    while 1:
        """OPEN MESSAGE"""
        # get handle for message
        bufr = codes_bufr_new_from_file(infile)
        if bufr is None:
            break
        if counter%100000 == 0:
            print "message: {:d}".format(counter)
        # we need to instruct ecCodes to expand all the descriptors
        # i.e. unpack the data values
        codes_set(bufr, 'unpack', 1)
        """ITERATOR TO EXTRACT KEYS"""
        these_keys = []
        # get BUFR key iterator
        iterid = codes_bufr_keys_iterator_new(bufr)
        # loop over the keys
        while codes_bufr_keys_iterator_next(iterid):
            # print key name
            keyname = codes_bufr_keys_iterator_get_name(iterid)
            # print(" %s" % keyname)
            these_keys += [keyname]
        # delete the key iterator
        codes_bufr_keys_iterator_delete(iterid)
        # Use these to select obs from land/marine surface
        name_keys = ["#1#shipOrMobileLandStationIdentifier", "#1#stationNumber"]
        processed = False
        for nk in name_keys:
            if nk in these_keys:
                try:
                    name = codes_get(bufr, nk)
                    lat = codes_get(bufr, "#1#latitude")
                    lon = codes_get(bufr, "#1#latitude".replace("latitude", "longitude")) if False else codes_get(bufr, "#1#longitude")
                    sloc = tloc = nloc = [-1]
                    if name in station_names:
                        sloc, = np.where(station_names == name)
                    if lat in latitudes:
                        tloc, = np.where(latitudes == lat)
                    if lon in longitudes:
                        nloc, = np.where(longitudes == lon)
                    if tloc[0] == -1 and nloc[0] == -1:
                        # if not in list, then add
                        station_names = np.append(station_names, name)
                        latitudes = np.append(latitudes, lat)
                        longitudes = np.append(longitudes, lon)
                        observations = np.append(observations, 1)
                        start_year = np.append(start_year, year)
                        end_year = np.append(end_year, year)
                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            fixed_station = np.append(fixed_station, True)
                        else:
                            fixed_station = np.append(fixed_station, False)
                    elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0] != nloc[0]:
                        # add if one element of position is unique
                        station_names = np.append(station_names, name)
                        latitudes = np.append(latitudes, lat)
                        longitudes = np.append(longitudes, lon)
                        observations = np.append(observations, 1)
                        start_year = np.append(start_year, year)
                        end_year = np.append(end_year, year)
                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            fixed_station = np.append(fixed_station, True)
                        else:
                            fixed_station = np.append(fixed_station, False)
                    elif tloc[0] != -1 and tloc[0] == nloc[0]:
                        # if position matches exactly, up observation counter
                        observations[tloc[0]] += 1
                        end_year[tloc[0]] = year
                        # allow splitting of land and marine/mobile
                        if nk == "#1#stationNumber":
                            if fixed_station[tloc[0]] != True:
                                # if listed as land and now marine, take marine
                                fixed_station[tloc[0]] = False
                        else:
                            if fixed_station[tloc[0]] != False:
                                # easier to leave as mobile/marine than to move
                                # hopefully will stand out later
                                pass
                    else:
                        cmatch += 1
                    processed = True
                except CodesInternalError:
                    raw_input("key error?")
        # check for new keys which give station ID information
        if not processed:
            other_keys = ["#1#carrierBalloonOrAircraftIdentifier", "#1#aircraftFlightNumber"]
            new_key = True
            for ok in other_keys:
                if ok in these_keys: new_key = False
            if new_key:
                raw_input(these_keys)
        # if counter > 10000: break
        counter += 1
        codes_release(bufr)
    # print "Number of unique locations in this year: {}".format(len(latitudes))
    return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file
#***************************************************
def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = "", figtext = "", doText = False):
    '''
    Standard scatter map

    :param str outname: output filename root
    :param array data: data to plot
    :param array lons: longitudes
    :param array lats: latitudes
    :param obj cmap: colourmap to use
    :param array bounds: bounds for discrete colormap
    :param str cb_label: colorbar label
    :param str title: plot title
    :param str figtext: annotation text (currently unused)
    :param bool doText: annotate the station count in the top-left corner
    '''

    norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N)

    fig = plt.figure(figsize =(10,6.5))
    plt.clf()
    ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())
    ax.gridlines() #draw_labels=True)
    ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = "0.9", edgecolor = "k")
    ax.coastlines()

    ext = ax.get_extent() # save the original extent

    scatter = plt.scatter(lons, lats, c = data, cmap = cmap, norm = norm, s=10, \
                        transform = cartopy.crs.Geodetic(), edgecolor = "r", linewidth = 0.1)

    cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction = 0.05, \
                        aspect = 30, ticks = bounds[1:-1], label = cb_label, drawedges=True)

    # thicken border of colorbar and the dividers
    # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
    # cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
    # cb.outline.set_color('k')
    # cb.outline.set_linewidth(2)
    cb.dividers.set_color('k')
    cb.dividers.set_linewidth(2)

    ax.set_extent(ext, ax.projection) # fix the extent change from colormesh

    plt.title(title)
    if doText: plt.text(0.01, 0.98, "#stations: {}".format(data.shape[0]), transform = ax.transAxes, fontsize = 10)

    plt.savefig(outname)
    plt.close()

    return # scatter_map
#***************************************************
def main(ms = "era40_", year = 1980):
    """Process one year of MARS BUFR data and plot land & marine observation maps.

    :param str ms: dataset prefix, "era40_" or "erai_"
    :param int year: year to process (ERA-I data only exists from 1979 onwards)
    """
    LOCS = "/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/"
    print year
    # Running accumulators, grown by process_file.
    station_names = np.array([])
    fixed_station = np.array([])
    latitudes = np.array([])
    longitudes = np.array([])
    observations = np.array([])
    start_year = np.array([])
    end_year = np.array([])
    if ms == "erai_" and year < 1979:
        return
    else:
        INFILE = "{}mars_{}{}.bufr".format(LOCS, ms, year)
        try:
            station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \
                process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)
        except CodesInternalError as err:
            if VERBOSE:
                traceback.print_exc(file=sys.stderr)
            else:
                sys.stderr.write(err.msg + '\n')
    # Split the accumulated stations into fixed (land) and mobile/marine sets.
    land = np.where(np.array(fixed_station) == True)
    marine = np.where(np.array(fixed_station) == False)
    bounds = np.linspace(0,max(observations),10).astype(int)
    cmap = plt.cm.YlOrRd_r
    if ms == "erai_":
        title = "MARS - SYNOP - {}".format(year)
    else:
        title = "MARS - ERA40 - {}".format(year)
    scatter_map("mars_{}{}_land_observations.png".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, "Number of Observations", title, doText = True)
    scatter_map("mars_{}{}_marine_observations.png".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, "Number of Observations", title)
    # Drop references to the big arrays so gc can reclaim them between years.
    station_names = 0
    fixed_station = 0
    latitudes = 0
    longitudes = 0
    observations = 0
    start_year = 0
    end_year = 0
    land = 0
    marine = 0
    gc.collect()
    return # main
#***************************************************
if __name__ == "__main__":
    import argparse
    # set up keyword arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--ms', dest='ms', action='store', default = "era40_",
                        help='Run on ERA40 ["era40_"] (default) or ERA-I ["erai_"] data')
    # NOTE(review): --year arrives as a string (no type=int); main() only
    # formats it, but the `year < 1979` comparison relies on Python 2 semantics.
    parser.add_argument('--year', dest='year', action='store', default = 1980,
                        help='Which year to process - default 1980')
    args = parser.parse_args()
    main(ms = args.ms, year = args.year)
    sys.exit()
#***************************************************
#                                 END
#***************************************************
| StarcoderdataPython |
3337760 | <filename>search_blog/urls.py
from django.conf.urls import url
from .views import do_search_blog
# Map the app's root URL ('') to the blog search view.
urlpatterns = [
    url(r'^$', do_search_blog, name='search_blog')
]
3289128 | #!/usr/bin/env python3
# encoding: utf-8
from easy_rmg_model.template_writer.submit.gaussian_submit import GaussianSubmit
from easy_rmg_model.template_writer.submit.slurm import SLURMSubmitScript
| StarcoderdataPython |
1692634 | from sklearn.datasets import load_boston
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#from icecream import ic
import random
from functools import reduce
from collections import defaultdict
from nn import Placeholder#导入
# sns.heatmap(dataframe.corr())
# x, y ; x with 13 dimensions
# sns.heatmap(dataframe.corr())
# plt.subplots(1, 2, figsize=(20, 20))
# plt.scatter(dataframe['RM'], dataframe['price'])
# plt.scatter(dataframe['LSTAT'], dataframe['price'])
# plt.show()
#介绍KNN
def k_nearest_neighbors(train_rm, train_lstat, train_y, query_rm, query_lstat, topn=3):
    """KNN regressor over two features (RM, LSTAT).

    Predicts a house price as the mean target of the *topn* training points
    closest (by squared Euclidean distance) to the query point.

    :param train_rm / train_lstat / train_y: parallel training sequences
    :param query_rm / query_lstat: feature values of the query house
    :param topn: number of neighbours to average over
    :return: predicted price (float)
    """
    elements = [(r, ls, y) for r, ls, y in zip(train_rm, train_lstat, train_y)]

    def distance(e):
        return (e[0] - query_rm) ** 2 + (e[1] - query_lstat) ** 2

    # BUG FIX: sort ascending so the *nearest* points are taken; the original
    # reverse=True selected the farthest points, contradicting KNN.
    neighbors = sorted(elements, key=distance)[:topn]
    return np.mean([y for r, ls, y in neighbors])
# => rm -> price
# Math helpers for a hand-rolled linear model and its MSE gradients.
def random_linear(x):
    """Apply an affine map whose weight and bias are drawn from N(0, 10)."""
    weight, bias = np.random.normal(scale=10, size=(1, 2))[0]
    return linear(x, weight, bias)


def linear(x, w, b):
    """Affine transform w*x + b."""
    return w * x + b


def loss(yhat, y):
    """Mean squared error between predictions *yhat* and targets *y*."""
    return np.mean((yhat - y) ** 2)


def partial_w(y, yhat, x):
    """Gradient of the MSE with respect to the weight w."""
    return -2 * np.mean((y - yhat) * x)


def partial_b(y, yhat):
    """Gradient of the MSE with respect to the bias b."""
    return -2 * np.mean(y - yhat)


def sigmoid(x):
    """Logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-x))
def complexity_function_fitting():
    """Demo: plot a piecewise random nonlinear function on [-5, 5].

    The interval is split at a random index and each half is pushed through a
    random linear -> sigmoid -> random linear pipeline before plotting.
    NOTE(review): draws onto the current matplotlib figure; the caller is
    expected to call plt.show() / plt.savefig() afterwards.
    """
    sub_x = np.linspace(-5, 5)
    random_i = np.random.randint(0, len(sub_x))
    left, right = sub_x[:random_i], sub_x[random_i:]
    output = np.concatenate((
        random_linear(sigmoid(random_linear(left))),
        random_linear(sigmoid(random_linear(right)))
    ))
    plt.plot(sub_x, output)
def topological_sort(graph: dict):
    """Yield the nodes of *graph* in topological order.

    :graph: {
        'node': [adjacent1, adjacent2, .. adjacentN],
    }
    The mapping is consumed (mutated) while sorting.  Only nodes that appear
    as keys are yielded, except that the final sink is emitted via the last
    removed node's first successor.  Raises TypeError on a cyclic graph.
    """
    while graph:
        # Every node that appears as someone's successor.
        successors = reduce(lambda a, b: a + b, map(list, graph.values()))
        # Source nodes: keys that nothing points to.
        sources = set(graph.keys()) - set(successors)
        if not sources:
            raise TypeError('the graph contain a cycle, the computing graph need acyclic graph')
        picked = random.choice(list(sources))
        # Remember one successor so the final sink can still be emitted
        # once the mapping is exhausted.
        sink_candidate = graph[picked][0]
        graph.pop(picked)
        yield picked
        if not graph:
            yield sink_candidate
def convert_feed_dict_to_graph(feed_dict: dict):
    """Build a node -> downstream-nodes adjacency mapping starting from the
    feed_dict inputs, filling Placeholder values along the way.

    NOTE(review): the print() calls are leftover debug output.
    """
    computing_graph = defaultdict(list)# defaultdict(list): missing keys get a fresh empty list
    """
    from collections import defaultdict
    result = defaultdict(list)
    data = [("p", 1), ("p", 2), ("p", 3),
           ("h", 1), ("h", 2), ("h", 3)]
    for (key, value) in data:
        result[key].append(value)
    print(result)#defaultdict(<class 'list'>, {'p': [1, 2, 3], 'h': [1, 2, 3]})
    """
    nodes = list(feed_dict.keys())
    print(feed_dict.keys())
    print(feed_dict.values())
    while nodes: # walk the frontier, wiring nodes into the graph
        # the frontier initially holds only input nodes, no compute nodes (f1, f2, ...)
        n = nodes.pop(0)# take the next pending node
        print(n)
        if n in computing_graph: continue # already wired; skip (alternative: pre-initialise f1..f4 directly, but that would need an ordering???)
        if isinstance(n, Placeholder):# isinstance() is the recommended way to type-check
            n.value = feed_dict[n]
        for m in n.outputs:
            computing_graph[n].append(m)# record the edge n -> m
            # print(n.outputs)
            # print(computing_graph)
            nodes.append(m)# compute nodes enter the frontier here
        print(nodes)
    return computing_graph# full adjacency including all compute nodes
def forward_and_backward(graph):
    """Run a forward pass over *graph* in order, then a backward pass in reverse."""
    for node in graph:
        node.forward()
    for node in reversed(graph):
        node.backward()
def optimize(nodes, lr):
    """One step of gradient descent over every trainable node."""
    for node in nodes:
        if not node.trainable:
            continue
        # Gradients are stored on each node, keyed by the node itself.
        step = node.loss_gradient[node] * lr
        node.value = node.value - step
    # remains
"""
[done] 1. topological sorting
2. using topological sorting implement auto-grade
3. create a neural network framework
4. convert single-dimension version to multiply version
5. distribute neural network framework to internet (pip)
"""
if __name__ == '__main__':
    # Load the Boston housing data and expose the feature columns used above.
    data = load_boston()
    x_data = data['data']
    y = data['target']
    desc = data['DESCR']
    # Build a dataframe for convenient column access / correlation analysis.
    dataframe = pd.DataFrame(x_data)
    dataframe.columns = data['feature_names']
    dataframe['price'] = y
    rm = dataframe['RM']
    lstat = dataframe['LSTAT']
    y = dataframe['price']
    # A small two-layer computing graph: node -> downstream nodes.
    complex_graph = {
        'x': ['f1', 'f2'],
        'b1': ['f1'],
        'w1': ['f1'],
        'f1': ['f3'],
        'f3': ['f4', 'f5'],
        'f2': ['f5'],
        'w2': ['f2'],
        'b2': ['f2'],
        'f5': ['loss'],
        'f4': ['loss'],
        'y': ['loss']
    }
    # BUG FIX: `ic` comes from icecream, whose import is commented out at the
    # top of the file, so calling it raised NameError.  Use print instead.
    print(list(topological_sort(complex_graph)))
| StarcoderdataPython |
3342956 | <reponame>kaiker19/incubator-doris<filename>samples/insert/python/insert_utils.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import re
import time
import os
import sys
import MySQLdb
def insert_func(host, port, user, password, database, select_sql, insert_sql):
    """
    insert into doris table select xxx

    Connects to the Doris frontend over the MySQL protocol and delegates the
    actual INSERT ... SELECT execution and status polling to insert_process().

    :param host: Doris FE host
    :param port: Doris FE MySQL-protocol port
    :param user:
    :param password:
    :param database:
    :param select_sql: SELECT column1, column2,..., columnN [FROM TABLE_X WHERE xxx]
    :param insert_sql: INSERT INTO TABLE_Y[(column1, column2,...,columnN)]
    :return:
    """
    # NOTE(review): Python 2 module (print statements below); the connection
    # is never closed/committed here — presumably acceptable for a one-shot
    # demo script, verify before production use.
    db_conn = MySQLdb.connect(host=host,
                              port=port,
                              user=user,
                              passwd=password,
                              db=database)
    db_cursor = db_conn.cursor()
    insert_process(select_sql, insert_sql, db_cursor)
def insert_process(select_sql, insert_sql, cursor):
    """
    issue insert task and check insert task status.

    Runs the SELECT first to skip empty inserts, submits
    ``insert_sql + select_sql`` as one statement, extracts the load label
    Doris returns, then polls ``SHOW LOAD`` until the job FINISHES, is
    CANCELLED, or a one-hour timeout expires.

    :param select_sql: SELECT column1, column2,..., columnN [FROM TABLE_X WHERE xxx]
    :param insert_sql: INSERT INTO TABLE_Y[(column1, column2,...,columnN)]
    :param cursor: open MySQLdb cursor connected to the Doris FE
    :return:
    """
    print insert_sql
    print select_sql
    # Dry-run the SELECT: nothing to insert if it returns no rows.
    cursor.execute(select_sql)
    rows = cursor.fetchall()
    if len(rows) == 0:
        print "select result is empty, don't need insert"
        return
    cursor.execute(insert_sql + select_sql)
    # NOTE(review): cursor._info is a private MySQLdb attribute carrying the
    # server info string, e.g. {'label':'<uuid>'} — fragile across driver
    # versions; confirm it survives MySQLdb upgrades.
    label_info = cursor._info
    label = re.match(
        r'{\'label\':\'([a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12})\'}',
        label_info).group(1)
    print label
    # check insert task status
    sql = "show load where label = '" + label + "' order by CreateTime desc limit 1"
    print sql
    cursor.execute(sql)
    rows = cursor.fetchall()
    # Poll every 5 seconds, give up after one hour.
    timeout = 60 * 60
    load_status = ""
    while timeout > 0:
        # Column 2 of SHOW LOAD is the job state.
        load_status = rows[0][2]
        print "insert status: " + load_status
        if load_status == 'FINISHED' or load_status == 'CANCELLED':
            break
        time.sleep(5)
        timeout = timeout - 5
        cursor.execute(sql)
        rows = cursor.fetchall()
    if load_status == "CANCELLED":
        exit("error: insert data CANCELLED")
    elif load_status != "FINISHED":
        exit("error: insert data timout")
    else:
        print "insert success."
if __name__ == '__main__':
    """
    Befor you run this demo, you should do as below.
    First, you need install MySQLdb, execute cmd by root:
    pip install MySQL-python
    # if you met error: "mysql_config not found", you can execute the following cmd to solve it.
    ln -s /usr/local/mysql/bin/mysql_config /usr/local/bin/mysql_config
    Second, you need input your db connect config & input insert/select sql
    """
    # Demo invocation — replace host/port/credentials and the two SQL
    # fragments with real values before running.
    insert_func("127.0.0.1", 8080, 'user', 'password', '<PASSWORD>',
                "SELECT column1, column2 FROM TABLE_X WHERE column1 = 'test'",
                "INSERT INTO TABLE_Y(column1, column2)")
| StarcoderdataPython |
1726881 | from datetime import date, datetime
from typing import Optional
import pytest
import statey as st
from statey.syms import encoders
@pytest.mark.parametrize(
"type, check",
[
pytest.param(int, lambda x: isinstance(x, encoders.IntegerEncoder), id="int"),
pytest.param(str, lambda x: isinstance(x, encoders.StringEncoder), id="str"),
pytest.param(bool, lambda x: isinstance(x, encoders.BooleanEncoder), id="bool"),
pytest.param(float, lambda x: isinstance(x, encoders.FloatEncoder), id="float"),
pytest.param(
st.Struct["a" : st.Array[int], "b" : st.Boolean],
lambda x: (
isinstance(x, encoders.StructEncoder)
and set(x.field_encoders) == {"a", "b"}
and isinstance(x.field_encoders["a"], encoders.ArrayEncoder)
and isinstance(x.field_encoders["b"], encoders.BooleanEncoder)
),
id="struct",
),
pytest.param(
st.Array[str],
lambda x: isinstance(x, encoders.ArrayEncoder)
and isinstance(x.element_encoder, encoders.StringEncoder),
id="array",
),
pytest.param(
st.Map[str, st.Struct["a" : st.String]],
lambda x: (
isinstance(x, encoders.MapEncoder)
and isinstance(x.key_encoder, encoders.StringEncoder)
and isinstance(x.value_encoder, encoders.StructEncoder)
),
id="map",
),
pytest.param(
st.TypeType(), lambda x: isinstance(x, encoders.TypeEncoder), id="type"
),
pytest.param(
st.DateType(), lambda x: isinstance(x, encoders.DateEncoder), id="date"
),
pytest.param(
st.DateTimeType(),
lambda x: isinstance(x, encoders.DateTimeEncoder),
id="datetime",
),
],
)
def test_get_encoder(type, check, registry):
    """The registry must hand back an encoder satisfying *check* for *type*."""
    resolved_type = registry.get_type(type)
    assert check(registry.get_encoder(resolved_type))
# Sentinel used in the parametrized cases below: expected "result" meaning
# "encoding this value must raise InputValidationError".
RAISES = object()


@pytest.mark.parametrize(
    "type, data, result",
    [
        pytest.param(int, 1, 1, id="int"),
        pytest.param(int, "1", 1, id="int_from_str"),
        pytest.param(int, None, RAISES, id="int_null_fail"),
        pytest.param(~st.Integer, None, None, id="int_null"),
        pytest.param(str, "abc", "abc", id="str"),
        pytest.param(str, 1, RAISES, id="str_from_int_fail"),
        pytest.param(bool, True, True, id="bool"),
        pytest.param(bool, 1, True, id="bool_from_1"),
        pytest.param(float, 1.234, 1.234, id="float"),
        pytest.param(float, "1.234", 1.234, id="float_from_str"),
        pytest.param(st.Array[str], ["a", "b", "c"], ["a", "b", "c"], id="array_str"),
        pytest.param(
            st.Struct["a":str, "b":int],
            {"a": "1", "b": "2"},
            {"a": "1", "b": 2},
            id="struct",
        ),
        pytest.param(
            st.Map[str, st.Array[int]],
            {"a": [1, "2", 3], "b": [], "c": ["53"]},
            {"a": [1, 2, 3], "b": [], "c": [53]},
            id="map",
        ),
        pytest.param(
            st.Struct["nullable" : ~st.String, "non_nullable":bool],
            {"non_nullable": False},
            {"nullable": None, "non_nullable": False},
            id="struct_nullable_field",
        ),
        pytest.param(
            st.Struct[
                "nullable" : ~st.Struct[
                    "sub_1":str, "sub_2" : ~st.Integer, "sub_3" : st.Array[str]
                ],
                "non_nullable":bool,
            ],
            {"non_nullable": False},
            {"nullable": None, "non_nullable": False},
            id="struct_nullable_nested",
        ),
        pytest.param(
            st.Struct[
                "nullable" : ~st.Struct[
                    "sub_1":str, "sub_2" : ~st.Integer, "sub_3" : st.Array[str]
                ],
                "non_nullable":bool,
            ],
            {
                "non_nullable": False,
                "nullable": st.Object[
                    ~st.Struct[
                        "sub_1":str, "sub_2" : ~st.Integer, "sub_3" : st.Array[str]
                    ]
                ](None),
            },
            {"nullable": None, "non_nullable": False},
            id="struct_nullable_nested_obj",
        ),
        pytest.param(st.TypeType(), st.Integer, {"type": "integer"}, id="type_int"),
        pytest.param(
            st.TypeType(),
            st.Struct["a":int, "b":bool],
            {
                "type": "object",
                "properties": {"a": {"type": "integer"}, "b": {"type": "boolean"}},
                "required": ["a", "b"],
            },
            id="type_struct",
        ),
        pytest.param(st.DateType(), date(2020, 1, 1), "2020-01-01", id="date"),
        pytest.param(
            st.DateTimeType(),
            datetime(2020, 11, 2, 11, 5),
            "2020-11-02T11:05:00",
            id="datetime",
        ),
        pytest.param(
            st.DateTime,
            date(2020, 11, 2),
            "2020-11-02T00:00:00",
            id="datetime_from_date",
        ),
        pytest.param(
            st.Date, datetime(2020, 1, 1, 23, 59), "2020-01-01", id="date_from_datetime"
        ),
    ],
)
def test_encode(type, data, result, session, registry):
    """Encoding *data* under *type* yields *result*, or raises when RAISES."""
    type = registry.get_type(type)
    encoder = registry.get_encoder(type)
    if result is RAISES:
        with pytest.raises(st.exc.InputValidationError):
            encoder.encode(data)
    else:
        # Resolve without decoding so we observe the raw encoded payload.
        obj = st.Object[type](data)
        resolved = session.resolve(obj, decode=False)
        assert result == resolved
| StarcoderdataPython |
3398967 | # Copyright (C) <NAME>. All Rights Reserved.
# email: <EMAIL>
from engine.data.transforms import GaussianBlur, TwoCropsTransfrom
from engine.data.build import build_dataset, DatasetCatalog
from engine.utils.metric_logger import MetricLogger
from engine.utils.logger import GroupedLogger
from engine.solver import WarmupMultiStepLR
from engine.utils.checkpoint import Checkpointer
from engine.contrastive.build import build_contrastive_model
from engine.utils.logger import setup_logger, setup_mute_logger
from engine.inference import contrastive_inference, lincls_inference
from engine.data.eval import contrastive_accuracy
from engine.data.samplers import DropLastDistributedSampler
from engine.contrastive.simclr import SimCLRModel
import datetime
import logging
import time
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.modules.loss as loss
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
def build_metric_logger():
    """Factory stub for a metric logger; not implemented yet (see TODOs below)."""
    pass
def lincls_train_worker(device, ngpus_per_node, cfg):
    r"""Distributed linear-classification ("lincls") training worker.

    Loads a contrastively pre-trained backbone (MoCo or SimCLR), freezes
    everything except the final linear layer, and trains that layer —
    the standard linear-evaluation protocol.

    Args:
        device: GPU index assigned to this worker process (None -> default).
        ngpus_per_node: GPUs on this node; used to split batch size and
            dataloader workers across processes.
        cfg: (CfgNode) merged configuration.
    """
    # suppress logging display if not master
    if cfg.MULTIPROC_DIST and device != 0:
        # Capture display logger
        logger = setup_mute_logger("kknight-mute")
    else:
        if not os.path.exists(cfg.OUTPUT_DIR):
            os.mkdir(cfg.OUTPUT_DIR)
        logger = setup_logger("kknight", cfg.OUTPUT_DIR)
        logger.info(cfg)
    if device is not None:
        logger.info("Use GPU: {id} for training".format(id=device))
    if cfg.DISTRIBUTED:
        if cfg.DIST_URL == "envs://" and cfg.RANK == -1:
            cfg.RANK = int(os.environ["RANK"])
        if cfg.MULTIPROC_DIST:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            cfg.RANK = cfg.RANK * ngpus_per_node + device
        dist.init_process_group(backend=cfg.DIST_BACKEND, init_method=cfg.DIST_URL,
                                world_size=cfg.WORLD_SIZE, rank=cfg.RANK)
    # Create model
    logger.info("Creating model.")
    # TODO: call model builder
    if cfg.MODEL.CONTRASTIVE == "moco":
        # NOTE(review): num_classes is hard-coded to 10 — presumably a
        # 10-class dataset; confirm against the dataset catalog.
        model = models.__dict__[cfg.MODEL.ARCH](num_classes=10)
        # freeze all layers but the last fc
        for name, param in model.named_parameters():
            if name not in ['fc.weight', 'fc.bias']:
                param.requires_grad = False
        # initialize fc layer
        model.fc.weight.data.normal_(mean=0.0, std=0.01)
        model.fc.bias.data.zero_()
    elif cfg.MODEL.CONTRASTIVE == "simclr":
        model = SimCLRModel(num_classes=10)
        # Freeze everything except the classification head.
        for name, param in model.named_parameters():
            if 'headding' not in name:
                param.requires_grad = False
    # model = build_contrastive_model(cfg, device=device)
    if os.path.isfile(cfg.PRETRAINED):
        logger.info("Loading pretrained model from {}".format(cfg.PRETRAINED))
        checkpoint = torch.load(cfg.PRETRAINED, map_location="cpu")
        # Load pretrained model
        state_dict = checkpoint["model"]
        if cfg.MODEL.CONTRASTIVE == "moco":
            for k in list(state_dict.keys()):
                # Keep only the query-encoder weights, stripped of their prefix.
                if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
                    # remove prefix
                    state_dict[k[len("module.encoder_q."):]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
        elif cfg.MODEL.CONTRASTIVE == "simclr":
            for k in list(state_dict.keys()):
                if not k.startswith('features'):
                    del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        # NOTE(review): this assertion matches the SimCLR head names only;
        # for the moco branch the missing keys would be fc.* — verify the
        # moco path is exercised with a compatible checkpoint.
        assert set(msg.missing_keys) == {"headding.0.weight", "headding.0.bias"}
    if cfg.DISTRIBUTED:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if device is not None:
            torch.cuda.set_device(device)
            model.cuda(device)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            cfg.SOLVER.BATCH_PER_NODE = int(cfg.SOLVER.BATCH_SIZE / ngpus_per_node)
            cfg.EVAL.BATCH_PER_NODE = int(cfg.EVAL.BATCH_SIZE / ngpus_per_node)
            cfg.DATALOADER.NUM_WORKERS = int((cfg.DATALOADER.NUM_WORKERS + ngpus_per_node - 1)
                                             / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif device is not None:
        torch.cuda.set_device(device)
        model = model.cuda(device)
        # comment out the following line for debugging
        # raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    # define loss function (criterion) and optimizer
    # Build up criterion and deploy on corresponding device
    factory = getattr(loss, cfg.CRITERION)
    criterion = factory().cuda(device=device)
    # Optimizer: only parameters that still require grad (the linear head).
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    optimizer = torch.optim.Adam(parameters, cfg.SOLVER.BASE_LR, betas=cfg.SOLVER.BETAS,
                                 weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    # Learn rate scheduler
    scheduler = WarmupMultiStepLR(optimizer,
                                  milestones=cfg.SOLVER.MILESTONES,
                                  gamma=cfg.SOLVER.GAMMA,
                                  warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
                                  warmup_iters=cfg.SOLVER.WARMUP_EPOCHS,
                                  warmup_method=cfg.SOLVER.WARMUP_METHOD)
    # Checkpoint the model, optimizer and learn rate scheduler if is master node
    arguments = dict()
    arguments["epoch"] = 0
    checkpointer = Checkpointer(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        save_dir=cfg.OUTPUT_DIR,
        save_to_disk=True,
        logger=logger
    )
    # read from checkpoint
    extra_checkpoint_data = checkpointer.load(cfg.CHECKPOINT)
    arguments.update(extra_checkpoint_data)
    start_epoch = arguments["epoch"]
    # TODO: dataset and transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    trans = transforms.Compose([
        transforms.RandomResizedCrop(112, scale=(0.7, 1.)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    train_dataset = build_dataset(cfg.DATA.TRAIN, trans, DatasetCatalog, is_train=True)
    trans = transforms.Compose([
        transforms.Resize(112),
        transforms.CenterCrop(112),
        transforms.ToTensor(),
        normalize,
    ])
    test_dataset = build_dataset(cfg.DATA.TEST, trans, DatasetCatalog, is_train=False)
    # TODO: dataloader and sampler
    if cfg.DISTRIBUTED:
        train_sampler = DropLastDistributedSampler(train_dataset, batch_size=cfg.SOLVER.BATCH_SIZE)
        test_sampler = DropLastDistributedSampler(test_dataset, batch_size=cfg.SOLVER.BATCH_SIZE)
    else:
        train_sampler = None
        test_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.SOLVER.BATCH_PER_NODE,
        shuffle=(train_sampler is None),
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=True,
        sampler=train_sampler,
        drop_last=True,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.EVAL.BATCH_PER_NODE,
        shuffle=False,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=True,
        drop_last=True,
        sampler=test_sampler,
    )
    # TODO: metic logger or tensorboard logger
    meters = MetricLogger(delimiter=" ")
    meters_val = MetricLogger(delimiter=" ")
    # TODO: epoch-wise training pipeline
    for epoch in range(start_epoch, cfg.SOLVER.EPOCH):
        if cfg.DISTRIBUTED:
            train_sampler.set_epoch(epoch)
        # train for one epoch
        do_lincls_train(
            cfg=cfg,
            model=model,
            data_loader=train_loader,
            criterion=criterion,
            optimizer=optimizer,
            epoch=epoch,
            device=device,
            meters=meters,
            logger=logger
        )
        if (epoch + 1) % cfg.EVAL.EVAL_INTERVAL == 0:
            metric = lincls_inference(
                cfg=cfg,
                model=model,
                data_loader=test_loader,
                device=device,
                logger=logger,
            )
            meters_val.update(**metric)
            logger.info(
                meters_val.delimiter.join(
                    [
                        "[Evaluation result]: ",
                        "epoch: {epoch}",
                        "{meters}",
                    ]
                ).format(
                    epoch=epoch,
                    meters=str(meters_val),
                )
            )
        scheduler.step()
        # Record the NEXT epoch to run so that resuming from a checkpoint
        # does not repeat the epoch that was just completed (consistent with
        # train_worker below; previously this stored `epoch`, off by one).
        arguments["epoch"] = epoch + 1
        # Produce checkpoint
        if not cfg.MULTIPROC_DIST or (cfg.MULTIPROC_DIST and cfg.RANK % ngpus_per_node == 0):
            checkpointer.save(
                "checkpoint_{:03d}".format(epoch), **arguments
            )
        if epoch == start_epoch:
            sanity_check(cfg.MODEL.CONTRASTIVE, model.state_dict(), cfg.PRETRAINED)
def sanity_check(contrastive, state_dict, pretrained_weights):
    """
    Linear classifier should not change any weights other than the linear layer.
    This sanity check asserts nothing wrong happens (e.g., BN stats updated).

    Args:
        contrastive: "moco" or "simclr" — selects the key-mapping scheme.
        state_dict: state dict of the model AFTER one epoch of lincls training.
        pretrained_weights: path of the original pre-trained checkpoint.
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    checkpoint = torch.load(pretrained_weights, map_location="cpu")
    state_dict_pre = checkpoint['model']
    if contrastive == "moco":
        for k in list(state_dict.keys()):
            # only ignore fc layer
            if 'fc.weight' in k or 'fc.bias' in k:
                continue
            # name in pretrained model: lincls keys map back to the
            # query-encoder keys of the MoCo checkpoint.
            k_pre = 'module.encoder_q.' + k[len('module.'):] \
                if k.startswith('module.') else 'module.encoder_q.' + k
            assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \
                '{} is changed in linear classifier training.'.format(k)
    elif contrastive == "simclr":
        for k in list(state_dict.keys()):
            # The head ('headding') is the only trainable part for SimCLR.
            if 'headding' in k:
                continue
            assert ((state_dict[k].cpu() == state_dict_pre[k]).all()), \
                '{} is changed in linear classifier training.'.format(k)
    print("=> sanity check passed.")
def do_lincls_train(
        cfg,
        model,
        data_loader,
        criterion,
        optimizer,
        epoch,
        device=None,
        meters=None,
        logger=None,
):
    """Run one epoch of linear-classifier training with gradient accumulation.

    Args:
        cfg: (CfgNode) configuration (ACCUM_GRAD, DISP_INTERVAL, EPOCH).
        model: model with a frozen backbone and trainable linear head.
        data_loader: yields (images, target) batches.
        criterion: classification loss.
        optimizer: optimizer over the trainable parameters only.
        epoch: current epoch index (for logging/ETA).
        device: GPU index, or None.
        meters: MetricLogger accumulating loss/accuracy/timing stats.
        logger: display logger.
    """
    logger.info("Epoch {epoch} now started.".format(epoch=epoch))
    # Linear-evaluation protocol: keep the model in eval mode so BatchNorm
    # running statistics of the frozen backbone are NOT updated; only the
    # linear head receives gradients.
    model.eval()
    # Timers
    end = time.time()
    data_time, batch_time = 0, 0
    # Gradient accumulation interval and statistic display interval
    n_accum_grad = cfg.SOLVER.ACCUM_GRAD
    n_print_intv = n_accum_grad * cfg.SOLVER.DISP_INTERVAL
    max_iter = len(data_loader)
    for iteration, (images, target) in enumerate(data_loader):
        data_time += time.time() - end
        if device is not None:
            images = images.cuda(device, non_blocking=True)
            target = target.cuda(device, non_blocking=True)
        # Forward pass and classification loss.
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = contrastive_accuracy(output, target, topk=(1, 5))
        meters.update(loss=loss)
        meters.update(acc1=acc1, acc5=acc5)
        loss.backward()
        # Compute batch time
        batch_time += time.time() - end
        end = time.time()
        if (iteration + 1) % n_accum_grad == 0 or iteration + 1 == max_iter:
            # Apply accumulated gradients every n_accum_grad steps.
            optimizer.step()
            optimizer.zero_grad()
            # Record batch time and data sampling time
            meters.update(time=batch_time, data=data_time)
            data_time, batch_time = 0, 0
        if (iteration + 1) % n_print_intv == 0 or iteration == max_iter:
            # Estimated time of arrival of remaining epoch
            total_eta = meters.time.global_avg * max_iter * (cfg.SOLVER.EPOCH - epoch)
            eta_seconds = meters.time.global_avg * (max_iter - iteration) + total_eta
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "epoch: {epoch}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}"
                    ]
                ).format(
                    eta=eta_string,
                    epoch=epoch,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024. / 1024.,
                )
            )
def train_worker(device, ngpus_per_node, cfg):
    r"""Distributed parallel contrastive pre-training worker.

    One process per GPU: builds the contrastive model, wraps it in
    DistributedDataParallel, and runs the epoch loop with two-crop
    augmented data.

    Args:
        device: GPU index assigned to this worker process (None -> default).
        ngpus_per_node: GPUs on this node; used to split batch size/workers.
        cfg: (CfgNode) merged configuration.
    """
    # suppress logging display if not master
    if cfg.MULTIPROC_DIST and device != 0:
        # Capture display logger
        logger = setup_mute_logger("kknight-mute")
    else:
        if not os.path.exists(cfg.OUTPUT_DIR):
            os.mkdir(cfg.OUTPUT_DIR)
        logger = setup_logger("kknight", cfg.OUTPUT_DIR)
        logger.info(cfg)
    if device is not None:
        logger.info("Use GPU: {id} for training".format(id=device))
    if cfg.DISTRIBUTED:
        if cfg.DIST_URL == "envs://" and cfg.RANK == -1:
            cfg.RANK = int(os.environ["RANK"])
        if cfg.MULTIPROC_DIST:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            cfg.RANK = cfg.RANK * ngpus_per_node + device
        dist.init_process_group(backend=cfg.DIST_BACKEND, init_method=cfg.DIST_URL,
                                world_size=cfg.WORLD_SIZE, rank=cfg.RANK)
    # Create model
    logger.info("Creating model.")
    # TODO: call model builder
    model = build_contrastive_model(cfg, device=device)
    if cfg.DISTRIBUTED:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if device is not None:
            torch.cuda.set_device(device)
            model.cuda(device)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            cfg.SOLVER.BATCH_PER_NODE = int(cfg.SOLVER.BATCH_SIZE / ngpus_per_node)
            cfg.EVAL.BATCH_PER_NODE = int(cfg.EVAL.BATCH_SIZE / ngpus_per_node)
            cfg.DATALOADER.NUM_WORKERS = int((cfg.DATALOADER.NUM_WORKERS + ngpus_per_node - 1)
                                             / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif device is not None:
        torch.cuda.set_device(device)
        model = model.cuda(device)
        # comment out the following line for debugging
        # raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    # define loss function (criterion) and optimizer
    # Build up criterion and deploy on corresponding device
    factory = getattr(loss, cfg.CRITERION)
    criterion = factory().cuda(device=device)
    # Optimizer
    optimizer = torch.optim.Adam(model.parameters(), cfg.SOLVER.BASE_LR, betas=cfg.SOLVER.BETAS,
                                 weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    # Learn rate scheduler
    scheduler = WarmupMultiStepLR(optimizer,
                                  milestones=cfg.SOLVER.MILESTONES,
                                  gamma=cfg.SOLVER.GAMMA,
                                  warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
                                  warmup_iters=cfg.SOLVER.WARMUP_EPOCHS,
                                  warmup_method=cfg.SOLVER.WARMUP_METHOD)
    # Checkpoint the model, optimizer and learn rate scheduler if is master node
    arguments = dict()
    arguments["epoch"] = 0
    checkpointer = Checkpointer(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        save_dir=cfg.OUTPUT_DIR,
        save_to_disk=True,
        logger=logger
    )
    # read from checkpoint
    extra_checkpoint_data = checkpointer.load(cfg.CHECKPOINT)
    arguments.update(extra_checkpoint_data)
    start_epoch = arguments["epoch"]
    # TODO: dataset and transforms
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Two-crop augmentation pipeline (SimCLR/MoCo-v2 style).
    augmentation = [
        transforms.RandomResizedCrop(112, scale=(0.7, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    trans = TwoCropsTransfrom(transforms.Compose(augmentation))
    train_dataset = build_dataset(cfg.DATA.TRAIN, trans, DatasetCatalog, is_train=True)
    test_dataset = build_dataset(cfg.DATA.TEST, trans, DatasetCatalog, is_train=False)
    # TODO: dataloader and sampler
    if cfg.DISTRIBUTED:
        train_sampler = DropLastDistributedSampler(train_dataset, batch_size=cfg.SOLVER.BATCH_SIZE)
        test_sampler = DropLastDistributedSampler(test_dataset, batch_size=cfg.SOLVER.BATCH_SIZE)
    else:
        train_sampler = None
        test_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.SOLVER.BATCH_PER_NODE,
        shuffle=(train_sampler is None),
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=True,
        sampler=train_sampler,
        drop_last=True,
    )
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.EVAL.BATCH_PER_NODE,
        shuffle=False,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        pin_memory=True,
        sampler=test_sampler,
        drop_last=True,
    )
    # TODO: metic logger or tensorboard logger
    meters = MetricLogger(delimiter=" ")
    meters_val = MetricLogger(delimiter=" ")
    # TODO: epoch-wise training pipeline
    for epoch in range(start_epoch, cfg.SOLVER.EPOCH):
        if cfg.DISTRIBUTED:
            train_sampler.set_epoch(epoch)
        # train for one epoch
        do_contrastive_train(
            cfg=cfg,
            model=model,
            data_loader=train_loader,
            criterion=criterion,
            optimizer=optimizer,
            epoch=epoch,
            device=device,
            meters=meters,
            logger=logger
        )
        if (epoch + 1) % cfg.EVAL.EVAL_INTERVAL == 0:
            metric = contrastive_inference(
                cfg=cfg,
                model=model,
                data_loader=test_loader,
                device=device,
                logger=logger,
            )
            meters_val.update(**metric)
            logger.info(
                meters_val.delimiter.join(
                    [
                        "[Evaluation result]: ",
                        "epoch: {epoch}",
                        "{meters}",
                    ]
                ).format(
                    epoch=epoch,
                    meters=str(meters_val),
                )
            )
        scheduler.step()
        # Store the NEXT epoch so resume does not repeat the finished one.
        arguments["epoch"] = epoch + 1
        # Produce checkpoint
        if not cfg.MULTIPROC_DIST or (cfg.MULTIPROC_DIST and cfg.RANK % ngpus_per_node == 0):
            checkpointer.save(
                "checkpoint_{:03d}".format(epoch), **arguments
            )
def do_contrastive_train(
        cfg,
        model,
        data_loader,
        criterion,
        optimizer,
        epoch,
        device=None,
        meters=None,
        logger=None,
):
    r"""Run one epoch of contrastive training with gradient accumulation.

    Args:
        cfg: (CfgNode) Specified configuration. The final config settings
            integrates the initial default setting and user defined settings
            given by argparse.
        model: (nn.Module) Under the context of this repository, the model
            can be a simple PyTorch neural network or the instance wrapped by
            ContrastiveWrapper.
        data_loader: yields ((query_crop, key_crop), _) two-crop batches.
        criterion: contrastive loss over (logits, target).
        optimizer: optimizer for all model parameters.
        epoch: current epoch index (for logging/ETA).
        device: GPU index, or None.
        meters: MetricLogger accumulating loss/accuracy/timing stats.
        logger: display logger.

    Returns:
        None; progress is reported through *meters* and *logger*.
    """
    logger.info("Epoch {epoch} now started.".format(epoch=epoch))
    # Switch to train mode
    model.train()
    # Timers
    end = time.time()
    data_time, batch_time = 0, 0
    # Gradient accumulation interval and statistic display interval
    n_accum_grad = cfg.SOLVER.ACCUM_GRAD
    n_print_intv = n_accum_grad * cfg.SOLVER.DISP_INTERVAL
    max_iter = len(data_loader)
    for iteration, ((xis, xjs), _) in enumerate(data_loader):
        data_time += time.time() - end
        if device is not None:
            xis = xis.cuda(device, non_blocking=True)
            xjs = xjs.cuda(device, non_blocking=True)
        # Compute embedding and target label
        output, target = model(xis, xjs)
        loss = criterion(output, target)
        # acc1/acc5 are (k + 1)-way constant classifier accuracy
        # measure accuracy and record loss
        acc1, acc5 = contrastive_accuracy(output, target, topk=(1, 5))
        meters.update(loss=loss)
        meters.update(acc1=acc1, acc5=acc5)
        loss.backward()
        # Compute batch time
        batch_time += time.time() - end
        end = time.time()
        if (iteration + 1) % n_accum_grad == 0 or iteration + 1 == max_iter:
            # Apply accumulated gradients every n_accum_grad steps.
            optimizer.step()
            optimizer.zero_grad()
            # Record batch time and data sampling time
            meters.update(time=batch_time, data=data_time)
            data_time, batch_time = 0, 0
        if (iteration + 1) % n_print_intv == 0 or iteration == max_iter:
            # Estimated time of arrival of remaining epoch
            total_eta = meters.time.global_avg * max_iter * (cfg.SOLVER.EPOCH - epoch)
            eta_seconds = meters.time.global_avg * (max_iter - iteration) + total_eta
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "epoch: {epoch}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}"
                    ]
                ).format(
                    eta=eta_string,
                    epoch=epoch,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024. / 1024.,
                )
            )
| StarcoderdataPython |
4817415 | <gh_stars>1-10
"""
tests.__init__.py
~~~~~~~~~~~~~~~~~
"""
| StarcoderdataPython |
110838 | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with CRMint's base Worker and WorkerException classes."""
import json
import time
from functools import wraps
from googleapiclient.errors import HttpError
from random import random
from common import crmint_logging
from google.api_core.retry import Retry
_DEFAULT_MAX_RETRIES = 3
class WorkerException(Exception):  # pylint: disable=too-few-public-methods
    """Worker execution exceptions expected in task handler.

    Raised for failures the task handler treats as anticipated worker errors
    (reported/retried) rather than programming bugs.
    """
class Worker:
    """Abstract worker class.

    Subclasses implement ``_execute`` and declare their parameters in
    ``PARAMS``. ``execute`` is the task-handler entry point; it logs the
    effective parameters, runs ``_execute`` and returns any follow-up
    workers queued via ``_enqueue``.
    """

    # A list describing worker parameters. Each element in the list is a tuple
    # of five elements: 0) parameter's name, 1) parameter's type, 2) True if
    # parameter is required, False otherwise, 3) default value to use when
    # parameter value is missing, and 4) label to show near parameter's field in
    # a web UI. See examples below in worker classes.
    PARAMS = []

    # A list with names of general settings that worker needs as its parameters.
    GLOBAL_SETTINGS = []

    # Maximum number of worker execution attempts.
    MAX_ATTEMPTS = 1

    def __init__(self, params, pipeline_id, job_id):
        self._pipeline_id = pipeline_id
        self._job_id = job_id
        self._params = params
        # Back-fill missing parameters with their declared defaults (index 3).
        for p in self.PARAMS:
            try:
                self._params[p[0]]
            except KeyError:
                self._params[p[0]] = p[3]
        self._workers_to_enqueue = []

    @Retry()
    def _log(self, level, message, *substs):
        """Emit a structured log entry labelled with pipeline/job/worker."""
        crmint_logging.logger.log_struct({
            'labels': {
                'pipeline_id': self._pipeline_id,
                'job_id': self._job_id,
                'worker_class': self.__class__.__name__,
            },
            'log_level': level,
            'message': message % substs,
        })

    def log_info(self, message, *substs):
        self._log('INFO', message, *substs)

    def log_warn(self, message, *substs):
        self._log('WARNING', message, *substs)

    def log_error(self, message, *substs):
        self._log('ERROR', message, *substs)

    def execute(self):
        """Log parameters, run the worker and return queued follow-up workers."""
        self.log_info('Started with params: %s',
                      json.dumps(self._params, sort_keys=True, indent=2,
                                 separators=(', ', ': ')))
        # try:
        #   self._execute()
        # except Exception as e:
        #   raise WorkerException(e) from e
        self._execute()
        # self.log_info('Finished successfully')
        return self._workers_to_enqueue

    def _execute(self):
        """Abstract method that does actual worker's job."""

    def _enqueue(self, worker_class, worker_params, delay=0):
        """Schedule a follow-up worker to run after *delay* seconds."""
        self._workers_to_enqueue.append((worker_class, worker_params, delay))

    def retry(self, func, max_retries=_DEFAULT_MAX_RETRIES):
        """Decorator implementing retries with exponentially increasing delays.

        Client-side HTTP errors (4xx) are re-raised immediately since
        retrying cannot help; every other exception is swallowed and the call
        is retried after 5 * 2**(tries + random()) seconds. The final attempt
        runs outside the try block so its exception propagates to the caller.
        """
        @wraps(func)
        def func_with_retries(*args, **kwargs):
            """Retriable version of function being decorated."""
            tries = 0
            while tries < max_retries:
                try:
                    return func(*args, **kwargs)
                except HttpError as e:
                    # If it is a client side error, then there's no reason to
                    # retry. (A second `except HttpError` clause checking
                    # `e.code` used to follow here; it was unreachable because
                    # this clause already catches HttpError, so it was removed.)
                    if e.resp.status > 399 and e.resp.status < 500:
                        raise e
                except Exception:  # pylint: disable=broad-except
                    pass
                tries += 1
                delay = 5 * 2 ** (tries + random())
                time.sleep(delay)
            return func(*args, **kwargs)
        return func_with_retries
| StarcoderdataPython |
1619580 | <gh_stars>1-10
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import os
from typing import Optional, Dict
from dataclasses import dataclass
from neptune.new import envs
from neptune.new import ANONYMOUS, ANONYMOUS_API_TOKEN
from neptune.new.exceptions import (
NeptuneInvalidApiTokenException,
NeptuneMissingApiTokenException,
)
@dataclass(frozen=True)
class Credentials:
    """Parsed Neptune API credentials derived from an API token."""

    # Raw (stripped) API token as supplied by the caller or the environment.
    api_token: str
    # Mandatory "api_address" field embedded in the token payload.
    token_origin_address: str
    # Optional "api_url" field from the token payload; None when absent.
    api_url_opt: Optional[str]

    @classmethod
    def from_token(cls, api_token: Optional[str] = None) -> "Credentials":
        """Build Credentials from *api_token*, falling back to the env var.

        Raises:
            NeptuneMissingApiTokenException: no token supplied and none found
                in the environment.
            NeptuneInvalidApiTokenException: token is not valid base64-encoded
                JSON or lacks the mandatory "api_address" field.
        """
        if api_token is None:
            api_token = os.getenv(envs.API_TOKEN_ENV_NAME)

        # The ANONYMOUS marker maps to the shared anonymous token.
        if api_token == ANONYMOUS:
            api_token = ANONYMOUS_API_TOKEN

        if api_token is None:
            raise NeptuneMissingApiTokenException()

        api_token = api_token.strip()
        token_dict = Credentials._api_token_to_dict(api_token)
        # TODO: Consider renaming 'api_address' (breaking backward compatibility)
        if "api_address" not in token_dict:
            raise NeptuneInvalidApiTokenException()
        token_origin_address = token_dict["api_address"]
        api_url = token_dict["api_url"] if "api_url" in token_dict else None

        return Credentials(
            api_token=api_token,
            token_origin_address=token_origin_address,
            api_url_opt=api_url,
        )

    @staticmethod
    def _api_token_to_dict(api_token: str) -> Dict[str, str]:
        """Decode the base64-encoded JSON payload of an API token."""
        try:
            return json.loads(base64.b64decode(api_token.encode()).decode("utf-8"))
        except Exception:
            raise NeptuneInvalidApiTokenException()
| StarcoderdataPython |
1671022 | <reponame>CZ-NIC/deckard
"""Simple answer generator using local forwarder"""
# pylint: disable=C0301,C0111,C0103
# flake8: noqa
import ipaddress
import answer_checker
# Queries against the test.knot-resolver.cz zones, keyed by the scenario
# variable name that will be emitted for each answer: plain / EDNS / DNSSEC
# lookups, NSEC and NSEC3 negative proofs, unsigned-delegation DS queries and
# unknown-type queries.
d = {"SIMPLE_ANSWER" : answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A"),
     "EDNS_ANSWER" : answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", use_edns=0),
     "DO_ANSWER" : answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True),
     "CD_ANSWER" : answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True),
     "RRSIG_ANSWER" : answer_checker.make_random_case_query("good-a.test.knot-resolver.cz", "A", want_dnssec=True),
     "DNSKEY_ANSWER" : answer_checker.make_random_case_query("test.knot-resolver.cz", "DNSKEY", want_dnssec=True),
     "DS_ANSWER" : answer_checker.make_random_case_query("cz", "DS", want_dnssec=True),
     "NSEC_NEGATIVE_ANSWER" : answer_checker.make_random_case_query("nonexistent.nsec.test.knot-resolver.cz", "A", want_dnssec=True),
     "NSEC3_NEGATIVE_ANSWER" : answer_checker.make_random_case_query("nonexistent.nsec3.test.knot-resolver.cz", "A", want_dnssec=True),
     "UNKNOWN_TYPE_ANSWER" : answer_checker.make_random_case_query("weird-type.test.knot-resolver.cz", "TYPE20025"),
     "NONEXISTENT_DS_DELEGATION_NSEC_ANSWER" : answer_checker.make_random_case_query("unsigned.nsec.test.knot-resolver.cz", "DS", want_dnssec=True),
     "NONEXISTENT_DS_DELEGATION_NSEC3_ANSWER" : answer_checker.make_random_case_query("unsigned.nsec3.test.knot-resolver.cz", "DS", want_dnssec=True),
     "NONEXISTENT_DELEGATION_FROM_NSEC_ANSWER" : answer_checker.make_random_case_query("nonexistent.nsec.test.knot-resolver.cz", "DS", want_dnssec=True),
     "NONEXISTENT_DELEGATION_FROM_NSEC3_ANSWER" : answer_checker.make_random_case_query("nonexistent.nsec3.test.knot-resolver.cz", "DS", want_dnssec=True),
     "NONEXISTENT_TYPE_NSEC3_ANSWER" : answer_checker.make_random_case_query("nsec3.test.knot-resolver.cz", "TYPE65281", want_dnssec=True),
     "NONEXISTENT_TYPE_NSEC_ANSWER" : answer_checker.make_random_case_query("nsec.test.knot-resolver.cz", "TYPE65281", want_dnssec=True)}

# Resolve each query through the local forwarder (127.0.0.1) and print a
# ready-to-paste Python assignment of the answer for the test data module.
for k, v in d.items():
    print('%s = dns.message.from_text("""%s""")\n' % (k, answer_checker.string_answer(v, ipaddress.IPv4Address("127.0.0.1"))))
| StarcoderdataPython |
16998 | #import
import os
#import torch
#import torch.nn as nn
import torch.utils.data as Data
#import torchvision
import matplotlib.pyplot as plt
import h5py
#from torch.autograd import Variable
import numpy as np
import torch
class rawdataDataset(Data.Dataset):
    """Paired-sample dataset backed by two HDF5 files (``A.h5`` / ``B.h5``).

    Follows the pix2pix-style dataset protocol: construct with no arguments,
    then call :meth:`initialize` with the options object before use.  Sample
    ``i`` is read from datasets ``A<i+1>`` / ``B<i+1>`` of the respective
    files (HDF5 keys are 1-based).
    """

    def __init__(self):
        super(rawdataDataset, self).__init__()

    def name(self):
        """Return the dataset's display name."""
        return 'rawdataDataset'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Hook for dataset-specific CLI options; this dataset adds none."""
        return parser

    def initialize(self, opt):
        """Open the A/B HDF5 files under ``<dataroot>/<phase>/``.

        :param opt: options object; must provide ``dataroot`` and ``phase``.
        """
        self.opt = opt
        self.root = opt.dataroot
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # phase: train / test
        self.A_paths = self.dir_AB + "/A.h5"
        self.B_paths = self.dir_AB + "/B.h5"
        # The files stay open for the lifetime of the dataset so that
        # __getitem__ can read lazily.
        self.frameA = h5py.File(self.A_paths, 'r')
        self.frameB = h5py.File(self.B_paths, 'r')

    def __len__(self):
        # Number of top-level datasets in A.h5; assumed equal for B.h5.
        return len(self.frameA)

    def __getitem__(self, index):
        # HDF5 keys are 1-based ("A1", "B1", ...) while indices are 0-based.
        A = self.frameA["A" + str(index + 1)]
        B = self.frameB["B" + str(index + 1)]
        # The original version also computed input_nc/output_nc from
        # opt.which_direction here, but never used them; removed as dead code.
        return {'A': A, 'B': B, 'A_paths': self.A_paths, 'B_paths': self.B_paths}
#%hist -f rawdata_dataset.py
| StarcoderdataPython |
1694193 | <gh_stars>0
import os
import ebooklib
from ebooklib import epub
from bs4 import BeautifulSoup
def epub2thtml(epub_path):
    """Return the raw HTML content of every document item in an EPUB file."""
    book = epub.read_epub(epub_path)
    return [
        item.get_content()
        for item in book.get_items()
        if item.get_type() == ebooklib.ITEM_DOCUMENT
    ]
# <h1 class="chapterCaption"><a id="CHP14"></a>第十三回<br/>李傕郭汜大交兵<br/>杨奉董承双救驾</h1>
# <h1 class="chapterCaption1"><a id="CHP19"></a>第十八回<br/>贾文和料敌决胜<br/>夏侯惇拔矢啖睛<sup><a class="duokan-footnote" href="#jz_1_172" id="jzyy_1_172"><img alt="" src="../Images/note.png"/></a></sup></h1>
def split_chapter_caption(h1_element):
    """Join the text fragments of a chapter ``<h1>`` with commas, ending with '。'.

    Children without a ``.string`` (e.g. ``<br/>`` or footnote markup) are
    skipped.
    """
    fragments = [child.string for child in h1_element.children
                 if child.string is not None]
    return ','.join(fragments) + "。"
def load_rare_chars(filepath):
    """Load the rare-character lookup table.

    Each non-empty line of the file maps an image file name to the character
    it depicts, separated by whitespace: ``<image_name> <character>``.

    :param filepath: path to the mapping file (UTF-8 encoded).
    :return: dict mapping image file name -> replacement character.
    """
    mapping = {}
    # Explicit encoding: the table contains CJK characters, so do not rely on
    # the platform default encoding.
    with open(filepath, 'r', encoding='utf-8') as f:
        for line in f:
            if not line.strip():
                continue  # tolerate blank lines instead of crashing on split
            key, value = line.split()
            mapping[key] = value
    return mapping
# <span class="rareFont"><img src="../Images/image01005.gif" alt=""/></span>
def convert_rare_characters(chapter, rare_chars_dict):
    """Replace every rare-character placeholder ``<span>`` with its character.

    Placeholders look like
    ``<span class="rareFont"><img src=".../imageXXXXX.gif"/></span>``;
    the image file name is looked up in *rare_chars_dict*.
    """
    for span in chapter.find_all('span', class_='rareFont'):
        for img in span.children:
            name = os.path.basename(img['src'])
            span.replace_with(rare_chars_dict[name])
def thtml2text(thtml, rare_chars_dict):
    """Convert one chapter's raw HTML bytes into plain text.

    Chapter captions (``<h1>``) come first, followed by the body paragraphs,
    one line each.  Rare-character image placeholders are substituted in the
    parsed tree beforehand.
    """
    soup = BeautifulSoup(thtml.decode('utf-8'), 'html.parser')
    convert_rare_characters(soup, rare_chars_dict)
    lines = [split_chapter_caption(h1) for h1 in soup.find_all('h1')]
    lines.extend(p.text for p in soup.find_all('p', class_='bodyContent'))
    return '\n'.join(lines)
if __name__ == '__main__':
    # load rare characters dictionary
    rare_chars_dict = load_rare_chars("../data/raw/rare_characters.txt")
    chapters = epub2thtml("../data/raw/sgyy.epub")
    # print(len(chapters))
    count = 0
    # Convert each chapter document to plain text and write it out as
    # ch001.txt, ch002.txt, ... (count-2 because the first 3 items are
    # front matter, not story chapters).
    for chapter in chapters:
        # skip first 3 chapters
        if count >= 3:
            text = thtml2text(chapter, rare_chars_dict)
            with open("../data/text/ch{:03d}.txt".format(count-2), 'w') as f:
                f.write(text)
        count += 1
1630418 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.modules as nnmodules
from torch.nn.modules.module import _addindent
import torchvision.transforms as transforms
import datasets.vegas_visual as vegas_visual
import numpy as np
import utils as ut # @UnresolvedImport
import values as v
device = 'cuda' if torch.cuda.is_available() else 'cpu' # @UndefinedVariable
# Input geometry: 96x96 RGB crops.
input_height = 96
input_width = 96
input_channels_num = 3
# Feature-map counts for the 13 convolution stages.
conv1_num_maps = 96
conv2_num_maps = 192
conv3_num_maps = 192
conv4_num_maps = 208
conv5_num_maps = 224
conv6_num_maps = 240
conv7_num_maps = 256
conv8_num_maps = 256
conv9_num_maps = 256
conv10_num_maps = 256
conv11_num_maps = 256
conv12_num_maps = 256
# Expected spatial size of the conv12 output (used to reshape the
# conditioning embedding) and of the conv13 output.
conv12_output_shape = (2,2)
conv13_num_maps = 256
conv13_output_shape = (1,1)
last_conv_flat_dim = conv13_num_maps * conv13_output_shape[0] * conv13_output_shape[1]
fc1_dim = 256
fc2_dim = fc1_dim
fc3_dim = fc1_dim
# Convolutions are bias-free; batch norm follows each of them.
bias_conv = False
num_classes = 1
class Net(nn.Module):
    """13-layer fully-convolutional network over 96x96 RGB input.

    Layers conv1..conv12 share the pattern conv -> batch-norm -> activation ->
    dropout(0.5); conv13 maps to ``num_classes`` channels with a tanh output.
    When ``conditioned`` is True, an audio embedding is reshaped onto the
    conv12 output grid and concatenated channel-wise before conv13.

    The attribute names (``conv1``, ``conv1_bn``, ``conv1_drop``, ...) and
    their registration order match the original hand-written version exactly,
    so existing checkpoints keep loading.
    """

    # (in_maps, out_maps, kernel, stride) for conv1..conv12.
    # conv1 originally used kernel_size=(7,5).
    _CONV_SPECS = [
        (input_channels_num, conv1_num_maps, (5, 5), 2),
        (conv1_num_maps, conv2_num_maps, (3, 3), 2),
        (conv2_num_maps, conv3_num_maps, (3, 3), 1),
        (conv3_num_maps, conv4_num_maps, (3, 3), 1),
        (conv4_num_maps, conv5_num_maps, (3, 3), 1),
        (conv5_num_maps, conv6_num_maps, (3, 3), 1),
        (conv6_num_maps, conv7_num_maps, (3, 3), 1),
        (conv7_num_maps, conv8_num_maps, (3, 3), 1),
        (conv8_num_maps, conv9_num_maps, (3, 3), 1),
        (conv9_num_maps, conv10_num_maps, (3, 3), 1),
        (conv10_num_maps, conv11_num_maps, (3, 3), 1),
        (conv11_num_maps, conv12_num_maps, (3, 3), 1),
    ]

    def __init__(self, conditioned=False, activation='relu', activation_alpha=1.0):
        super(Net, self).__init__()
        self.conditioned = conditioned
        # activation options: sigmoid, relu, l_relu, softplus, elu, celu, selu, tanh
        self.activation = activation
        self.activation_alpha = activation_alpha

        # Register conv1..conv12 under their historical attribute names
        # (nn.Module.__setattr__ registers them as submodules).
        for i, (n_in, n_out, kernel, stride) in enumerate(self._CONV_SPECS, start=1):
            setattr(self, 'conv%d' % i,
                    nn.Conv2d(n_in, n_out, kernel_size=kernel, stride=stride, bias=bias_conv))
            setattr(self, 'conv%d_bn' % i, nn.BatchNorm2d(n_out))
            setattr(self, 'conv%d_drop' % i, nn.Dropout2d(0.5))

        if self.conditioned:
            # The conditioning embedding is spread over the conv12 output grid,
            # so conv13 needs the extra channels it contributes.
            input_dim = conv12_num_maps + int(
                v.AUDIO_EMBEDDING_DIMENSION / (conv12_output_shape[0] * conv12_output_shape[1]))
        else:
            input_dim = conv12_num_maps
        self.conv13 = nn.Conv2d(input_dim, num_classes, kernel_size=(2, 2), stride=1, bias=bias_conv)
        self.conv13_bn = nn.BatchNorm2d(num_classes)
        # Kept for state-dict/structure compatibility; not applied in forward()
        # (matching the original implementation).
        self.conv13_drop = nn.Dropout2d(0.5)

    def forward(self, x, emb=None):
        """Run the network; ``emb`` is the optional conditioning embedding."""
        for i in range(1, 13):
            x = getattr(self, 'conv%d' % i)(x)
            x = getattr(self, 'conv%d_bn' % i)(x)
            x = ut.get_activated(x, self.activation, alph=self.activation_alpha)
            x = getattr(self, 'conv%d_drop' % i)(x)
        if emb is not None:
            emb_shape = emb.size()
            emb = emb.view(emb_shape[0], -1, conv12_output_shape[0], conv12_output_shape[1])
            x = torch.cat([x, emb], 1)  # channel-wise concat # @UndefinedVariable
        x = self.conv13(x)
        x = self.conv13_bn(x)
        x = ut.get_activated(x, 'tanh')
        return x

    def summary(self):
        """Print the shape of every learnable parameter tensor."""
        for param in self.parameters():
            print(param.size())

    def torch_summarize(self, show_weights=True, show_parameters=True):
        """Summarizes torch model by showing trainable parameters and weights."""
        return self._summarize(self, show_weights, show_parameters)

    @staticmethod
    def _summarize(model, show_weights, show_parameters):
        """Recursive worker for torch_summarize.

        Bug fix: the original recursed with ``self.torch_summarize(module)``,
        which passed the submodule as ``show_weights`` and re-summarized
        ``self`` forever; this version recurses on the container itself.
        """
        tmpstr = model.__class__.__name__ + ' (\n'
        for key, module in model._modules.items():
            # If it contains layers, call recursively to get params and weights.
            if type(module) in [
                nnmodules.container.Container,
                nnmodules.container.Sequential
            ]:
                modstr = Net._summarize(module, show_weights, show_parameters)
            else:
                modstr = module.__repr__()
            modstr = _addindent(modstr, 2)

            params = sum([np.prod(p.size()) for p in module.parameters()])
            weights = tuple([tuple(p.size()) for p in module.parameters()])

            tmpstr += '  (' + key + '): ' + modstr
            if show_weights:
                tmpstr += ', weights={}'.format(weights)
            if show_parameters:
                tmpstr += ', parameters={}'.format(params)
            tmpstr += '\n'
        tmpstr = tmpstr + ')'
        return tmpstr
def _make_loader(train):
    """Build a VEGAS_VISUAL DataLoader; training data is shuffled, test is not.

    Shared by get_train_loader/get_test_loader, which previously duplicated
    this code verbatim apart from the ``train`` / ``shuffle`` flags.
    """
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    dataset = vegas_visual.VEGAS_VISUAL(root=v.BATCHES_VISUAL_DIR,
            input_height=input_height, input_width=input_width, input_channels_num=input_channels_num,
            train=train, transform=transform)  # @UndefinedVariable
    return torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=train, num_workers=2)  # @UndefinedVariable


def get_train_loader():
    """Return the shuffled training DataLoader (batch size 64)."""
    return _make_loader(train=True)


def get_test_loader():
    """Return the deterministic (unshuffled) test DataLoader (batch size 64)."""
    return _make_loader(train=False)
| StarcoderdataPython |
166379 | from datetime import timedelta
from .interchange import WaypointType
class ActivityStatisticCalculator:
    """Derives distance, moving (timer) time and HR statistics from an
    activity's waypoints.

    All methods take the activity as their first argument and are intended to
    be called through the class, e.g.
    ``ActivityStatisticCalculator.CalculateDistance(act)``.
    """

    # Gaps between samples longer than this are treated as implicit pauses.
    ImplicitPauseTime = timedelta(minutes=1, seconds=5)

    def CalculateDistance(act, startWpt=None, endWpt=None):
        """Integrate 3D point-to-point distance (metres) over [startWpt, endWpt].

        Paused stretches and gaps longer than ImplicitPauseTime contribute
        nothing; waypoints without a usable location are skipped.
        """
        import math
        dist = 0
        # Separate from lastLoc: the altitude is held across altitude-less points.
        altHold = None
        lastTimestamp = lastLoc = None
        flatWaypoints = act.GetFlatWaypoints()
        if not startWpt:
            startWpt = flatWaypoints[0]
        if not endWpt:
            endWpt = flatWaypoints[-1]
        for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
            timeDelta = flatWaypoints[x].Timestamp - lastTimestamp if lastTimestamp else None
            lastTimestamp = flatWaypoints[x].Timestamp
            if flatWaypoints[x].Type == WaypointType.Pause or (timeDelta and timeDelta > ActivityStatisticCalculator.ImplicitPauseTime):
                lastLoc = None  # don't count distance while paused
                continue
            loc = flatWaypoints[x].Location
            if loc is None or loc.Longitude is None or loc.Latitude is None:
                # The TCX schema allows for location-free waypoints - just skip them.
                continue
            if loc and lastLoc:
                altHold = lastLoc.Altitude if lastLoc.Altitude is not None else altHold
                # Local metres-per-degree approximations at this latitude.
                latRads = loc.Latitude * math.pi / 180
                meters_lat_degree = 1000 * 111.13292 + 1.175 * math.cos(4 * latRads) - 559.82 * math.cos(2 * latRads)
                meters_lon_degree = 1000 * 111.41284 * math.cos(latRads) - 93.5 * math.cos(3 * latRads)
                dx = (loc.Longitude - lastLoc.Longitude) * meters_lon_degree
                dy = (loc.Latitude - lastLoc.Latitude) * meters_lat_degree
                if loc.Altitude is not None and altHold is not None:  # incorporate the altitude when possible
                    dz = loc.Altitude - altHold
                else:
                    dz = 0
                dist += math.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
            lastLoc = loc
        return dist

    def CalculateTimerTime(act, startWpt=None, endWpt=None):
        """Sum inter-sample time deltas, excluding explicit pauses and gaps
        longer than the implicit-pause threshold.

        Raises ValueError when there are too few waypoints, or when the whole
        activity (no sub-range requested) has zero moving time.
        """
        # Bug fix: the original *appended* each lap's waypoint list, producing
        # a list of lists whose elements have no .Timestamp; use the flattened
        # waypoint list like the other calculators do.
        flatWaypoints = act.GetFlatWaypoints()
        if len(flatWaypoints) < 3:
            # Either no waypoints, or one at the start and one at the end.
            raise ValueError("Not enough waypoints to calculate timer time")
        # Bug fix: startWpt/endWpt are reassigned below, so the original
        # zero-duration check comparing them to None could never fire;
        # remember up front whether the caller asked for the full range.
        full_range = startWpt is None and endWpt is None
        duration = timedelta(0)
        if not startWpt:
            startWpt = flatWaypoints[0]
        if not endWpt:
            endWpt = flatWaypoints[-1]
        lastTimestamp = None
        for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
            wpt = flatWaypoints[x]
            delta = wpt.Timestamp - lastTimestamp if lastTimestamp else None
            lastTimestamp = wpt.Timestamp
            if wpt.Type is WaypointType.Pause:
                lastTimestamp = None
            # NOTE(review): reads the pause threshold off the activity, unlike
            # CalculateDistance which uses the class attribute - confirm that
            # activity objects define ImplicitPauseTime.
            elif delta and delta > act.ImplicitPauseTime:
                delta = None  # implicit pauses
            if delta:
                duration += delta
        if duration.total_seconds() == 0 and full_range:
            raise ValueError("Zero-duration activity")
        return duration

    def CalculateAverageMaxHR(act, startWpt=None, endWpt=None):
        """Return (average HR, max HR) over [startWpt, endWpt], or (None, None)
        when no waypoint carries a heart-rate sample."""
        flatWaypoints = act.GetFlatWaypoints()
        maxHR = 0
        cumulHR = 0
        samples = 0
        if not startWpt:
            startWpt = flatWaypoints[0]
        if not endWpt:
            endWpt = flatWaypoints[-1]
        for x in range(flatWaypoints.index(startWpt), flatWaypoints.index(endWpt) + 1):
            wpt = flatWaypoints[x]
            if wpt.HR:
                if wpt.HR > maxHR:
                    maxHR = wpt.HR
                cumulHR += wpt.HR
                samples += 1
        if not samples:
            return None, None
        cumulHR = cumulHR / samples
        return cumulHR, maxHR
| StarcoderdataPython |
1624198 | <reponame>nusherjk/DRF-system-for-ecommerce
from django.contrib import admin
from .models import *
from django.contrib.auth.admin import UserAdmin
# Register your models here.
# Expose the custom user model via Django's stock UserAdmin, and register the
# shop models with the default ModelAdmin.
admin.site.register(User, UserAdmin)
admin.site.register(Product)
admin.site.register(ProductReview)
admin.site.register(Order)
admin.site.register(Category)
23919 | # Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
from ._types import IntegerType, VectorType, ToucaType
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, Tuple
class ResultCategory(Enum):
    """Category of a captured result: a regular check or an assertion."""

    Check = 1
    Assert = 2
class ResultEntry:
    """Pairs a ``ToucaType`` value with the result category it belongs to.

    Intentionally not a ``@dataclass`` so the core library keeps zero
    dependency on the ``dataclasses`` module. This may change in the future.
    """

    def __init__(self, typ: ResultCategory, val: ToucaType):
        """Store *val* under category *typ*.

        :param typ: type of the entry
        :param val: value of the entry
        """
        self.typ, self.val = typ, val
class Case:
    """Accumulates checks, assertions and timing metrics for one test case and
    serializes them to JSON or FlatBuffers."""

    def __init__(self, **kwargs):
        # Metadata (team/suite/version/name) supplied by the client.
        self._meta = kwargs
        self._results: Dict[str, ResultEntry] = dict()
        # start/stop timestamps for performance measurements, keyed by metric.
        self._tics: Dict[str, datetime] = dict()
        self._tocs: Dict[str, datetime] = dict()

    def check(self, key: str, value: ToucaType):
        """
        Logs a given value as a test result for the declared test case
        and associates it with the specified key.

        :param key: name to be associated with the logged test result
        :param value: value to be logged as a test result
        """
        self._results[key] = ResultEntry(typ=ResultCategory.Check, val=value)

    def assume(self, key: str, value: ToucaType):
        """
        Logs a given value as an assertion for the declared test case
        and associates it with the specified key.

        :param key: name to be associated with the logged test result
        :param value: value to be logged as a test result
        """
        self._results[key] = ResultEntry(typ=ResultCategory.Assert, val=value)

    def add_array_element(self, key: str, value: ToucaType):
        """
        Adds a given value to a list of results for the declared
        test case which is associated with the specified key.

        Could be considered as a helper utility function.
        This method is particularly helpful to log a list of items as they
        are found:

        .. code-block:: python

            for number in numbers:
                if is_prime(number):
                    touca.add_array_element("prime numbers", number)
                    touca.add_hit_count("number of primes")

        This pattern can be considered as a syntactic sugar for the following
        alternative:

        .. code-block:: python

            primes = []
            for number in numbers:
                if is_prime(number):
                    primes.append(number)
            if primes:
                touca.check("prime numbers", primes)
                touca.check("number of primes", len(primes))

        The items added to the list are not required to be of the same type.

        :raises RuntimeError:
            if specified key is already associated with
            a test result which was not iterable
        :param key: name to be associated with the logged test result
        :param value: element to be appended to the array
        :see also: :py:meth:`~check`
        """
        if key not in self._results:
            self._results[key] = ResultEntry(typ=ResultCategory.Check, val=VectorType())
        vec = self._results.get(key)
        if vec.typ is not ResultCategory.Check or not isinstance(vec.val, VectorType):
            raise RuntimeError("specified key has a different type")
        vec.val.add(value)

    def add_hit_count(self, key: str):
        """
        Increments value of key every time it is executed.
        Creates the key with initial value of one if it does not exist.

        Could be considered as a helper utility function.
        This method is particularly helpful to track variables whose values
        are determined in loops with indeterminate execution cycles.

        :raises RuntimeError:
            if specified key is already associated with
            a test result which was not an integer
        :param key: name to be associated with the logged test result
        :see also: :py:meth:`~check`
        """
        if key not in self._results:
            self._results[key] = ResultEntry(
                typ=ResultCategory.Check, val=IntegerType(1)
            )
            return
        value = self._results.get(key)
        if value.typ is not ResultCategory.Check or not isinstance(
            value.val, IntegerType
        ):
            raise RuntimeError("specified key has a different type")
        value.val._value += 1

    def add_metric(self, key: str, milliseconds: int):
        """
        Adds an already obtained measurement to the list of captured
        performance benchmarks.

        Useful for logging a metric that is measured without using this SDK.

        :param key: name to be associated with this performance benchmark
        :param milliseconds: duration of this measurement in milliseconds
        """
        # Synthesize a tic/toc pair exactly `milliseconds` apart.
        value = datetime.now()
        self._tics[key] = value
        self._tocs[key] = value + timedelta(microseconds=milliseconds * 1000)

    def start_timer(self, key: str):
        """
        Starts timing an event with the specified name.

        Measurement of the event is only complete when function
        :py:meth:`~stop_timer` is later called for the specified name.

        :param key: name to be associated with the performance metric
        """
        self._tics[key] = datetime.now()

    def stop_timer(self, key: str):
        """
        Stops timing an event with the specified name.

        Expects function :py:meth:`~start_timer` to have been called previously
        with the specified name.

        :param key: name to be associated with the performance metric
        """
        if key in self._tics:
            self._tocs[key] = datetime.now()

    def _metrics(self) -> Tuple[str, ToucaType]:
        """Yield each completed measurement as (key, duration in ms)."""
        for key, tic in self._tics.items():
            if key not in self._tocs:
                continue
            # Bug fix: `.microseconds` only exposes the sub-second component
            # of a timedelta, so any measurement of one second or longer was
            # silently truncated; use the full duration instead.
            diff = (self._tocs[key] - tic).total_seconds() * 1000
            yield key, IntegerType(int(diff))

    def _metadata(self) -> Dict[str, str]:
        """Return the message metadata, defaulting missing fields to 'unknown'."""
        return {
            "teamslug": self._meta.get("team") or "unknown",
            "testsuite": self._meta.get("suite") or "unknown",
            "version": self._meta.get("version") or "unknown",
            "testcase": self._meta.get("name") or "unknown",
            "builtAt": datetime.now().isoformat(),
        }

    def json(self):
        """Return a JSON-serializable dict of everything captured so far."""
        return {
            "metadata": self._metadata(),
            "results": [
                {"key": k, "value": v.val.json()}
                for k, v in self._results.items()
                if v.typ is ResultCategory.Check
            ],
            "assertions": [
                {"key": k, "value": v.val.json()}
                for k, v in self._results.items()
                if v.typ is ResultCategory.Assert
            ],
            "metrics": [{"key": k, "value": v.json()} for k, v in self._metrics()],
        }

    def serialize(self) -> bytearray:
        """Serialize the captured data into a Touca FlatBuffers message."""
        from flatbuffers import Builder
        import touca._schema as schema

        dicts = {
            ResultCategory.Check: schema.ResultType.Check,
            ResultCategory.Assert: schema.ResultType.Assert,
        }
        builder = Builder(1024)

        metadata = {k: builder.CreateString(v) for k, v in self._metadata().items()}
        schema.MetadataStart(builder)
        schema.MetadataAddTeamslug(builder, metadata.get("teamslug"))
        schema.MetadataAddTestsuite(builder, metadata.get("testsuite"))
        schema.MetadataAddVersion(builder, metadata.get("version"))
        schema.MetadataAddTestcase(builder, metadata.get("testcase"))
        schema.MetadataAddBuiltAt(builder, metadata.get("builtAt"))
        fbs_metadata = schema.MetadataEnd(builder)

        # Results/assertions: child strings and values must be created before
        # the table is started (FlatBuffers requirement).
        result_entries = []
        for k, v in self._results.items():
            fbs_key = builder.CreateString(k)
            fbs_value = v.val.serialize(builder)
            schema.ResultStart(builder)
            schema.ResultAddKey(builder, fbs_key)
            schema.ResultAddValue(builder, fbs_value)
            schema.ResultAddTyp(builder, dicts.get(v.typ))
            result_entries.append(schema.ResultEnd(builder))

        schema.ResultsStartEntriesVector(builder, len(result_entries))
        for item in reversed(result_entries):
            builder.PrependUOffsetTRelative(item)
        fbs_result_entries = builder.EndVector()

        schema.ResultsStart(builder)
        schema.ResultsAddEntries(builder, fbs_result_entries)
        fbs_results = schema.ResultsEnd(builder)

        metric_entries = []
        for k, v in self._metrics():
            fbs_key = builder.CreateString(k)
            fbs_value = v.serialize(builder)
            schema.MetricStart(builder)
            schema.MetricAddKey(builder, fbs_key)
            schema.MetricAddValue(builder, fbs_value)
            metric_entries.append(schema.MetricEnd(builder))

        schema.MetricsStartEntriesVector(builder, len(metric_entries))
        for item in reversed(metric_entries):
            builder.PrependUOffsetTRelative(item)
        fbs_metric_entries = builder.EndVector()

        schema.MetricsStart(builder)
        schema.MetricsAddEntries(builder, fbs_metric_entries)
        fbs_metrics = schema.MetricsEnd(builder)

        schema.MessageStart(builder)
        schema.MessageAddMetadata(builder, fbs_metadata)
        schema.MessageAddResults(builder, fbs_results)
        schema.MessageAddMetrics(builder, fbs_metrics)
        fbs_message = schema.MessageEnd(builder)

        builder.Finish(fbs_message)
        return builder.Output()
| StarcoderdataPython |
48056 | #Función que calcula la matriz resultante "C" después de aplicar la operación convolución de A*B=
# EJERCICIO 28 DE OCTUBRE
# <NAME> A01377098
import numpy as np
def convolucion(A, B):
    """Return the 'valid' sliding-window correlation of square matrix A with kernel B.

    Output cell C[i][j] is the elementwise product-sum of B with the
    same-sized window of A whose top-left corner is (i, j), so the result has
    shape (n-k+1, n-k+1) for an n x n input and k x k kernel (no kernel flip,
    matching the original window logic).

    Bug fix: the original mutated a hard-coded global 2x2 matrix, advanced the
    row offset after every single window, and returned early - so only two of
    the four cells were computed, one of them with the wrong offset.  This
    version computes every window and returns the result.

    :param A: square input matrix (array-like, n x n).
    :param B: square kernel (array-like, k x k, with k <= n).
    :return: numpy array of shape (n-k+1, n-k+1).
    """
    A = np.asarray(A)
    B = np.asarray(B)
    n = len(A)
    k = len(B)
    out = n - k + 1
    C = np.zeros((out, out))
    for i in range(out):
        for j in range(out):
            # Product-sum of the kernel with the window anchored at (i, j).
            C[i][j] = np.sum(A[i:i + k, j:j + k] * B)
    return C


Matriz = [[6, 9, 0, 3], [8, 4, 9, 1], [4, 1, 3, 12], [3, 2, 1, 100]]
Filtro = [[1, 0, 2], [5, 0, 9], [6, 2, 1]]
A = np.array(Matriz)
B = np.array(Filtro)
C = convolucion(A, B)
print(C)
| StarcoderdataPython |
1692996 | from relogic.logickit.scorer.scorer import Scorer
from relogic.logickit.utils.utils import softmax, sigmoid
import torch.nn.functional as F
import torch
from tqdm import tqdm
import os
import subprocess
import json
class RecallScorer(Scorer):
    """Computes hits@k / recall@k for bidirectional entity-alignment ranking.

    Example guids are "<query>-<candidate>-<direction>"; direction 1 means the
    pair is stored reversed.  Candidates are ranked per query by the predicted
    probability of ``correct_label`` and recall is reported separately for the
    forward (left) and backward (right) directions.
    """

    def __init__(self, label_mapping, topk, correct_label='1', dump_to_file=None):
        super(RecallScorer, self).__init__()
        self.label_mapping = label_mapping
        self._inv_label_mapping = {v: k for k, v in label_mapping.items()}
        self._examples = []
        self._preds = []
        self.topk = topk
        self.correct_label = correct_label
        # Bug fix: these attributes were only created when dump_to_file was
        # given, so _get_results crashed with AttributeError otherwise.
        self.dump_to_file_path = None
        self.dump_to_file_handler = None
        if dump_to_file:
            self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")

    def update(self, mbs, predictions, loss, extra_args):
        """Accumulate (example, prediction) pairs from one minibatch."""
        super(RecallScorer, self).update(mbs, predictions, loss, extra_args)
        for example, preds in zip(mbs.examples, predictions):
            self._examples.append(example)
            self._preds.append(preds)

    def get_loss(self):
        # Ranking evaluation has no associated loss.
        return 0

    def _get_results(self):
        """Rank candidates per query and compute hits@topk in both directions."""
        if self.dump_to_file_path:
            self.dump_to_file_handler = open(self.dump_to_file_path, 'w')

        self._n_hit_left, self._n_hit_right, self._n_total_left, self._n_total_right = 0, 0, 0, 0
        pred_collection = [{}, {}]  # forward direction and backward direction
        gold_collection = [{}, {}]
        for example, preds in zip(self._examples, self._preds):
            prob = preds[self.label_mapping[self.correct_label]].item()
            query_id, candidate_id, direction = example.guid.split('-')
            direction = int(direction)
            if direction == 1:
                # Backward pairs are stored reversed; normalize to query->candidate.
                query_id, candidate_id = candidate_id, query_id
            if self.dump_to_file_handler:
                self.dump_to_file_handler.write("{} {} {} {}\n".format(query_id, candidate_id, direction, prob))
            if query_id not in pred_collection[direction]:
                pred_collection[direction][query_id] = []
            pred_collection[direction][query_id].append((candidate_id, prob))
            gold_query_id, gold_candidate_id, gold_direction = example.gold_pair.split('-')
            gold_direction = int(gold_direction)
            if gold_direction == 1:
                gold_query_id, gold_candidate_id = gold_candidate_id, gold_query_id
            gold_collection[gold_direction][gold_query_id] = gold_candidate_id

        if len(pred_collection[0]) != len(gold_collection[0]) or len(pred_collection[1]) != len(gold_collection[1]):
            raise ValueError("The query size in pred collection {}|{} is different from gold collection {}|{}".format(
                len(pred_collection[0]), len(pred_collection[1]), len(gold_collection[0]), len(gold_collection[1])))

        for d in range(2):
            for query_id in pred_collection[d]:
                # Rank candidates by descending probability and keep top-k.
                sorted_list = sorted(pred_collection[d][query_id], key=lambda x: x[1], reverse=True)
                candidate_ids = [x[0] for x in sorted_list][:self.topk]
                if gold_collection[d][query_id] in candidate_ids:
                    if d == 0:
                        self._n_hit_left += 1
                    else:
                        self._n_hit_right += 1
        self._n_total_left = len(gold_collection[0])
        self._n_total_right = len(gold_collection[1])

        if self.dump_to_file_handler:
            self.dump_to_file_handler.close()

        return [("hits_left", self._n_hit_left),
                ("hits_right", self._n_hit_right),
                ("total_left", self._n_total_left),
                ("total_right", self._n_total_right),
                ("recall_left", self._n_hit_left / self._n_total_left),
                ("recall_right", self._n_hit_right / self._n_total_right)]
class CartesianMatchingRecallScorer(Scorer):
    """Dumps pair-matching representations to a JSONL file and scores Hits@1
    through the external evals/pair_matching/entity_align_eval.py script."""
    def __init__(self, topk, qrels_file_path, dump_to_file=None):
        super(CartesianMatchingRecallScorer, self).__init__()
        self.topk = topk
        self.qrels_file_path = qrels_file_path
        if dump_to_file:
            self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
            self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
        else:
            # NOTE(review): terminating the whole process from a constructor is
            # drastic - consider raising ValueError instead of exit().
            print("You need to specify the dump_to_file path for the pair matching task")
            exit()

    def update(self, mbs, reprs, loss, extra_args):
        # Write one JSON line per example: guid plus a space-joined feature vector.
        super(CartesianMatchingRecallScorer, self).update(mbs, reprs, loss, extra_args)
        for example, repr in zip(mbs.examples, reprs):
            self.dump_to_file_handler.write(json.dumps({
                "guid": example.guid,
                "feature": " ".join([str(f) for f in repr.cpu().data.numpy()])}) + "\n")

    def get_loss(self):
        # Evaluation-only scorer; no loss is tracked.
        return 0

    def _get_results(self):
        # Close the dump file so the external evaluator sees complete data.
        self.dump_to_file_handler.close()
        dir = os.path.abspath(os.path.dirname(__file__))
        recall_eval_path = os.path.join(dir, '..', '..', '..', 'evals', 'pair_matching', 'entity_align_eval.py')
        eval_out = subprocess.check_output(["python", recall_eval_path, "-e", self.dump_to_file_path, "-g", self.qrels_file_path])
        eval_out_lines = str(eval_out, 'utf-8').split('\n')
        results = []
        # Collect the two "Hits@1: xx%" lines (left and right directions).
        for line in eval_out_lines:
            if line.startswith("Hits@1:"):
                score = float(line.strip().split(" ")[1].strip("%"))
                results.append(score)
        return [("recall_left", results[0]),
                ("recall_right", results[1])]
# class CartesianMatchingRecallScorer(Scorer):
# def __init__(self, topk, dump_to_file=None):
# super(CartesianMatchingRecallScorer, self).__init__()
# self.topk = topk
# self.left = {}
# self.right = {}
# self.left_to_right_gold = {}
# self.right_to_left_gold = {}
# if dump_to_file:
# self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
#
#
# def update(self, mbs, reprs, loss, extra_args):
# super(CartesianMatchingRecallScorer, self).update(mbs, reprs, loss, extra_args)
# for example, repr in zip(mbs.examples, reprs):
# left_id, right_id, direc = example.guid.split('-')
# if direc == "0":
# self.left[left_id] = repr
# else:
# self.right[right_id] = repr
# left_id, right_id, direc = example.gold_pair.split('-')
# if direc == "0":
# self.left_to_right_gold[left_id] = right_id
# else:
# self.right_to_left_gold[right_id] = left_id
#
# def get_loss(self):
# return 0
#
# def _get_results(self):
# if self.dump_to_file_path:
# self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
#
# self._n_hit_left, self._n_hit_right, self._n_total_left, self._n_total_right = 0, 0, 0, 0
# if self.dump_to_file_path:
# for key in self.left:
# self.dump_to_file_handler.write("{}\t".format(key) + " ".join([str(f) for f in self.left[key].cpu().data.numpy()]) + '\n')
# for key in self.right:
# self.dump_to_file_handler.write("{}\t".format(key) + " ".join([str(f) for f in self.right[key].cpu().data.numpy()]) + '\n')
# left_to_right_distance = {}
# right_to_left_distance = {}
# print("Evaluating left to right")
# for left_id in self.left.keys():
# left_to_right_distance[left_id] = {}
# for right_id in self.right.keys():
# left_to_right_distance[left_id][right_id] = torch.sum(torch.abs(self.left[left_id] - self.right[right_id])).item()
# print("Evaluating right to left")
# for right_id in self.right.keys():
# right_to_left_distance[right_id] = {}
# for left_id in self.left.keys():
# right_to_left_distance[right_id][left_id] = torch.sum(torch.abs(self.right[right_id] - self.left[left_id])).item()
#
# for left_id in self.left.keys():
# sorted_list = sorted(left_to_right_distance[left_id].items(), key=lambda x: x[1])
# candidate_ids = [x[0] for x in sorted_list][:self.topk]
# if self.left_to_right_gold[left_id] in candidate_ids:
# self._n_hit_left += 1
# # if dump_to_file:
# # for right_id, dist in sorted_list:
# # fout.write("{}\t{}\t{}\n".format(left_id, right_id, dist))
# for right_id in self.right.keys():
# sorted_list = sorted(right_to_left_distance[right_id].items(), key=lambda x: x[1])
# candidate_ids = [x[0] for x in sorted_list][:self.topk]
# if self.right_to_left_gold[right_id] in candidate_ids:
# self._n_hit_right += 1
# # if dump_to_file:
# # for left_id, dist in sorted_list:
# # fout.write("{}\t{}\t{}\n".format(right_id, left_id, dist))
#
# if self.dump_to_file_path:
# self.dump_to_file_handler.close()
#
# self._n_total_left = len(self.left_to_right_gold)
# self._n_total_right = len(self.right_to_left_gold)
# return [("hits_left", self._n_hit_left),
# ("hits_right", self._n_hit_right),
# ("total_left", self._n_total_left),
# ("total_right", self._n_total_right),
# ("recall_left", self._n_hit_left / self._n_total_left),
# ("recall_right", self._n_hit_right / self._n_total_right)]
class RetrievalScorer(Scorer):
    """
    Scorer for reranking-style retrieval tasks.

    Collects per-example classification logits (or a regression score),
    keeps the best score per (query, document) pair, writes a TREC run
    file and scores it with the bundled ``trec_eval`` binary.
    """
    def __init__(self, label_mapping, qrels_file_path, correct_label='1', dump_to_file=None, regression=False):
        # label_mapping: maps label strings to logit indices.
        # correct_label: the label whose probability is used as the rank score.
        # regression: if True, a single sigmoid-squashed output is the score.
        super(RetrievalScorer, self).__init__()
        self.label_mapping = label_mapping
        self._inv_label_mapping = {v: k for k, v in label_mapping.items()}
        self._examples = []
        self._preds = []
        self.correct_label = correct_label
        self.qrels_file_path = qrels_file_path
        self.regression = regression
        # Because we need to leverage trec_eval to calculate the scores, so dump_to_file can not be None
        if dump_to_file:
            self.dump_to_file_path = os.path.join(dump_to_file["output_dir"], dump_to_file["task_name"] + "_dump.json")
            self.dump_to_file_handler = open(self.dump_to_file_path, 'w')
        else:
            print("You need to specify the dump_to_file path for the retrieval task")
            exit()
    def update(self, mbs, predictions, loss, extra):
        """Buffer each example with its raw prediction vector for later scoring."""
        super(RetrievalScorer, self).update(mbs, predictions, loss, extra)
        # TODO: we are going to migrate the interface !
        predictions = predictions["logits"]
        for example, preds in zip(mbs.examples, predictions):
            self._examples.append(example)
            self._preds.append(preds.data.cpu().numpy())
    def get_loss(self):
        # This scorer tracks no loss of its own.
        return 0
    def _get_results(self):
        """Write a TREC run file and return the MAP score from trec_eval.

        Example guids are "<query_id>|<doc_id>"; when a pair appears more
        than once, the maximum score wins.
        """
        topic_doc_collection = {}
        for example, preds in zip(self._examples, self._preds):
            if self.regression:
                score = sigmoid(preds)[0]
            else:
                preds = softmax(preds)
                score = preds[self.label_mapping[self.correct_label]]
            text_a_id, text_b_id = example.guid.split('|')
            if text_a_id not in topic_doc_collection:
                topic_doc_collection[text_a_id] = {}
            topic_doc_collection[text_a_id][text_b_id] = max(topic_doc_collection[text_a_id].get(text_b_id, 0), score)
        # Standard TREC run format: qid Q0 docid rank score tag.
        for text_a_id in topic_doc_collection:
            for text_b_id in topic_doc_collection[text_a_id]:
                score = topic_doc_collection[text_a_id][text_b_id]
                self.dump_to_file_handler.write("{} Q0 {} 0 {} rerank\n".format(text_a_id, text_b_id, score))
        self.dump_to_file_handler.flush()
        self.dump_to_file_handler.close()
        dir = os.path.abspath(os.path.dirname(__file__))
        trec_eval_path = os.path.join(dir, '..', '..', '..', 'evals', 'trec_eval', 'trec_eval.9.0.4/trec_eval')
        trec_out = subprocess.check_output([trec_eval_path, self.qrels_file_path , self.dump_to_file_path])
        trec_out_lines = str(trec_out, 'utf-8').split('\n')
        # Line 5 of trec_eval's default output carries the MAP value.
        mean_average_precision = float(trec_out_lines[5].split('\t')[-1])
        # mean_reciprocal_rank = float(trec_out_lines[9].split('\t')[-1])
        # p_30 = float(trec_out_lines[25].split('\t')[-1])
        return [("map", mean_average_precision)]
| StarcoderdataPython |
4816908 | # 组合模式
class Store(object):
    """Abstract storefront in the composite pattern.

    Concrete subclasses are either composites that manage child stores
    (BranchStore) or leaves (JoinStore). All hooks default to no-ops.
    """

    def add(self, store):
        """Attach a child store; no-op in the base class."""
        pass

    def remove(self, store):
        """Detach a child store; no-op in the base class."""
        pass

    def pay_by_card(self):
        """Accumulate membership-card points; no-op in the base class."""
        pass
class BranchStore(Store):
    """Composite storefront: credits itself, then cascades to all children."""

    def __init__(self, name):
        self.name = name
        self.my_store_list = []

    def pay_by_card(self):
        # Credit this branch first, then forward the payment to every child.
        print("店面[%s]的积分已累加进该会员卡" % self.name)
        for child in self.my_store_list:
            child.pay_by_card()

    def add(self, store):
        """Register a child store under this branch."""
        self.my_store_list.append(store)

    def remove(self, store):
        """Unregister a previously added child store."""
        self.my_store_list.remove(store)
class JoinStore(Store):
    """Leaf storefront (franchise) that cannot manage child stores."""

    def __init__(self, name):
        self.name = name

    def pay_by_card(self):
        print("店面[%s]的积分已累加进该会员卡" % self.name)

    def add(self, store):
        # Leaves reject composite operations; report instead of raising.
        print("无添加子店权限")

    def remove(self, store):
        # Leaves reject composite operations; report instead of raising.
        print("无删除子店权限")
if __name__ == '__main__':
    # Build a two-level composite: headquarters -> branch -> two franchises.
    store = BranchStore("朝阳总店")
    branch = BranchStore("海滨分店")
    join_branch = JoinStore("昌平加盟1店")
    join_branch2 = JoinStore("昌平加盟2店")
    branch.add(join_branch)
    branch.add(join_branch2)
    store.add(branch)
    # One payment at the root credits every store in the tree.
    store.pay_by_card()
    print(store.my_store_list)
3392152 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gtk
from util import *
from overlay import *
from butter_bar import *
from tab_panel import *
import ui
class MainWindowBase(gtk.Window):
    """Top-level application window for the debugger UI (PyGTK).

    Owns the menu bar, butter-bar notification area, center stage, two tab
    panels and the splitters between them; persists window and per-layout
    splitter sizes via the settings object and hosts MainWindowOverlay
    objects that get first crack at key events.
    """
    def __init__(self, settings, resources):
        gtk.Window.__init__(self)
        self._settings = settings
        self._resources = resources
        self._layout = None
        # True while a layout change is applying saved sizes, so the
        # resulting resize events are not written back to settings.
        self._layout_changing = False
        self._pending_stop_ignoring_changes_message = None
        self._init_window()
        self._overlays = []
        self._ids_in_use = set()
        self._tab_owner_overlays = {}
    @property
    def resources(self):
        """Resource bundle used to look up menu text and similar assets."""
        return self._resources
    def _handle_escape_pressed(self):
        """Close the topmost butter bar, else focus the center stage child."""
        log3("Escape with %s focused: " % self.get_focus())
        if _is_child_of(self.get_focus(), self._cstage) == False:
            if len(self._butter_bar_collection):
                print("Closing first butter bar")
                self._butter_bar_collection.remove_bar(self._butter_bar_collection[0])
                return True
            children = self._cstage.get_children()
            if len(children):
                print("Focusing center stage")
                children[0].grab_focus()
                return True
    def _on_exit(self,u):
        # File > Exit menu handler.
        ui.quit()
    def _on_destroy(self,*args):
        if ui.is_running():
            ui.quit()
    def destroy(self):
        """Destroy all overlays before tearing down the window itself."""
        for ovl in self._overlays:
            ovl.destroy()
        gtk.Window.destroy(self)
    def new_overlay(self,name):
        """Create, register and return a new MainWindowOverlay."""
        ovl = MainWindowOverlay(self,self._settings,name,self._layout)
        self._overlays.append(ovl)
        return ovl
    @property
    def overlays(self):
        return list(self._overlays) # copy so client can't mutate the overlay
    def _make_menu(self):
        """Build the File/Debug/Tools/Tabs menu bar from resource strings."""
        menu_bar = gtk.MenuBar()
        def getM(t):
            return self._resources.get_resource_of_type(MenuResource,t)
        def getMI(t):
            return self._resources.get_resource_of_type(MenuItemResource,t)
        file_menu_item = gtk.MenuItem(getM('main_menu.file').text)
        file_menu = gtk.Menu()
        file_menu_item.set_submenu(file_menu)
        self.file_menu = file_menu; # remember it, let it be extended
        exit_item = gtk.MenuItem(getMI('main_menu.file.exit').text)
        exit_item.connect_object("activate", self._on_exit, None)
        file_menu.append(exit_item)
        menu_bar.append(file_menu_item)
        # debug menu
        debug_menu_item = gtk.MenuItem(getM("main_menu.debug").text)
        debug_menu = gtk.Menu()
        debug_menu_item.set_submenu(debug_menu)
        self.debug_menu = debug_menu
        menu_bar.append(debug_menu_item)
        # tools menu
        tools_menu_item = gtk.MenuItem(getM('main_menu.tools').text)
        tools_menu = gtk.Menu()
        tools_menu_item.set_submenu(tools_menu)
        self.tools_menu = tools_menu
        menu_bar.append(tools_menu_item)
        # tabs menu
        tabs_menu_item = gtk.MenuItem(getM('main_menu.tabs').text)
        tabs_menu = gtk.Menu()
        tabs_menu_item.set_submenu(tabs_menu)
        self.tabs_menu = tabs_menu
        menu_bar.append(tabs_menu_item)
        menu_bar.show_all()
        return menu_bar
    def _on_key_press_event(self,w,event):
        """Give overlays first crack at the key, then handle F10/Escape."""
        # log3("Processing %s", event)
        for ovl in self._overlays:
            if ovl._handle_key_press(event):
                return True
        keyname = gtk.gdk.keyval_name(event.keyval)
        if keyname == 'F10': # eat f10 in all cases...
            return True
        if keyname == 'Escape' and event.state == 0:
            if self._handle_escape_pressed():
                log3("Escape was handled by MainWindow.last_chance_escape_handler")
                return True
    def _on_delete_event(self,*args):
        return False # let the delete proceed
    def _init_window(self):
        """Create and wire all child widgets, panes and event handlers."""
        self.set_title("Nicer Debugger")
        self.add_events(gtk.gdk.KEY_PRESS_MASK)
        self.connect_object("destroy", self._on_destroy, None)
        self.connect_object("delete_event", self._on_delete_event, None)
        # keypress hook for pesky keys
        self.connect('key_press_event', self._on_key_press_event)
        # primary objects
        menu_bar = self._make_menu()
        butter_bar_collection = ButterBarCollection()
        butter_bar_collection.show()
        cstage = gtk.VBox()
        cstage.show()
        panel1 = TabPanel(self,"panel1")
        panel2 = TabPanel(self,"panel2")
        # save the important ones
        self._menu_bar = menu_bar
        self._butter_bar_collection = butter_bar_collection
        self._cstage = cstage
        self._panels = {}
        for panel in [panel1, panel2]:
            self._panels[panel.id] = panel
        # layout objects
        vbox = gtk.VBox()
        vbox.show()
        vpane = BetterVPaned()
        vpane.id = "vpane1"
        vpane.position_gravity = BETTER_PANE_POSITION_GRAVITY_END
        vpane.show()
        hpane = BetterHPaned()
        hpane.id = "vpane2"
        hpane.position_gravity = BETTER_PANE_POSITION_GRAVITY_RELATIVE
        hpane.show()
        self._pending_save = None
        self._splitters = [vpane, hpane]
        self._init_sizes()
        # add them together
        self.add(vbox)
        vbox.pack_start(menu_bar, False,False, 0)
        vbox.pack_start(butter_bar_collection, False,False, 0)
        vbox.pack_start(vpane,True,True,0)
        vpane.pack1(cstage)
        vpane.pack2(hpane)
        hpane.pack1(panel1)
        hpane.pack2(panel2)
        # Fallback positions used when no saved size exists for a splitter.
        self._pane_default_positions = {
            vpane.id : 150,
            hpane.id : 50,
            }
    @property
    def panels(self):
        return self._panels
    @property
    def butter_bar_collection(self):
        return self._butter_bar_collection
    @property
    def menu_bar(self):
        return self._menu_bar
    def add_center_stage(self,widget):
        """Place *widget* in the single-slot center stage (raises if occupied)."""
        if len(self._cstage.get_children()) != 0:
            raise Exception("Center stage full")
        self._cstage.add(widget)
        widget.show()
    ###########################################################################
    def _init_sizes(self):
        """Restore persisted window/splitter sizes and hook change listeners."""
        self._settings.register("WindowSize", dict, {})
        try:
            self.set_size_request(self._settings.WindowSize["width"], self._settings.WindowSize["height"])
            # print "MainWindow: Using saved size"
        except KeyError:
            # print "MainWindow: Using default size"
            self.set_size_request(750,650)
        self._settings.register("SplitterSizes", dict, {})
        # add listeners
        self.connect('size-allocate', self._window_size_changed)
        for splitter in self._splitters:
            splitter.position_changed.add_listener(self._splitter_position_changed)
        # update pane sizes
        self._update_splitter_sizes()
    def _window_size_changed(self, *args):
        # Ignore resizes generated while a layout change is applying sizes.
        if self._layout_changing:
            return
        self._save_sizes()
    def _splitter_position_changed(self):
        assert self._layout_changing == False
        self._save_sizes()
    def _update_splitter_sizes(self):
        """Apply the current layout's saved splitter positions (or defaults).

        Sets _layout_changing while applying so the resulting position
        events are not written straight back to settings; the flag is
        cleared by a cancellable delayed message ~250ms later.
        """
        if self._layout == None:
            return
        # print "MW: Splitter layout updating"
        self._layout_changing = True
        splitter_sizes = self._settings.SplitterSizes
        for splitter in self._splitters:
            assert hasattr(splitter,'id')
            if splitter_sizes.has_key(self._layout) == False:
                splitter_sizes[self._layout] = {}
            if splitter_sizes[self._layout].has_key(splitter.id):
                pos = splitter_sizes[self._layout][splitter.id]
                # print "%s: spos %s->%s" % (self._layout, splitter.id,pos)
                splitter.set_position(pos)
            else:
                if self._pane_default_positions.has_key(splitter.id):
                    # print "setting position %i" % self._pane_default_positions[splitter.id]
                    splitter.set_position(self._pane_default_positions[splitter.id])
        def stop_ignoring_changes():
            # import pdb; pdb.set_trace()
            assert self._pending_stop_ignoring_changes_message
            self._pending_stop_ignoring_changes_message = None
            self._layout_changing = False
            # print "MW: Splitter layout completely done"
        if self._pending_stop_ignoring_changes_message:
            self._pending_stop_ignoring_changes_message.cancel()
        self._pending_stop_ignoring_changes_message = MessageLoop.add_cancellable_delayed_message(stop_ignoring_changes, 250)
    def _save_sizes(self):
        """Persist window size and per-layout splitter positions to settings."""
        # import traceback
        # traceback.print_stack()
        if self._layout == None:
            return
        size = self.get_allocation()
        # Skip saving while maximized or iconified so transient sizes don't
        # clobber the remembered normal-state geometry.
        if self.get_window().get_state() & (gtk.gdk.WINDOW_STATE_MAXIMIZED | gtk.gdk.WINDOW_STATE_ICONIFIED) == 0:
            newSize = {"width" : size.width, "height" : size.height}
            if pson.dumps(newSize) != pson.dumps(self._settings.WindowSize):
                # print "window size changed"
                self._settings.WindowSize = newSize
        import copy
        splitter_sizes = copy.deepcopy(self._settings.SplitterSizes)
        needs_commit = False
        for splitter in self._splitters:
            if splitter_sizes.has_key(self._layout) == False:
                splitter_sizes[self._layout] = {}
            if splitter_sizes[self._layout].has_key(splitter.id) and splitter.get_position() != splitter_sizes[self._layout][splitter.id]:
                # print "%s: save %s<-%s" % (self._layout, splitter.id, splitter.get_position())
                needs_commit = True
            elif not splitter_sizes[self._layout].has_key(splitter.id):
                # print "%s: save %s<-%s" % (self._layout, splitter.id, splitter.get_position())
                needs_commit = True
            # import pdb; pdb.set_trace()
            splitter_sizes[self._layout][splitter.id] = splitter.get_position()
        if needs_commit:
            self._settings.SplitterSizes = splitter_sizes
    @property
    def layout(self):
        return self._layout
    @layout.setter
    def layout(self,layout):
        """Switch to *layout*: update overlays and re-apply saved splitter sizes."""
        # print "MW: Layout changing"
        self._layout = layout
        MainWindowOverlay.set_layout(self._settings, self, layout)
        self._update_splitter_sizes()
        # print "MW: Layout change done. Splitter change pending."
| StarcoderdataPython |
4801735 | <reponame>morganwillisaws/codeguru
from threading import Thread
class Counter(object):
    """A shared counter whose increment is deliberately non-atomic.

    The read-modify-write in ``increment`` can interleave across threads,
    which is exactly the lost-update race this script demonstrates.
    """

    def __init__(self):
        # Start counting from zero.
        self.value = 0

    def increment(self):
        # Load, add, store — three steps that threads can interleave.
        self.value = self.value + 1
c = Counter()
def go():
    """Increment the shared module-level counter ``c`` one million times."""
    for i in range(1000000):
        c.increment()
# Run two threads that increment the counter:
t1 = Thread(target=go)
t1.start()
t2 = Thread(target=go)
t2.start()
t1.join()
t2.join()
# Because Counter.increment is not atomic, updates can be lost when the
# threads interleave — the printed total is typically below 2000000.
print(c.value)
168199 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Pocket PiAP
# ......................................................................
# Copyright (c) 2017-2020, <NAME>
# ......................................................................
# Licensed under MIT (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# ......................................................................
# http://www.github.com/reactive-firewall/PiAP-python-tools/LICENSE.rst
# ......................................................................
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ......................................................................
try:
import sys
if sys.__name__ is None: # pragma: no branch
raise ImportError("[CWE-758] OMG! we could not import sys! ABORT. ABORT.")
except Exception as err: # pragma: no branch
raise ImportError(err)
try:
try:
import context
except Exception as ImportErr: # pragma: no branch
ImportErr = None
del ImportErr
from . import context
if context.__name__ is None:
raise ImportError("[CWE-758] Failed to import context")
else:
from context import unittest as unittest
except Exception:
raise ImportError("[CWE-758] Failed to import test context")
class logsTestSuite(unittest.TestCase):
    """Special Pocket logbook test cases.

    Exercises piaplib's ``book.logs`` module: import sanity, the class-call
    logging interface, rejection of missing/junk arguments, and the CLI
    entry point. Each test reports import or runtime failures via prints
    and a final boolean assert.
    """
    def test_syntax(self):
        """Test case importing code."""
        theResult = False
        try:
            from .context import piaplib
            if piaplib.__name__ is None:
                theResult = False
            from piaplib import pocket
            if pocket.__name__ is None:
                theResult = False
            from piaplib import book as book
            if book.__name__ is None:
                theResult = False
            theResult = True
        except Exception as impErr:
            print(str(type(impErr)))
            print(str(impErr))
            theResult = False
        assert theResult
    @unittest.skipUnless((sys.version_info >= (3, 4)), "Requires Python 3.4+")
    def test_a_case_log_call(self):
        """Tests the odd state for logs called as class"""
        theResult = True
        try:
            from piaplib import book as book
            if book.__name__ is None:
                raise ImportError("Failed to import book")
            from book.logs import logs as logs
            if logs.__name__ is None:
                raise ImportError("Failed to import logs")
            # assertLogs requires Python 3.4+, hence the skipUnless guard.
            with self.assertLogs(None, level='INFO') as cm:
                logobj = logs()
                logobj(msg=str("test log call"), loglevel="INFO")
            self.assertIsNotNone(cm)
        except Exception as err:
            print(str(""))
            print(str(type(err)))
            print(str(err))
            print(str((err.args)))
            print(str(""))
            err = None
            del err
            theResult = False
        assert theResult
    @unittest.skipUnless((sys.version_info >= (3, 4)), "Requires Python 3.4+")
    def test_b_case_log_call(self):
        """Tests the imposible state for logs missing input"""
        theResult = True
        try:
            from piaplib import book as book
            if book.__name__ is None:
                raise ImportError("Failed to import book")
            from book.logs import logs as logs
            if logs.__name__ is None:
                raise ImportError("Failed to import logs")
            with self.assertLogs(None, level='INFO') as cm:
                logs.log(str("test log call"), 'INFO')
                # Missing message must raise; missing level must not.
                with self.assertRaises(Exception):
                    logs.log(None, 'INFO')
                with self.assertRaises(Exception):
                    logs.log(None, None)
                logs.log(str("test log None"), None)
            self.assertIsNotNone(cm)
        except Exception as err:
            print(str(""))
            print(str(type(err)))
            print(str(err))
            print(str((err.args)))
            print(str(""))
            err = None
            del err
            theResult = False
        assert theResult
    def test_c_case_log_call(self):
        """Tests the imposible state for logs given junk input"""
        theResult = True
        try:
            from piaplib import book as book
            if book.__name__ is None:
                raise ImportError("Failed to import book")
            from book.logs import logs as logs
            if logs.__name__ is None:
                raise ImportError("Failed to import logs")
            # Non-string messages and unknown/odd levels must all raise.
            with self.assertRaises(Exception):
                logs.log(["test log call"], 'INFO')
            with self.assertRaises(Exception):
                logs.log("test log call", ['INFO'])
            with self.assertRaises(Exception):
                logs.log("test log call", 'JUNK_VALUE')
        except Exception as err:
            print(str(""))
            print(str(type(err)))
            print(str(err))
            print(str((err.args)))
            print(str(""))
            err = None
            del err
            theResult = False
        assert theResult
    def test_d_case_log_bad_main(self):
        """Tests the imposible state for logs main"""
        theResult = True
        try:
            from piaplib import book as book
            if book.__name__ is None:
                raise ImportError("Failed to import book")
            from book.logs import logs as logs
            if logs.__name__ is None:
                raise ImportError("Failed to import logs")
            # The CLI entry point must reject this argument vector.
            with self.assertRaises(Exception):
                logs.main(["test log call"])
        except Exception as err:
            print(str(""))
            print(str(type(err)))
            print(str(err))
            print(str((err.args)))
            print(str(""))
            err = None
            del err
            theResult = False
        assert theResult
if __name__ == u'__main__':
unittest.main()
| StarcoderdataPython |
1781623 | from .models import Project, Site
from onadata.apps.fsforms.models import FieldSightXF
def get_form_answer(site_id, meta):
    """Return the latest submitted answer for the question described by ``meta``.

    Looks up the newest submission (highest instance_id) of the form
    ``meta['form_id']`` for the given site and extracts the answer to
    ``meta['question']``. Media answers (photo/video/audio) are rewritten
    into a downloadable attachment URL. Returns "No Submission Yet." when
    the form does not exist or has no submissions for this site.
    """
    fxf = FieldSightXF.objects.filter(pk=int(meta.get('form_id', "0")))
    if fxf:
        # Newest submission for this site wins.
        sub = fxf[0].project_form_instances.filter(site_id=site_id).order_by('-instance_id')[:1]
        if sub:
            sub_answers = sub[0].instance.json
            if meta['question']['type'] == "repeat":
                # Repeat groups have no single scalar answer.
                answer = ""
            else:
                answer = sub_answers.get(meta.get('question').get('name'), '')
            # Fixes: compare with != (not `is not`, which is an unreliable
            # identity check on a string literal) and read the xform owner
            # from the first matched row rather than the QuerySet itself.
            if meta['question']['type'] in ['photo', 'video', 'audio'] and answer != "":
                answer = 'http://app.fieldsight.org/attachment/medium?media_file=' + fxf[0].xf.user.username + '/attachments/' + answer
        else:
            answer = "No Submission Yet."
    else:
        answer = "No Submission Yet."
    return answer
def get_form_sub_status(site_id, meta):
    """Describe when the form referenced by ``meta`` was last submitted
    for the given site, or report that no submission exists."""
    forms = FieldSightXF.objects.filter(pk=int(meta.get('form_id', "0")))
    if not forms:
        # Form id did not resolve at all.
        return "No Submission Yet."
    latest = forms[0].project_form_instances.filter(site_id=site_id).order_by('-instance_id').values('date')[:1]
    if not latest:
        return "No submission yet."
    return "Last submitted on " + latest[0]['date'].strftime("%d %b %Y %I:%M %P")
def get_form_ques_ans_status(site_id, meta):
    """Report whether the question in ``meta`` was answered in the latest
    submission of the referenced form for this site."""
    forms = FieldSightXF.objects.filter(pk=int(meta.get('form_id', "0")))
    if not forms:
        return "No Submission Yet."
    latest = forms[0].project_form_instances.filter(site_id=site_id).order_by('-instance_id')[:1]
    if not latest:
        return "No Submission Yet."
    # Truthiness check (not an explicit None test): empty answers count
    # as unanswered, matching the original behaviour.
    if latest[0].instance.json.get(meta.get('question').get('name'), None):
        return "Answered"
    return "Not Answered"
def get_form_submission_count(site_id, meta):
    """Count submissions of the form in ``meta`` for the given site.

    Returns the string "No Submission Yet." (not 0) when the form id does
    not resolve, mirroring the other meta helpers.
    """
    forms = FieldSightXF.objects.filter(pk=int(meta.get('form_id', "0")))
    if not forms:
        return "No Submission Yet."
    return forms[0].project_form_instances.filter(site_id=site_id).count()
def generateSiteMetaAttribs(pk):
    """Resolve a site's meta-attribute answers into display dicts.

    Walks the owning project's ``site_meta_attributes`` schema; "Link"
    questions referencing a site in another project are expanded
    recursively into ``children`` entries, while form-backed question
    types are answered by the ``get_form_*`` helpers above.
    """
    metas = []
    site = Site.objects.get(pk=pk)
    project = site.project
    main_project = project.id
    def generate(metas, project_id, metas_to_parse, meta_answer, parent_selected_metas, project_metas):
        # Appends one result dict per schema entry of metas_to_parse into `metas`.
        for meta in metas_to_parse:
            # if project_metas and meta not in project_metas:
            #     continue
            if meta.get('question_type') == "Link":
                # Sub-schemas come from the outermost Link's `metas` mapping.
                if parent_selected_metas:
                    selected_metas = parent_selected_metas
                else:
                    selected_metas = meta.get('metas')
                # Never follow links back into the root project (avoids cycles).
                if meta.get('project_id') == main_project:
                    continue
                sitenew = Site.objects.filter(identifier = meta_answer.get(meta.get('question_name'), None), project_id = meta.get('project_id'))
                if sitenew and str(sitenew[0].project_id) in selected_metas:
                    answer = meta_answer.get(meta.get('question_name'))
                    sub_metas = []
                    # Recurse into the linked site's own schema and answers.
                    generate(sub_metas, sitenew[0].project_id, selected_metas[str(sitenew[0].project_id)], sitenew[0].site_meta_attributes_ans, selected_metas, sitenew[0].project.site_meta_attributes)
                    metas.append({'question_text': meta.get('question_text'), 'project_id':meta.get('project_id'), 'answer':answer, 'question_type':'Link', 'children':sub_metas})
                else:
                    answer = "No Site Refrenced"
                    metas.append({'question_text': meta.get('question_text'), 'answer':answer, 'question_type':'Normal'})
            else:
                answer=""
                question_type="Normal"
                # Form-backed types are answered by the helpers defined above.
                if meta.get('question_type') == "Form":
                    answer = get_form_answer(pk, meta)
                elif meta.get('question_type') == "FormSubStat":
                    answer = get_form_sub_status(pk, meta)
                elif meta.get('question_type') == "FormQuestionAnswerStatus":
                    answer = get_form_ques_ans_status(pk, meta)
                elif meta.get('question_type') == "FormSubCountQuestion":
                    answer = get_form_submission_count(pk, meta)
                else:
                    # Plain question: read the stored answer directly.
                    answer = meta_answer.get(meta.get('question_name'), "")
                metas.append({'question_text': meta.get('question_text'), 'answer':answer, 'question_type':question_type})
    generate(metas, project.id, project.site_meta_attributes, site.site_meta_attributes_ans, None, None)
    return metas
| StarcoderdataPython |
1616149 | import os
import sys
import json
import torch
import logging
from tqdm import tqdm
from . import loader_utils
from ..constant import BOS_WORD, EOS_WORD, Tag2Idx
logger = logging.getLogger()
# -------------------------------------------------------------------------------------------
# preprocess label
# ------------------------------------------------------------------------------------------
def get_tag_label(start_end_pos, doc_length):
    """Build a B/I/E/U/O tag sequence of length ``doc_length`` from keyphrase spans.

    Spans are flattened/ranked and strictly de-overlapped first; the
    returned ``overlap_flag`` records whether any span had to be dropped.
    """
    ranked = loader_utils.flat_rank_pos(start_end_pos)
    kept = loader_utils.strict_filter_overlap(ranked)
    overlap_flag = len(kept) != len(ranked)

    label = [Tag2Idx['O'] for _ in range(doc_length)]
    for start, end in kept:
        width = end - start
        if width == 0:
            # Single-token keyphrase.
            label[start] = Tag2Idx['U']
        elif width == 1:
            label[start] = Tag2Idx['B']
            label[end] = Tag2Idx['E']
        elif width >= 2:
            label[start] = Tag2Idx['B']
            label[end] = Tag2Idx['E']
            for mid in range(start + 1, end):
                label[mid] = Tag2Idx['I']
        else:
            # Negative width: malformed span — log and stop, as the original did.
            logger.info('ERROR')
            break
    return {'label': label, 'overlap_flag': overlap_flag}
def bert2tag_preprocessor(examples, tokenizer, max_token, pretrain_model, mode, max_phrase_words, stem_flag=False):
    """Tokenize raw examples and (in train mode) attach B/I/E/U/O tag labels.

    Documents are truncated to ``max_token`` wordpieces; ``max_word`` is the
    matching word-level cutoff. Examples with overlapping keyphrase spans
    are kept, but the number affected is logged at the end.
    """
    logger.info('start preparing (%s) features for bert2tag (%s) ...' % (mode, pretrain_model))
    overlap_num = 0
    new_examples = []
    for idx, ex in enumerate(tqdm(examples)):
        # tokenize
        tokenize_output = loader_utils.tokenize_for_bert(doc_words=ex['doc_words'], tokenizer=tokenizer)
        # Map the wordpiece cutoff back to a word index when truncation hits.
        if len(tokenize_output['tokens']) < max_token:
            max_word = max_token
        else:
            max_word = tokenize_output['tok_to_orig_index'][max_token-1] + 1
        new_ex = {}
        new_ex['url'] = ex['url']
        new_ex['tokens'] = tokenize_output['tokens'][:max_token]
        new_ex['valid_mask'] = tokenize_output['valid_mask'][:max_token]
        new_ex['doc_words'] = ex['doc_words'][:max_word]
        assert len(new_ex['tokens']) == len(new_ex['valid_mask'])
        assert sum(new_ex['valid_mask']) == len(new_ex['doc_words'])
        if mode == 'train':
            parameter = {'start_end_pos': ex['start_end_pos'],
                         'doc_length': len(ex['doc_words'])}
            # ------------------------------------------------
            # Word-level tag labels, truncated to the same word cutoff.
            label_dict = get_tag_label(**parameter)
            new_ex['label'] = label_dict['label'][:max_word]
            assert sum(new_ex['valid_mask']) == len(new_ex['label'])
            if label_dict['overlap_flag']:
                overlap_num += 1
        new_examples.append(new_ex)
    logger.info('Delete Overlap Keyphrase : %d (overlap / total = %.2f'
                %(overlap_num, float(overlap_num / len(examples) * 100)) + '%)')
    return new_examples
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# batch batchfy
def bert2tag_converter(index, ex, tokenizer, mode, max_phrase_words):
    ''' Convert one preprocessed example to tensors; adds [CLS]/[SEP]-style markers.

    Returns (index, token_id_tensor, valid_mask_tensor, valid_token_count)
    plus the word-level label tensor when mode == 'train'.
    '''
    # Wrap the document with the BOS/EOS marker tokens; the wrappers are
    # marked invalid (0) so they are excluded from word-level outputs.
    src_tokens = [BOS_WORD] + ex['tokens'] + [EOS_WORD]
    valid_ids = [0] + ex['valid_mask'] + [0]
    src_tensor = torch.LongTensor(tokenizer.convert_tokens_to_ids(src_tokens))
    valid_mask = torch.LongTensor(valid_ids)
    # Number of original (word-initial) tokens in the document.
    orig_doc_len = sum(valid_ids)
    if mode == 'train':
        label_tensor = torch.LongTensor(ex['label'])
        return index, src_tensor, valid_mask, orig_doc_len, label_tensor
    else:
        return index, src_tensor, valid_mask, orig_doc_len
def batchify_bert2tag_features_for_train(batch):
    ''' train dataloader & eval dataloader .'''
    # Each ex is (index, src_tensor, valid_mask, orig_doc_len, label_tensor)
    # as produced by bert2tag_converter in train mode.
    ids = [ex[0] for ex in batch]
    docs = [ex[1] for ex in batch]
    valid_mask = [ex[2] for ex in batch]
    doc_word_lens = [ex[3] for ex in batch]
    label_list = [ex[4] for ex in batch]
    bert_output_dim = 768
    max_word_len = max([word_len for word_len in doc_word_lens]) # word-level
    # ---------------------------------------------------------------
    # [1][2]src tokens tensor
    # Token ids zero-padded to the longest doc, with a matching attention mask.
    doc_max_length = max([d.size(0) for d in docs])
    input_ids = torch.LongTensor(len(docs), doc_max_length).zero_()
    input_mask = torch.LongTensor(len(docs), doc_max_length).zero_()
    for i, d in enumerate(docs):
        input_ids[i, :d.size(0)].copy_(d)
        input_mask[i, :d.size(0)].fill_(1)
    # ---------------------------------------------------------------
    # valid mask tensor
    valid_max_length = max([v.size(0) for v in valid_mask])
    valid_ids = torch.LongTensor(len(valid_mask), valid_max_length).zero_()
    for i, v in enumerate(valid_mask):
        valid_ids[i, :v.size(0)].copy_(v)
    # ---------------------------------------------------------------
    # label tensor
    # Word-level labels padded with 0; active_mask marks real (unpadded) words.
    labels = torch.LongTensor(len(label_list), max_word_len).zero_()
    active_mask = torch.LongTensor(len(label_list), max_word_len).zero_()
    for i, t in enumerate(label_list):
        labels[i, :t.size(0)].copy_(t)
        active_mask[i, :t.size(0)].fill_(1)
    # -------------------------------------------------------------------
    # [6] Empty Tensor : word-level max_len
    # Placeholder later filled with word-level BERT vectors (hidden size 768).
    valid_output = torch.zeros(len(docs), max_word_len, bert_output_dim)
    return input_ids, input_mask, valid_ids, active_mask, valid_output, labels, ids
def batchify_bert2tag_features_for_test(batch):
    """Collate inference examples (Dev & Public_Valid) into padded tensors.

    Each example is ``(index, token_id_tensor, valid_mask_tensor, word_len)``.
    Returns padded token ids, their attention mask, the padded valid-token
    mask, a word-level active mask, an empty word-level output buffer of
    width 768 (BERT hidden size), the raw word lengths and the example ids.
    """
    bert_output_dim = 768
    ids = [example[0] for example in batch]
    docs = [example[1] for example in batch]
    valid_mask = [example[2] for example in batch]
    doc_word_lens = [example[3] for example in batch]

    batch_size = len(docs)
    max_word_len = max(doc_word_lens)  # word-level width
    doc_max_length = max(d.size(0) for d in docs)

    # Token-level ids zero-padded to the longest doc, plus an attention mask.
    input_ids = torch.LongTensor(batch_size, doc_max_length).zero_()
    input_mask = torch.LongTensor(batch_size, doc_max_length).zero_()
    for row, doc in enumerate(docs):
        length = doc.size(0)
        input_ids[row, :length].copy_(doc)
        input_mask[row, :length].fill_(1)

    # Valid-token mask, zero-padded to its own max length.
    valid_max_length = max(v.size(0) for v in valid_mask)
    valid_ids = torch.LongTensor(batch_size, valid_max_length).zero_()
    for row, mask in enumerate(valid_mask):
        valid_ids[row, :mask.size(0)].copy_(mask)

    # Word-level active mask: 1 for real words, 0 for padding.
    active_mask = torch.LongTensor(batch_size, max_word_len).zero_()
    for row, word_len in enumerate(doc_word_lens):
        active_mask[row, :word_len].fill_(1)

    # Pre-allocated buffer later filled with word-level BERT vectors.
    valid_output = torch.zeros(batch_size, max_word_len, bert_output_dim)

    return input_ids, input_mask, valid_ids, active_mask, valid_output, doc_word_lens, ids
| StarcoderdataPython |
3278348 | #isLower is built into python😦
import Terry
def reverse(input):
    """Return *input* with its characters in reverse order.

    Uses slice-step reversal instead of the original manual index loop
    with quadratic string concatenation.
    """
    return input[::-1]
#print(reverse("test"))
def leetspeak(input):
    """Translate *input* into leetspeak.

    Substitution is case-insensitive ('a' and 'A' both become '4');
    characters without a leet equivalent are copied through unchanged.
    Iterates characters directly and joins once instead of building the
    result with repeated string concatenation.
    """
    translationDict = {
        "A": "4",
        "E": "3",
        "G": "6",
        "I": "1",
        "O": "0",
        "S": "5",
        "T": "7"
    }
    return "".join(translationDict.get(ch.upper(), ch) for ch in input)
#print(leetspeak("leet"))
def longVowels(input):
    """Stretch doubled vowels: each case-insensitive vowel pair such as
    "oo" or "Oo" is replaced by its first character repeated five times
    (the first character's case is preserved, the second is dropped).
    All other characters are copied through unchanged.

    Fixes: removed a leftover debug ``print(i)`` that ran on every
    iteration, and flattened the four-deep conditional nesting.
    """
    vowel = ["A", "E", "I", "O", "U"]
    output = ""
    i = 0
    while i < len(input):
        # A "long vowel" is a vowel immediately followed by the same
        # vowel, compared case-insensitively.
        if (input[i].upper() in vowel
                and i < len(input) - 1
                and input[i + 1].upper() == input[i].upper()):
            output += input[i] * 5
            i += 2
        else:
            output += input[i]
            i += 1
    return output
#print(longVowels("GOod"))
def rot13(input):
    """ROT13-encode *input*: letters are shifted 13 places and wrapped
    within their own case; every other character passes through as-is.

    Letter detection and case classification are delegated to the Terry
    helper module (assumed: isLetter -> bool, getCase -> "upper"/"lower"
    -- TODO confirm against Terry's implementation).
    """
    pieces = []
    for ch in input:
        if not Terry.isLetter(ch):
            pieces.append(ch)
            continue
        case = Terry.getCase(ch)
        shifted = ord(ch) + 13
        # Wrap past 'Z' (90) or 'z' (122) back to the alphabet start.
        if case == "upper" and shifted > 90:
            shifted -= 26
        elif case == "lower" and shifted > 122:
            shifted -= 26
        pieces.append(chr(shifted))
    return "".join(pieces)
# Demo: ROT13 a mixed string; digits, punctuation and the emoji should
# pass through unchanged.
print(rot13("This is a Test of 0ther character support🚁😀"))
| StarcoderdataPython |
179728 | <filename>multiway.py
"""
Program: multiway.py
Author: <NAME>
Date: October 9, 2017
"""
number = int(input("Enter the numeric grade:"))
if number >= 0 and number <= 100:
if number > 89:
letter = 'A'
print("The letter grade is", letter)
else:
print("Error: grade must be between 100 and 0")
| StarcoderdataPython |
1778185 | <gh_stars>0
import gym
import numpy as np
import math
import time
import glfw
"""Data generation for the case of a single block pick and place in Fetch Env"""
# Module-level recordings: one entry per successful episode, filled by
# goToGoal() and saved to a compressed .npz archive in main().
actions = []
observations = []
infos = []
from pynput import mouse
class Actor(object):
    """Keyboard + mouse tele-operation state for a Fetch-style gym env.

    Key presses drive a 3-D end-effector delta (x, y, z) plus a gripper
    command; mouse motion overrides x/y while recording is active.
    Stepping one axis damps the other two by ``momentum`` so commanded
    deltas decay smoothly instead of accumulating.
    """

    # GLFW key code -> (axis attribute, step direction).
    _MOVE_KEYS = {
        80: ("y", +1.0),
        59: ("y", -1.0),
        39: ("x", +1.0),
        76: ("x", -1.0),
        81: ("z", +1.0),
        87: ("z", -1.0),
    }

    def __init__(self, env):
        self.x = 0.0
        self.y = 0.0
        self.z = 0.0
        self.init_x = 0.0
        self.init_y = 0.0
        self.record = False
        self.env = env
        self.rec_loc_inited = False
        self.t = 0
        self.grasp = 1
        self.gain = 0.05
        self.momentum = 0.5

    def set_values(self, value):
        """GLFW key callback: update the commanded action for *value*."""
        print("value", value)
        move = self._MOVE_KEYS.get(value)
        if move is not None:
            axis, sign = move
            setattr(self, axis, getattr(self, axis) + sign * self.gain)
            for other in ("x", "y", "z"):
                if other != axis:
                    setattr(self, other, getattr(self, other) * self.momentum)
        elif value == 91:
            # Start recording; the mouse origin is re-captured on the
            # next motion event.
            self.record = True
            self.rec_loc_inited = False
            print("Recording turned on")
        elif value == 79:
            # Reset the episode and every piece of commanded state.
            self.env.reset()
            self.x = 0.0
            self.y = 0.0
            self.z = 0.0
            self.init_x = 0.0
            self.init_y = 0.0
            self.t = 0.0
            self.grasp = 1
            self.record = False
        elif value == 65:
            self.grasp = -1

    def get_action(self):
        """Current 4-D action vector: [dx, dy, dz, gripper]."""
        return [self.x, self.y, self.z, self.grasp]

    def on_move(self, x, y):
        """Mouse-motion callback: map pixel offsets to x/y deltas."""
        if not self.rec_loc_inited:
            # First motion after (re)starting recording: remember origin.
            self.init_x = x
            self.init_y = y
            self.rec_loc_inited = True
        if self.record:
            # Screen-pixel offsets scaled down to action units; the
            # vertical screen axis is inverted.
            self.x = (-self.init_x + x) / 100
            self.y = (-y + self.init_y) / 100

    def on_click(self, x, y, button, pressed):
        """Any click stops recording and zeroes the vertical delta."""
        self.record = False
        self.z = 0.0

    def on_scroll(self, x, y, dx, dy):
        """Scroll events are ignored."""
        pass
def main():
    # Human-in-the-loop demo collection: a global pynput mouse listener
    # and the env's GLFW key callback both feed the shared Actor state.
    env = gym.make('ClothSideways-v1')
    actor = Actor(env)
    listener = mouse.Listener(
        on_move=actor.on_move,
        on_click=actor.on_click,
        on_scroll=actor.on_scroll)
    listener.start()
    env.set_key_callback_function(actor.set_values)
    # Keep resetting/replaying until num_itr successful episodes exist.
    num_itr = 100
    initStateSpace = "random"
    env.reset()
    successes = 0
    try_n = 0
    while successes < num_itr:
        try_n += 1
        obs = env.reset()
        print("ITERATION NUMBER ", try_n, "Success so far", successes)
        success = goToGoal(env, obs, actor)
        if success:
            successes += 1
    # Persist every recorded episode (actions/observations/infos are the
    # module-level accumulators filled by goToGoal).
    fileName = "data_cloth_sideways_as3"
    fileName += "_" + initStateSpace
    fileName += "_" + str(num_itr)
    fileName += ".npz"
    np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file
    print("Saved, success rate:", successes)
def goToGoal(env, initial_observation, actor):
    """Replay a hand-recorded waypoint trajectory in *env* with small
    Gaussian noise and record the resulting episode.

    On success the episode's (actions, observations, infos) are appended
    to the module-level accumulators. Returns True iff the env reported
    ``reward == 0`` (goal reached) at any step.

    *actor* is kept for interface compatibility with main(); the
    interactive branch that used it was dead code (``while False``) and
    has been removed.
    """
    episodeAcs = []
    episodeObs = []
    episodeInfo = []
    episodeObs.append(initial_observation)
    # Pre-recorded open-loop trajectory: [dx, dy, dz, gripper] per step.
    items = [
        [0., 0., 0., 1.],
        [0., 0., 0., 1.],
        [0., 0., 0., 1.],
        [0., 0., 0., 1.],
        [0., 0., 0., 1.],
        [0., 0., 0., 1.],
        [0., 0., 0.1, 1.],
        [0., 0., 0.1, 1.],
        [0., 0.01929688, 0.1, 1.],
        [0.03867188, 0.11238281, 0.1, 1.],
        [0.0653125, 0.17683594, 0.1, 1.],
        [0.07914063, 0.21265625, 0.1, 1.],
        [0.08890625, 0.24820312, 0.1, 1.],
        [0.10023438, 0.27324219, 0.1, 1.],
        [0.11695313, 0.30570313, 0.1, 1.],
        [0.13265625, 0.33046875, 0.1, 1.],
        [0.14800781, 0.34851562, 0.1, 1.],
        [0.17625, 0.35800781, 0.1, 1.],
        [0.34578125, 0.37238281, 0.1, 1.],
        [0.40609375, 0.37621094, 0.1, 1.],
        [0.49902344, 0.38003906, 0.1, 1.],
        [0.65976563, 0.38003906, 0.1, 1.],
        [0.74980469, 0.36867187, 0.1, 1.],
        [0.80523437, 0.1715625, 0.1, 1.],
        [0.73570313, -0.26335938, 0.1, 1.],
        [0.34089844, -0.42804687, 0.1, 1.],
        [0.33136719, -0.41445312, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, 1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.33136719, -0.39242187, 0.1, -1.],
        [0.92496094, -0.25386719, 0.1, -1.],
        [1.58167969, -0.57402344, 0.1, -1.],
        [1.67710937, -1.20914063, 0.1, -1.],
    ]
    t = 0
    episode_success = False
    for i, action in enumerate(items):
        # Perturb the recorded waypoint with Gaussian noise; x is
        # slightly damped and z slightly amplified. The recorded gripper
        # component is dropped: the env takes a 3-D action here.
        ac = np.array([0.0, 0.0, 0.0])
        ac[0] = action[0] * 0.85 + np.random.normal(0, 0.1)
        ac[1] = action[1] + np.random.normal(0, 0.1)
        ac[2] = action[2] * 1.5 + np.random.normal(0, 0.1)
        if i > 32:
            # Tail of the trajectory: push straight down, no noise.
            ac[0] = 0
            ac[1] = 0
            ac[2] = -1
        action = ac
        env.render()
        action = np.array(action)
        obs, reward, done, info = env.step(action)
        if reward == 0:
            print("suc", reward, info)
            episode_success = True
        t += 1
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obs)
    # Pad with zero actions up to the env's fixed horizon.
    # NOTE(review): the last waypoint-loop obs/info are re-appended here
    # instead of the padding steps' results -- preserved as-is.
    while t < env._max_episode_steps:
        env.render()
        t += 1
        action = np.array([0.0, 0.0, 0.0])
        env.step(action)
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obs)
    print("Lens", len(episodeAcs), len(episodeInfo), len(episodeObs))
    print("Episode success", episode_success)
    if episode_success:
        actions.append(episodeAcs)
        observations.append(episodeObs)
        infos.append(episodeInfo)
    return episode_success
if __name__ == "__main__":
main()
#python -m baselines.run --alg=her --env=ClothSideways-v1 --demo_file=/Users/juliushietala/Desktop/Robotics/baselines/baselines/her/experiment/data_generation/data_cloth_sideways_as3_random_100.npz --num_demo=100 --num_timesteps=50000 --demo_batch_size=128 --bc_loss=1 --q_filter=1 --prm_loss_weight=0.001 --aux_loss_weight=0.0078 --n_cycles=20 --batch_size=1024 --random_eps=0.1 --noise_eps=0.1 --play --save_path=/Users/juliushietala/Desktop/Robotics/policies/her/sideways50k
# OPENAI_LOG_FORMAT=stdout,csv,tensorboard python -m baselines.run --alg=her --env=ClothReach-v1 --demo_file=/Users/juliushietala/Desktop/Robotics/baselines/baselines/her/experiment/data_generation/data_cloth_random_100.npz --num_demo=100 --num_timesteps=1500 --demo_batch_size=128 --bc_loss=1 --q_filter=1 --prm_loss_weight=0.001 --aux_loss_weight=0.0078 --n_cycles=20 --batch_size=1024 --random_eps=0.1 --noise_eps=0.1 --play --save_path=/Users/juliushietala/Desktop/Robotics/policies/her/diag30k --log_path=/Users/juliushietala/Desktop/Robotics/logs/cloth_diagonal --num_train=2
| StarcoderdataPython |
3398738 | import numpy as np
import torch
import torch.nn as nn
from copy import deepcopy
from pybnn.bohamiann import Bohamiann
def vapor_pressure(t, a, b, c):
    """Vapor-pressure learning-curve basis function.

    Computes exp(-a - b'/t - c'*log t) - exp(-a - b') with b' = b/10 and
    c' = c/10; the subtracted term is the curve's value at t = 1, so the
    basis is zero there. Removed the pointless ``a_ = a`` alias.
    """
    b_ = b / 10.
    c_ = c / 10.
    return torch.exp(-a - b_ / t - c_ * torch.log(t)) - torch.exp(-a - b_)
def pow_func(t, a, b):
    """Power-law basis a * (t**b - 1); zero at t = 1 by construction."""
    growth = t ** b - 1
    return a * growth
def log_power(t, a, b, c):
    """Log-power basis: the gap between two logistic terms, scaled by
    2a. ``ref`` is the logistic at the (scaled) offset b' = b/10; ``cur``
    moves with log t."""
    b_ = b / 10
    ref = 1 / (1 + torch.exp(-b_ * c))
    cur = 1 / (1 + torch.exp(c * torch.log(t) - b_))
    return 2 * a * (ref - cur)
def exponential(t, a, b):
    """Exponential-saturation basis a*(exp(-10b) - exp(-10b*t)); zero at
    t = 1 by construction."""
    rate = 10 * b
    return a * (torch.exp(-rate) - torch.exp(-rate * t))
def hill_3(t, a, b, c):
    """Hill-type (sigmoidal) basis function.

    NOTE(review): the subtracted term is a/(c**b + 1) *inside* the
    a*(...) factor, producing an a**2 offset, while the sibling bases
    subtract their plain t=1 value -- confirm against upstream pybnn
    whether the extra factor of a is intended. Behavior preserved.
    """
    hill = 1 / (c ** b * t ** (-b) + 1)
    offset = a / (c ** b + 1)
    return a * (hill - offset)
def bf_layer(theta, t):
    """Evaluate the five learning-curve basis functions.

    theta: (batch, 13) parameter tensor, consumed left-to-right as
    3 + 2 + 3 + 2 + 3 columns per basis. t: (batch,) budget values.
    Returns a (batch, 5) tensor with one column per basis.
    """
    y_a = vapor_pressure(t, theta[:, 0], theta[:, 1], theta[:, 2])
    y_b = pow_func(t, theta[:, 3], theta[:, 4])
    y_c = log_power(t, theta[:, 5], theta[:, 6], theta[:, 7])
    y_d = exponential(t, theta[:, 8], theta[:, 9])
    y_e = hill_3(t, theta[:, 10], theta[:, 11], theta[:, 12])
    return torch.stack([y_a, y_b, y_c, y_d, y_e], dim=1)
def get_lc_net_architecture(input_dimensionality: int) -> torch.nn.Module:
    """Build the LC-Net head: a tanh MLP over configuration features that
    predicts mixture weights and parameters for the five learning-curve
    basis functions, plus an asymptotic value and a noise estimate.

    The last column of the network input is the (normalized) budget t;
    the remaining columns are the hyperparameter configuration.
    """

    class Architecture(nn.Module):
        def __init__(self, n_inputs, n_hidden=50):
            super(Architecture, self).__init__()
            # Layer attribute names are part of the state_dict contract;
            # do not rename them.
            self.fc1 = nn.Linear(n_inputs - 1, n_hidden)
            self.fc2 = nn.Linear(n_hidden, n_hidden)
            self.fc3 = nn.Linear(n_hidden, n_hidden)
            self.theta_layer = nn.Linear(n_hidden, 13)
            self.weight_layer = nn.Linear(n_hidden, 5)
            self.asymptotic_layer = nn.Linear(n_hidden, 1)
            self.sigma_layer = nn.Linear(n_hidden, 1)

        def forward(self, input):
            # Split the configuration columns from the trailing budget.
            x = input[:, :-1]
            t = input[:, -1]
            for hidden in (self.fc1, self.fc2, self.fc3):
                x = torch.tanh(hidden(x))
            # Basis parameters squashed into [0, 1], then combined by a
            # softmax-weighted sum over the five basis functions.
            theta = torch.sigmoid(self.theta_layer(x))
            bf = bf_layer(theta, t)
            weights = torch.softmax(self.weight_layer(x), -1)
            residual = torch.sum(bf * weights, dim=(1,), keepdim=True)
            asymptotic = torch.sigmoid(self.asymptotic_layer(x))
            mean = residual + asymptotic
            # Predicted observation noise, squashed into (0, 1).
            std = torch.sigmoid(self.sigma_layer(x))
            return torch.cat((mean, std), dim=1)

    return Architecture(n_inputs=input_dimensionality)
class LCNet(Bohamiann):
    # Bayesian neural network for learning-curve prediction: Bohamiann
    # sampling over the LC-Net architecture, with custom normalization
    # that leaves the budget column untouched.
    def __init__(self,
                 batch_size=20,
                 metrics=(nn.MSELoss,)
                 ) -> None:
        super(LCNet, self).__init__(get_network=get_lc_net_architecture,
                                    batch_size=batch_size,
                                    normalize_input=True,
                                    normalize_output=False,
                                    metrics=metrics)

    def normalize(self, x, m=None, s=None):
        # Standardize every column except the last one (the budget t),
        # which is assumed already scaled to [0, 1] -- TODO confirm.
        # Returns the normalized copy plus the mean/std actually used so
        # they can be reapplied at prediction time.
        if m is None:
            m = np.mean(x, axis=0)
        if s is None:
            s = np.std(x, axis=0)
        x_norm = deepcopy(x)
        x_norm[:, :-1] = (x[:, :-1] - m[:-1]) / s[:-1]
        return x_norm, m, s
| StarcoderdataPython |
1666496 | <reponame>kemingy/daily-coding-problem<filename>src/LCA.py
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes
# in the tree. Assume that each node in the tree also has a pointer to its parent.
# According to the definition of LCA on Wikipedia: “The lowest common ancestor
# is defined between two nodes v and w as the lowest node in T that has both v
# and w as descendants (where we allow a node to be a descendant of itself).”
class Node:
    """Binary-tree node that also keeps a parent pointer, enabling
    upward traversal for LCA queries."""

    def __init__(self, value, left=None, right=None, parent=None):
        self.value = value
        self.left = left
        self.right = right
        self.parent = parent

    def set_children(self, left=None, right=None):
        """Attach children and back-link each child's parent to self."""
        self.left, self.right = left, right
        for child in (left, right):
            if child:
                child.parent = self
def lowest_common_ancestor(x, y):
    """Return the value of the lowest common ancestor of nodes *x* and
    *y*, following their parent pointers; a node counts as its own
    ancestor.

    Both chains climb in lockstep, one step per round, so the first
    node to appear in both visited sets is the lowest common one.
    NOTE(review): if x and y live in different trees the sets never
    intersect and the final pop() raises KeyError -- preserved as-is.
    """
    seen_x, seen_y = set(), set()
    while not (seen_x & seen_y):
        if x:
            seen_x.add(x)
            x = x.parent
        if y:
            seen_y.add(y)
            y = y.parent
        if x is None and y is None:
            break
    return (seen_x & seen_y).pop().value
if __name__ == '__main__':
    # Build the sample tree:
    #         0
    #       /   \
    #      1     5
    #     / \     \
    #    2   3     6
    #        |
    #        4
    nodes = [Node(i) for i in range(7)]
    tree = nodes[0]
    tree.set_children(nodes[1], nodes[5])
    nodes[1].set_children(nodes[2], nodes[3])
    nodes[3].set_children(nodes[4])
    nodes[5].set_children(None, nodes[6])
    print(lowest_common_ancestor(nodes[4], nodes[6]))  # expected 0
    print(lowest_common_ancestor(nodes[2], nodes[4]))  # expected 1
    print(lowest_common_ancestor(nodes[5], nodes[6]))  # expected 5
| StarcoderdataPython |
1770314 | <filename>src/zope/app/authentication/browser/rolepermissionview.py
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Role Permission View Classes
"""
from datetime import datetime
from zope.component import getUtilitiesFor, getUtility
from zope.i18n import translate
from zope.interface import implementer
from zope.exceptions.interfaces import UserError
from zope.i18nmessageid import ZopeMessageFactory as _
from zope.security.interfaces import IPermission
from zope.securitypolicy.interfaces import Unset, Allow, Deny
from zope.securitypolicy.interfaces import IRole, IRolePermissionManager
class RolePermissionView(object):
    """Management view rendering a grid of permissions (rows) against
    roles (columns) and applying grant/deny/unset changes from the
    request onto the parent object's IRolePermissionManager."""
    _pagetip = _("""For each permission you want to grant (or deny) to a role,
    set the entry for that permission and role to a '+' (or '-').
    Permissions are shown on the left side, going down.
    Roles are shown accross the top.
    """)
    request = None
    context = None
    # Per-instance caches, filled lazily by roles()/permissions().
    _roles = None
    _permissions = None
    def pagetip(self):
        """Return the page tip translated for the current request."""
        return translate(self._pagetip, context=self.request)
    def roles(self):
        """Return all registered IRole utilities, sorted by translated
        title (cached on the instance)."""
        roles = getattr(self, '_roles', None)
        if roles is None:
            roles = [
                (translate(role.title, context=self.request).strip(), role)
                for name, role in getUtilitiesFor(IRole)]
            roles.sort()
            roles = self._roles = [role for name, role in roles]
        return roles
    def permissions(self):
        """Return all registered IPermission utilities except the public
        one, sorted by translated title (cached on the instance)."""
        permissions = getattr(self, '_permissions', None)
        if permissions is None:
            permissions = [
                (translate(perm.title, context=self.request).strip(), perm)
                for name, perm in getUtilitiesFor(IPermission)
                if name != 'zope.Public']
            permissions.sort()
            permissions = self._permissions = [perm
                                               for name, perm in permissions]
        return permissions
    def availableSettings(self, noacquire=False):
        """Return the setting vocabulary used to render grid cells:
        Acquire/Allow/Deny, with Acquire dropped when *noacquire*."""
        aq = {'id': Unset.getName(), 'shorttitle': ' ',
              'title': _('permission-acquire', 'Acquire')}
        rest = [{'id': Allow.getName(), 'shorttitle': '+',
                 'title': _('permission-allow', 'Allow')},
                {'id': Deny.getName(), 'shorttitle': '-',
                 'title': _('permission-deny', 'Deny')},
                ]
        return rest if noacquire else [aq] + rest
    def permissionRoles(self):
        """Return one PermissionRoles row adapter per permission."""
        context = self.context.__parent__
        roles = self.roles()
        return [PermissionRoles(permission, context, roles)
                for permission in self.permissions()]
    def permissionForID(self, pid):
        """Look up a single permission by id, paired with role settings."""
        roles = self.roles()
        perm = getUtility(IPermission, pid)
        return PermissionRoles(perm, self.context.__parent__, roles)
    def roleForID(self, rid):
        """Look up a single role by id, paired with permission settings."""
        permissions = self.permissions()
        role = getUtility(IRole, rid)
        return RolePermissions(role, self.context.__parent__, permissions)
    def update(self, testing=None):
        """Apply grid changes posted by one of the three submit buttons
        (full grid, one permission's row, or one role's column) and
        return a translated status message."""
        status = ''
        changed = False
        if 'SUBMIT' in self.request:
            # Full-grid submit: each cell is posted as p<ip>r<ir>.
            roles = [r.id for r in self.roles()]
            permissions = [p.id for p in self.permissions()]
            prm = IRolePermissionManager(self.context.__parent__)
            for ip in range(len(permissions)):
                rperm = self.request.get("p%s" % ip)
                if rperm not in permissions:
                    continue
                for ir in range(len(roles)):
                    rrole = self.request.get("r%s" % ir)
                    if rrole not in roles:
                        continue
                    setting = self.request.get("p%sr%s" % (ip, ir), None)
                    if setting is not None:
                        if setting == Unset.getName():
                            prm.unsetPermissionFromRole(rperm, rrole)
                        elif setting == Allow.getName():
                            prm.grantPermissionToRole(rperm, rrole)
                        elif setting == Deny.getName():
                            prm.denyPermissionToRole(rperm, rrole)
                        else:
                            raise ValueError("Incorrect setting: %s"
                                             % setting)  # pragma: no cover
            changed = True
        if 'SUBMIT_PERMS' in self.request:
            # Single-permission submit: one setting per role, in order.
            prm = IRolePermissionManager(self.context.__parent__)
            roles = self.roles()
            rperm = self.request.get('permission_id')
            settings = self.request.get('settings', ())
            for ir in range(len(roles)):
                rrole = roles[ir].id
                setting = settings[ir]
                if setting == Unset.getName():
                    prm.unsetPermissionFromRole(rperm, rrole)
                elif setting == Allow.getName():
                    prm.grantPermissionToRole(rperm, rrole)
                elif setting == Deny.getName():
                    prm.denyPermissionToRole(rperm, rrole)
                else:
                    raise ValueError("Incorrect setting: %s" % setting)
            changed = True
        if 'SUBMIT_ROLE' in self.request:
            # Single-role submit: allow/deny checkbox lists; a permission
            # checked in both is a user error.
            role_id = self.request.get('role_id')
            prm = IRolePermissionManager(self.context.__parent__)
            allowed = self.request.get(Allow.getName(), ())
            denied = self.request.get(Deny.getName(), ())
            for permission in self.permissions():
                rperm = permission.id
                if rperm in allowed and rperm in denied:
                    permission_translated = translate(
                        permission.title, context=self.request)
                    msg = _('You choose both allow and deny for permission'
                            ' "${permission}". This is not allowed.',
                            mapping={'permission': permission_translated})
                    raise UserError(msg)
                if rperm in allowed:
                    prm.grantPermissionToRole(rperm, role_id)
                elif rperm in denied:
                    prm.denyPermissionToRole(rperm, role_id)
                else:
                    prm.unsetPermissionFromRole(rperm, role_id)
            changed = True
        if changed:
            # Timestamped confirmation in the request's locale.
            formatter = self.request.locale.dates.getFormatter(
                'dateTime', 'medium')
            status = _("Settings changed at ${date_time}",
                       mapping={'date_time':
                                formatter.format(datetime.utcnow())})
        return status
@implementer(IPermission)
class PermissionRoles(object):
    """Adapter pairing one permission with the per-role settings found
    on a context's role-permission manager (one grid row)."""

    def __init__(self, permission, context, roles):
        self._permission = permission
        self._context = context
        self._roles = roles

    @property
    def id(self):
        return self._permission.id

    @property
    def title(self):
        return self._permission.title

    @property
    def description(self):
        return self._permission.description

    def roleSettings(self):
        """
        Returns the list of setting names of each role for this permission.
        """
        prm = IRolePermissionManager(self._context)
        proles = prm.getRolesForPermission(self._permission.id)
        settings = {role: setting.getName() for role, setting in proles}
        nosetting = Unset.getName()
        return [settings.get(role.id, nosetting) for role in self._roles]
@implementer(IRole)
class RolePermissions(object):
    """Adapter pairing one role with the per-permission settings found
    on a context's role-permission manager (one grid column)."""

    def __init__(self, role, context, permissions):
        self._role = role
        self._context = context
        self._permissions = permissions

    @property
    def id(self):
        return self._role.id

    @property
    def title(self):
        return self._role.title

    @property
    def description(self):
        return self._role.description

    def permissionsInfo(self):
        """Return [{'id', 'title', 'setting'}] for every permission,
        defaulting to the Unset setting name when unconfigured."""
        prm = IRolePermissionManager(self._context)
        rperms = prm.getPermissionsForRole(self._role.id)
        settings = {permission: setting.getName()
                    for permission, setting in rperms}
        nosetting = Unset.getName()
        return [{'id': permission.id,
                 'title': permission.title,
                 'setting': settings.get(permission.id, nosetting)}
                for permission in self._permissions]
| StarcoderdataPython |
3367123 | import functools
import types
from typing import Any, Callable, Dict, List, Optional, Tuple, cast # noqa
# Marker attributes stamped onto decorated invoker functions so that
# stacked decorators and the service starter can recognize them and
# re-dispatch the wrapped callables.
FUNCTION_ATTRIBUTE = "_tomodachi_function_is_invoker_function"
START_ATTRIBUTE = "_tomodachi_deprecated_invoker_function_start_marker"
INVOKER_TASK_START_KEYWORD = "_tomodachi_invoker_task_start_keyword"
class Invoker(object):
    """Base for tomodachi invoker decorators.

    ``decorator`` wraps a class-level setup coroutine (``cls_func``) so
    it can be used both bare (``@x``) and parameterized (``@x(...)``).
    The produced ``_decorator`` behaves as the original handler on
    normal calls, but when invoked with the internal task-start keyword
    it runs the setup path instead and returns its start function.
    """
    # Shared per-service-object context, keyed by the service instance.
    context: Dict = {}
    @classmethod
    def decorator(cls, cls_func: Callable) -> Callable:
        def _wrapper(*args: Any, **kwargs: Any) -> Callable:
            def wrapper(func: Callable) -> Callable:
                @functools.wraps(func)
                async def _decorator(obj: Any, *a: Any, **kw: Any) -> Any:
                    # Normal invocation: just forward to the handler.
                    if not kw or not kw.get(INVOKER_TASK_START_KEYWORD):
                        return await func(obj, *a, **kw)
                    setattr(_decorator, START_ATTRIBUTE, False)  # deprecated
                    # First start for this service object: seed its
                    # shared context from obj.context plus its plain
                    # (non-callable, non-dunder) attributes.
                    if not cls.context.get(obj, None):
                        if getattr(obj, "context", None):
                            cls.context[obj] = obj.context
                        else:
                            cls.context[obj] = {}
                        cls.context[obj].update(
                            {
                                i: getattr(obj, i)
                                for i in dir(obj)
                                if not callable(i)
                                and not i.startswith("__")
                                and not isinstance(getattr(obj, i), types.MethodType)
                            }
                        )
                    context = cls.context[obj]
                    obj.context = context
                    start_func = await cls_func(cls, obj, context, func, *args, **kwargs)
                    # Work-around if the decorators are stacked with multiple decorators for the same method
                    if getattr(func, FUNCTION_ATTRIBUTE, None):
                        # Re-decorate the inner invoker with its stored
                        # original arguments and start it as well.
                        decorated_func = cast(Callable, getattr(func, "func"))
                        decorated_cls_func = cast(Callable, getattr(func, "cls_func"))
                        decorated_args = cast(Tuple, getattr(func, "args"))
                        decorated_kwargs = cast(Dict, getattr(func, "kwargs"))
                        fn = cast(
                            Callable,
                            cls.decorator(decorated_cls_func)(*decorated_args, **decorated_kwargs)(decorated_func),
                        )
                        setattr(fn, START_ATTRIBUTE, True)
                        await fn(obj, *a, **kw)
                    return start_func
                # Work-around if the decorators are stacked with multiple decorators for the same method
                setattr(_decorator, "func", func)
                setattr(_decorator, "cls_func", cls_func)
                setattr(_decorator, "args", args)
                setattr(_decorator, "kwargs", kwargs)
                setattr(_decorator, FUNCTION_ATTRIBUTE, True)
                return _decorator
            # Bare usage (@decorator): the single positional arg is the
            # function itself; otherwise return the parameterized wrapper.
            if not kwargs and len(args) == 1 and callable(args[0]):
                func = args[0]
                args = ()
                return wrapper(func)
            else:
                return wrapper
        return _wrapper
| StarcoderdataPython |
196286 | <filename>park/urls.py
from django.urls import path
from . import views
# URL routes for the park app: the home page plus endpoints for adding
# and listing parks (handled by the corresponding views).
urlpatterns = [
    path('', views.home, name='home' ),
    path('add_park/', views.addPark, name='add_park'),
    path('get_parks/', views.getParks, name='get_parks'),
]
| StarcoderdataPython |
1733319 | import tensorflow as tf
import numpy as np
from transformers import *
import re, string
import pandas as pd
import sys
from keras.preprocessing.sequence import pad_sequences
def map_sent(sent):
    """Map an airline sentiment label to a binary class.

    'positive' and 'neutral' map to 1, 'negative' to 0. Any other label
    returns None (made explicit; the dataset column only contains the
    three labels above, so callers never see it in practice).
    """
    if sent in ('positive', 'neutral'):
        return 1
    if sent == 'negative':
        return 0
    return None
def deEmojify(inputString):
    """Strip every non-ASCII character (emoji, accented letters) by
    round-tripping through an ASCII encode with errors ignored."""
    ascii_only = inputString.encode('ascii', 'ignore')
    return ascii_only.decode('ascii')
# Evaluate a fine-tuned BERT sentiment classifier on the airline tweets
# dataset, reporting accuracy over high-confidence labels.
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
pattern = re.compile('[\W_]+', re.UNICODE)
df = pd.read_csv('dataset/Tweets.csv')
df = df[:5000]
# Clean tweets: drop non-ASCII, URLs, @mentions and punctuation.
df['text'] = df['text'].apply(lambda title: deEmojify(title))
df['text'] = df['text'].apply(lambda title: re.sub(r"http\S+", "", title))
df['text'] = df['text'].apply(lambda title: ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"," ",title).split()))
df['original'] = df['text'].copy()
# BUGFIX: BERT's end-of-sequence special token is [SEP], not [CEP]; an
# unknown token gets wordpiece-split and corrupts the encoding.
df['text'] = df['text'].apply(lambda title: '[CLS] ' + title + ' [SEP]')
df['text'] = df['text'].apply(lambda title: tokenizer.tokenize(title))
df['airline_sentiment'] = df['airline_sentiment'].apply(lambda sent: map_sent(sent))
MAX_LEN=128
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(title) for title in df['text']],
                          maxlen=MAX_LEN, dtype='long', truncating='post', padding='post')
model = TFBertForSequenceClassification.from_pretrained('./seemsaccurate7/')
classes = ['negative', 'positive']
results = model.predict(input_ids)
count = 0
total = 0
# Score only rows whose human label confidence is 1.0.
for i in range(len(results)):
    classi = np.argmax(results[i])
    orig_sent = df['airline_sentiment'][i]
    confidence = df['airline_sentiment_confidence'][i]
    if confidence == 1:
        total += 1
        if orig_sent == classi:
            count += 1
    print('Sentence: {:s}'.format(df['original'][i]))
    print('Sentiment: {:s}'.format(classes[classi]))
    print('Real Sentiment: {:s}'.format(classes[orig_sent]))
accuracy = (count / total) * 100
print(count)
print(total)
print('Accuracy {:.2f}'.format(accuracy))
| StarcoderdataPython |
183902 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import pdb, os, argparse
from tqdm import tqdm
from datetime import datetime
from model.CPD_ResNet_models import CPD_ResNet
from model.Sal_CNN import Sal_CNN
from data import get_loader
from utils import clip_gradient, adjust_lr
from DSU_test import eval_data
from tensorboardX import SummaryWriter
import torch.backends.cudnn as cudnn
from attentive_training import loss_weight, update_pseudoLabel, ContrastiveLoss
cudnn.benchmark = True
writer = SummaryWriter()
# Command-line configuration for the DSU training run.
# NOTE(review): argparse's type=bool is a known footgun -- any non-empty
# string (including "False") parses as True; verify how --ckpt_load is
# actually passed.
parser = argparse.ArgumentParser()
parser.add_argument('--ckpt_load', type=bool, default=False, help='whether load checkpoint or not')
parser.add_argument('--snapshot', type=int, default=None, help='load checkpoint number')
parser.add_argument('--epoch', type=int, default=40, help='epoch number')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
parser.add_argument('--batchsize', type=int, default=10, help='training batch size')
parser.add_argument('--trainsize', type=int, default=352, help='training dataset size')
parser.add_argument('--clip', type=float, default=0.5, help='gradient clipping margin')
parser.add_argument('--decay_rate', type=float, default=0.1, help='decay rate of learning rate')
parser.add_argument('--decay_epoch', type=int, default=200, help='every n epochs decay learning rate')
opt = parser.parse_args()
image_root = '../Dataset/train_data/images/'
depth_root = '../Dataset/train_data/depth/'
gt_root = '../Dataset/train_data/fake_mask/'
# GT is generated by CDCP [1] method that does not require any human annotation efforts.
# [1] An Innovative Salient Object Detection using Center-dark Channel Prior, ICCVW, 2017.
val_root = '../Dataset/test_data/'
validation = ['NJUD']
# build models
# Both Saliency and Depth Networks employ the CPD [2] extractor equipped with ResNet-50.
# [2] Cascaded Partial Decoder for Fast and Accurate Salient Object Detection, CVPR, 2019.
model_rgb = CPD_ResNet()
model_depth = CPD_ResNet()
model = Sal_CNN()
# Optionally resume all three networks from a numbered snapshot.
if opt.ckpt_load:
    model_rgb.load_state_dict(torch.load('./ckpt/' + 'DSU_rgb.pth.' + str(opt.snapshot)))
    model_depth.load_state_dict(torch.load('./ckpt/' + 'DSU_depth.pth.' + str(opt.snapshot)))
    model.load_state_dict(torch.load('./ckpt/' + 'DSU.pth.' + str(opt.snapshot)))
cuda = torch.cuda.is_available()
if cuda:
    model_rgb.cuda()
    model_depth.cuda()
    model.cuda()
# One Adam optimizer per network, all sharing the same base lr.
params_rgb = model_rgb.parameters()
params_depth = model_depth.parameters()
params = model.parameters()
optimizer_rgb = torch.optim.Adam(params_rgb, opt.lr)
optimizer_depth = torch.optim.Adam(params_depth, opt.lr)
optimizer = torch.optim.Adam(params, opt.lr)
train_loader = get_loader(image_root, gt_root, depth_root, batchsize=opt.batchsize, trainsize=opt.trainsize)
total_step = len(train_loader)
# Loss primitives shared by train().
CE = torch.nn.BCEWithLogitsLoss()
MSE = torch.nn.MSELoss()
Distance = ContrastiveLoss()
def train(train_loader, model_rgb, model_depth, model,
          optimizer_rgb, optimizer_depth,optimizer, epoch):
    """Run one DSU training epoch over the three coupled streams
    (RGB saliency, depth regression, saliency-depth fusion), logging to
    tensorboard and checkpointing every second epoch."""
    model_rgb.train()
    model_depth.train()
    model.train()
    for i, pack in enumerate(tqdm(train_loader), start=1):
        iteration = i + epoch*len(train_loader)
        optimizer_rgb.zero_grad()
        optimizer_depth.zero_grad()
        optimizer.zero_grad()
        images, gts, depths,ppath,ori_data = pack
        images = Variable(images)
        gts = Variable(gts)
        depths = Variable(depths)
        # ori_data holds the un-augmented (image, gt, depth) triple used
        # for pseudo-label refreshing below.
        ori_data = [Variable(i) for i in ori_data]
        if cuda:
            images = images.cuda()
            gts = gts.cuda()
            depths = depths.cuda()
            ori_data = [i.cuda() for i in ori_data]
        '''~~~Our DSU Framework~~~'''
        # RGB Stream
        '''Attentive Training Strategy'''
        atts_rgb,dets_rgb,_= model_rgb(images)
        pred_sal = dets_rgb.detach()
        # The update interval τ is 3, amounting to 2τ = 6 epochs in a training round.
        if (epoch + 1) % 6 <= 3 and (epoch + 1) % 6 > 0: # Step One
            # Plain BCE against the (pseudo) ground truth.
            loss_rgb1 = CE(atts_rgb, gts)
            loss_rgb2 = CE(dets_rgb, gts)
            loss_rgb = (loss_rgb1 + loss_rgb2) / 2.0
        else: # Step Two
            # Pixel-weighted BCE: weights come from prediction/label
            # agreement computed by loss_weight().
            weight, _ = loss_weight(dets_rgb, gts)
            Weighted_CE = torch.nn.BCEWithLogitsLoss(weight=weight)
            loss_rgb1 = Weighted_CE(atts_rgb, gts)
            loss_rgb2 = Weighted_CE(dets_rgb, gts)
            loss_rgb = (loss_rgb1 + loss_rgb2) / 2.0
        loss_rgb.backward()
        clip_gradient(optimizer_rgb, opt.clip)
        optimizer_rgb.step()
        # Depth Stream
        atts_depth,dets_depth,feature = model_depth(images)
        loss_depth1 = MSE(atts_depth, depths)
        loss_depth2 = MSE(dets_depth, depths)
        loss_depth = (loss_depth1 + loss_depth2) / 2.0
        loss_depth.backward()
        clip_gradient(optimizer_depth, opt.clip)
        optimizer_depth.step()
        # Fusion stream
        # Decompose depth into salient / non-salient parts guided by the
        # detached saliency prediction, with a feature-consistency term.
        old_feature = feature.detach()
        S_dep, Non_S_dep, new_feature, depth_pos, depth_neg, pred_depth = model(pred_sal,depths,old_feature)
        loss_Sal_depth = MSE(S_dep,depth_pos)
        loss_NonSal_depth = MSE(Non_S_dep,depth_neg)
        loss_depth_new = MSE(pred_depth,depths)
        loss_consistency = Distance(old_feature,new_feature)/50
        loss = (loss_Sal_depth + loss_NonSal_depth + loss_depth_new + loss_consistency)/4.0
        loss.backward()
        clip_gradient(optimizer, opt.clip)
        optimizer.step()
        if (epoch + 1) % 6 == 0:
            '''Update pseudo Label'''
            # Note that: we need to obtain original data with no augmentation to replace the fake label
            with torch.no_grad():
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
                img_ori,gt_ori,depth_ori = ori_data
                _, det_rgb, _ = model_rgb(img_ori)
                pred_saliency = det_rgb.detach()
                _, _, feature1 = model_depth(img_ori)
                old_feature1 = feature1.detach()
                S_dep1, Non_S_dep1, _, _, _, _ = model(pred_saliency, depth_ori, old_feature1)
                S_depth1 = S_dep1.detach()
                Non_S_depth1 = Non_S_dep1.detach()
                _, l_weight = loss_weight(pred_saliency, gt_ori)
                # Rewrite the pseudo mask files on disk (ppath) for the
                # next training round.
                update_pseudoLabel(l_weight,ppath,S_depth1,Non_S_depth1,pred_saliency,int(epoch+1))
        '''~~~END~~~'''
        if i % 400 == 0 or i == total_step:
            print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss_rgb: {:.4f} Loss_depth_sal: {:0.4f}'.
                  format(datetime.now(), epoch, opt.epoch, i, total_step, loss_rgb.data, loss.data))
            writer.add_scalar('Loss/rgb', loss_rgb.item(), iteration)
            writer.add_scalar('Loss/depth', loss_depth.item(), iteration)
            writer.add_scalar('Loss/Sal_depth', loss.item(), iteration)
    # Checkpoint all three networks every second epoch.
    save_path = 'ckpt/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if (epoch+1) % 2 == 0:
        torch.save(model_rgb.state_dict(), save_path + 'DSU_rgb.pth' + '.%d' % (epoch+1))
        torch.save(model_depth.state_dict(), save_path + 'DSU_depth.pth' + '.%d' % (epoch + 1))
        torch.save(model.state_dict(), save_path + 'DSU.pth' + '.%d' % (epoch + 1))
print("Let's go!")
for epoch in range(1, opt.epoch):
adjust_lr(optimizer_rgb, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
adjust_lr(optimizer_depth, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
train(train_loader, model_rgb, model_depth, model,
optimizer_rgb, optimizer_depth,optimizer, epoch)
if (epoch+1) % 2 == 0:
ckpt_name = '.' + str(epoch+1)
eval_data(val_root, validation,ckpt_name)
if epoch >= opt.epoch -1:
writer.close()
| StarcoderdataPython |
3353750 | from unicorn.arm_const import *
from .. import native
def parse_fuzzed_irqs(entries):
    """Parse the fuzzable-IRQ configuration into a set of IRQ numbers.

    *entries* maps config names to either an int (a single IRQ) or a
    string of comma-separated items, each an int or an inclusive
    "start-end" range (whitespace ignored): "1-3, 7" -> {1, 2, 3, 7}.

    Raises:
        ValueError: if a range has start > end (previously a bare
            ``assert``, which disappears under ``python -O``), or an
            item cannot be parsed as an int.
        TypeError: if a value is neither int nor str (previously
            ``assert(False)``).
    """
    allowed_irqs = set()
    print("Parsing fuzzable irq configuration")
    for name, int_or_range_string in entries.items():
        if isinstance(int_or_range_string, int):
            allowed_irqs.add(int_or_range_string)
        elif isinstance(int_or_range_string, str):
            for entry in int_or_range_string.replace(" ", "").replace("\t", "").split(","):
                if "-" in entry:
                    start, end = map(int, entry.split("-"))
                    print("Adding fuzzable irq range: {}-{}".format(start, end))
                    if start > end:
                        raise ValueError(
                            "Invalid irq range {}-{} in entry '{}'".format(start, end, name))
                    allowed_irqs.update(range(start, end + 1))
                else:
                    print("Adding fuzzable irq entry: {}".format(entry))
                    allowed_irqs.add(int(entry))
        else:
            raise TypeError(
                "Entry '{}' must be an int or a range string, got {!r}".format(
                    name, int_or_range_string))
    return allowed_irqs
def get_fuzz(size):
    """
    Gets at most 'size' bytes from the fuzz pool.
    If we run out of fuzz, something will happen (e.g., exit)
    :param size: maximum number of bytes to consume from the pool
    :return: the bytes handed out by the native backend
    """
    return native.get_fuzz(size)
def fuzz_remaining():
    """Return how much fuzz input is left in the native pool."""
    return native.fuzz_remaining()
def load_fuzz(file_path):
    """Load the fuzz input pool from *file_path* via the native backend."""
    native.load_fuzz(file_path)
def return_fuzz_byte(uc):
    """Feed one fuzz byte to the emulated CPU as a return value by
    writing it into ARM r0.

    Fixes: removed a stale ``global fuzz`` declaration -- no module-level
    ``fuzz`` name is defined or used anywhere in this module.
    """
    c = get_fuzz(1)
    # TODO: This is not generic, add archinfo here to find the ret regs
    uc.reg_write(UC_ARM_REG_R0, ord(c))
| StarcoderdataPython |
118266 | <reponame>rzuckerm/pylama
""" Support libs. """
| StarcoderdataPython |
1635566 | # Generated by Django 2.2.19 on 2021-05-11 13:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.19): creates the FeedbackComment
    # model, which stores a user feedback entry attached to a wagtail Page.
    dependencies = [
        ("wagtailcore", "0060_fix_workflow_unique_constraint"),
        ("feedback", "0001_initial"),
    ]
    operations = [
        migrations.CreateModel(
            name="FeedbackComment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                (
                    "action",
                    # NOTE(review): verbose_name has a typo ("where" should be
                    # "were"); fixing it belongs in the model + a new migration,
                    # not in this historical one.
                    models.CharField(
                        max_length=500, verbose_name="What where you doing?"
                    ),
                ),
                (
                    "issue",
                    models.CharField(max_length=500, verbose_name="What went wrong?"),
                ),
                (
                    # Deleting the page cascades to its feedback comments.
                    "page",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="feedback_comments",
                        to="wagtailcore.Page",
                    ),
                ),
            ],
        ),
    ]
| StarcoderdataPython |
1601840 | <filename>taxa_de_servico.py
# Hotel-bill calculator: a fixed daily rate of R$60 plus a per-day service
# fee whose rate depends on the length of the stay.
nome = input("Informe seu nome: ")
diaria = int(input("Quantos dias você ficou? "))
# Service fee per day: 5.5 for stays longer than 15 days, 6.0 for exactly
# 15 days, 8.0 for shorter stays.
if diaria > 15:
    taxa = diaria * 5.5
elif diaria == 15:
    taxa = diaria * 6
else:
    taxa = diaria * 8
# Fixed: removed a dead `conta = diaria * 60` assignment that was
# unconditionally overwritten by the line below before any use.
conta = (diaria * 60) + taxa
print(nome, "Total da conta", conta)
| StarcoderdataPython |
191948 | <filename>HTML-Swapper/GUI/RuleWidgetTwoValues.py
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\Programming\HTML-Swapper\RuleWidgetTwoValues.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated PyQt5 UI class (pyuic5 output from RuleWidgetTwoValues.ui).

    Builds a single-row rule widget: a rule-type combo box, a vertical
    separator, and two line edits (attribute name / attribute value).
    Do not hand-edit the layout logic -- regenerate from the .ui file instead.
    """
    def setupUi(self, Form):
        """Create and lay out the child widgets on *Form*."""
        Form.setObjectName("Form")
        Form.resize(350, 24)
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.ruleTypeDropdown = QtWidgets.QComboBox(Form)
        self.ruleTypeDropdown.setObjectName("ruleTypeDropdown")
        self.horizontalLayout.addWidget(self.ruleTypeDropdown)
        # Thin vertical separator between the dropdown and the line edits.
        self.line = QtWidgets.QFrame(Form)
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.horizontalLayout.addWidget(self.line)
        self.attributeLineEdit = QtWidgets.QLineEdit(Form)
        self.attributeLineEdit.setObjectName("attributeLineEdit")
        self.horizontalLayout.addWidget(self.attributeLineEdit)
        self.attributeValueLineEdit = QtWidgets.QLineEdit(Form)
        self.attributeValueLineEdit.setObjectName("attributeValueLineEdit")
        self.horizontalLayout.addWidget(self.attributeValueLineEdit)
        # Equal stretch for dropdown and the two edits (the separator at
        # index 1 keeps its natural width).
        self.horizontalLayout.setStretch(0, 3)
        self.horizontalLayout.setStretch(2, 3)
        self.horizontalLayout.setStretch(3, 3)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply translatable strings (window title only)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
| StarcoderdataPython |
129894 | <filename>svhn 64x64/unpacker.py
import h5py
class GetBoundingBoxes:
    """Reader for the SVHN `digitStruct.mat` bounding-box file (HDF5 format).

    NOTE(review): this code uses the h5py `Dataset.value` accessor, which was
    deprecated and removed in h5py 3.0 -- it only runs on h5py < 3; newer
    versions would need `ds[()]` instead. Confirm the pinned h5py version.
    """
    def __init__(self, inf):
        # Keep the file handle open for the object's lifetime; name/bbox are
        # parallel datasets indexed by image number.
        self.inf = h5py.File(inf, 'r')
        self.digitStructName = self.inf['digitStruct']['name']
        self.digitStructBbox = self.inf['digitStruct']['bbox']
    def get_name(self, n):
        # The filename is stored as an array of character codes behind an
        # HDF5 object reference; join them into a str.
        return ''.join([chr(c[0]) for c in self.inf[self.digitStructName[n][0]].value])
    def get_attribute(self, attr):
        # A bbox attribute is either a list of object references (several
        # digits in the image) or a single inline value (one digit).
        if (len(attr) > 1):
            attr = [self.inf[attr.value[j].item()].value[0][0] for j in range(len(attr))]
        else:
            attr = [attr.value[0][0]]
        return attr
    def get_bbox(self, n):
        """Return the bbox attributes of image *n* as lists, one entry per digit."""
        bbox = {}
        bb = self.digitStructBbox[n].item()
        bbox['height'] = self.get_attribute(self.inf[bb]["height"])
        bbox['label'] = self.get_attribute(self.inf[bb]["label"])
        bbox['left'] = self.get_attribute(self.inf[bb]["left"])
        bbox['top'] = self.get_attribute(self.inf[bb]["top"])
        bbox['width'] = self.get_attribute(self.inf[bb]["width"])
        return bbox
    def get_item(self, n):
        """Return bbox attributes plus the filename for image *n*."""
        s = self.get_bbox(n)
        s['name'] = self.get_name(n)
        return s
    def unpack(self):
        """Return the raw per-image dicts for the whole file."""
        return [self.get_item(i) for i in range(len(self.digitStructName))]
    def unpack_all(self):
        """Return [{'filename': ..., 'boxes': [{height,label,left,top,width}, ...]}, ...]."""
        pictDat = self.unpack()
        result = []
        # NOTE(review): structCnt is incremented but never used.
        structCnt = 1
        for i in range(len(pictDat)):
            item = {'filename': pictDat[i]["name"]}
            figures = []
            # One figure dict per digit in the image.
            for j in range(len(pictDat[i]['height'])):
                figure = {}
                figure['height'] = pictDat[i]['height'][j]
                figure['label'] = pictDat[i]['label'][j]
                figure['left'] = pictDat[i]['left'][j]
                figure['top'] = pictDat[i]['top'][j]
                figure['width'] = pictDat[i]['width'][j]
                figures.append(figure)
            structCnt = structCnt + 1
            item['boxes'] = figures
            result.append(item)
        return result
| StarcoderdataPython |
43681 | <reponame>hth945/pytest<filename>paddle/za/test/test2.py
# 导入图像读取第三方库
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
import numpy as np
from PIL import Image
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import Linear
# Read the image (matplotlib loads PNGs as float RGB(A) arrays).
# Fixed: removed an unused `img1 = cv2.imread(...)` that re-read the same
# file and was never referenced.
example = mpimg.imread('./work/example_0.png')
# Display the image.
plt.imshow(example)
plt.show()
# Reload the same file as a grayscale PIL image and print its shape.
im = Image.open('./work/example_0.png').convert('L')
print(np.array(im).shape)
# Downsample to 28x28 (presumably the MNIST-style model input -- confirm).
# Fixed: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
# same filter under its current name.
im = im.resize((28, 28), Image.LANCZOS)
plt.imshow(im)
plt.show()
print(np.array(im).shape)
| StarcoderdataPython |
194719 | """Support alarm_control_panel entity for Xiaomi Miot."""
import logging
from homeassistant.const import * # noqa: F401
from homeassistant.components.alarm_control_panel import (
DOMAIN as ENTITY_DOMAIN,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import *
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
_LOGGER = logging.getLogger(__name__)
# Key under which this platform stores its data in hass.data.
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
# Custom services exposed by this platform (none registered yet).
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up alarm entities for a config entry (delegates to the shared helper,
    which calls async_setup_platform below for this entity domain)."""
    await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create one MiotAlarmEntity per 'arming' service that exposes an
    'arming_mode' property on the device's MIoT spec."""
    hass.data.setdefault(DATA_KEY, {})
    hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
    # NOTE(review): `model` is computed but never used in this function.
    model = str(config.get(CONF_MODEL) or '')
    entities = []
    miot = config.get('miot_type')
    if miot:
        spec = await MiotSpec.async_from_type(hass, miot)
        for srv in spec.get_services('arming'):
            # Only services that can actually report/set an arming mode.
            if not srv.get_property('arming_mode'):
                continue
            entities.append(MiotAlarmEntity(config, srv))
    # Register entities globally before handing them to Home Assistant.
    for entity in entities:
        hass.data[DOMAIN]['entities'][entity.unique_id] = entity
    async_add_entities(entities, update_before_add=True)
    bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotAlarmEntity(MiotEntity, AlarmControlPanelEntity):
    """Alarm control panel backed by a Xiaomi MIoT 'arming' service.

    Supported arm modes are discovered from the 'arming_mode' property's
    value list; the lumi.gateway.mgl03 additionally supports triggering.
    """
    def __init__(self, config, miot_service: MiotService):
        super().__init__(miot_service, config=config, logger=_LOGGER)
        # MIoT arming has no PIN concept; never require a code to arm.
        self._attr_code_arm_required = False
        # The mgl03 gateway gets special-cased trigger/alarm handling below.
        self._is_mgl03 = self._model == 'lumi.gateway.mgl03'
        self._prop_mode = miot_service.get_property('arming_mode')
        if self._prop_mode:
            # Advertise only the arm modes this device's value list contains.
            if self._prop_mode.list_value('home_arming') is not None:
                self._supported_features |= SUPPORT_ALARM_ARM_HOME
            if self._prop_mode.list_value('away_arming') is not None:
                self._supported_features |= SUPPORT_ALARM_ARM_AWAY
            if self._prop_mode.list_value('sleep_arming') is not None:
                self._supported_features |= SUPPORT_ALARM_ARM_NIGHT
            if self._is_mgl03:
                self._supported_features |= SUPPORT_ALARM_TRIGGER
    @property
    def state(self):
        """Return the state of the entity."""
        return self._attr_state
    async def async_update(self):
        """Refresh device attributes, then recompute the alarm state."""
        await super().async_update()
        if not self._available:
            return
        self.update_state()
    def update_state(self):
        """Map the device's arming_mode description onto an HA alarm state.

        Matching is by substring of the (lowercased) mode description;
        'basic' means disarmed. For the mgl03, an active 'arming.alarm'
        attribute overrides everything with TRIGGERED.
        """
        if self._prop_mode:
            val = self._prop_mode.from_dict(self._state_attrs)
            des = self._prop_mode.list_description(val) if val is not None else None
            if des is not None:
                des = f'{des}'.lower()
                if 'basic' in des:
                    self._attr_state = STATE_ALARM_DISARMED
                elif 'home' in des:
                    self._attr_state = STATE_ALARM_ARMED_HOME
                elif 'away' in des:
                    self._attr_state = STATE_ALARM_ARMED_AWAY
                elif 'sleep' in des:
                    self._attr_state = STATE_ALARM_ARMED_NIGHT
        if self._is_mgl03:
            val = self._state_attrs.get('arming.alarm')
            if val:
                self._attr_state = STATE_ALARM_TRIGGERED
        return self._attr_state
    def set_arm_mode(self, mode):
        """Set arming_mode to the value listed under *mode*; returns True on success."""
        ret = False
        val = self._prop_mode.list_value(mode)
        if val is not None:
            ret = self.set_property(self._prop_mode, val)
        if ret:
            self.update_state()
        return ret
    def alarm_disarm(self, code=None):
        """Send disarm command."""
        # 'basic_arming' is the device's disarmed mode (see update_state).
        return self.set_arm_mode('basic_arming')
    def alarm_arm_home(self, code=None):
        """Send arm home command."""
        return self.set_arm_mode('home_arming')
    def alarm_arm_away(self, code=None):
        """Send arm away command."""
        return self.set_arm_mode('away_arming')
    def alarm_arm_night(self, code=None):
        """Send arm night command."""
        return self.set_arm_mode('sleep_arming')
    def alarm_arm_vacation(self, code=None):
        """Send arm vacation command."""
        raise NotImplementedError()
    def alarm_trigger(self, code=None):
        """Send alarm trigger command."""
        if self._is_mgl03:
            # NOTE(review): hard-coded siid/piid/value (3, 22, 1) for the
            # mgl03 gateway -- verify against its MIoT spec.
            return self.set_miot_property(3, 22, 1)
        raise NotImplementedError()
    def alarm_arm_custom_bypass(self, code=None):
        """Send arm custom bypass command."""
        raise NotImplementedError()
| StarcoderdataPython |
3389913 | <reponame>cypherdotXd/o3de
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Tests the Python API from DisplaySettingsPythonFuncs.cpp while the Editor is running
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.display_settings as display_settings
# Retrieve current settings
existingState = display_settings.DisplaySettingsBus(bus.Broadcast, 'GetSettingsState')
# Alter settings
# NOTE(review): `azlmbr` itself is never imported (only submodules are, with
# aliases) -- presumably the O3DE editor injects the package name into the
# script environment; this would be a NameError in plain Python. Confirm.
alteredState = azlmbr.object.create('DisplaySettingsState')
alteredState.no_collision = False
alteredState.no_labels = False
alteredState.simulate = True
alteredState.hide_tracks = False
alteredState.hide_links = False
alteredState.hide_helpers = False
alteredState.show_dimension_figures = True
# Set altered settings
display_settings.DisplaySettingsBus(bus.Broadcast, 'SetSettingsState', alteredState)
# Get settings again
newState = display_settings.DisplaySettingsBus(bus.Broadcast, 'GetSettingsState')
# Check if the setter worked (the harness looks for this exact print)
if alteredState.no_collision == newState.no_collision and \
    alteredState.no_labels == newState.no_labels and \
    alteredState.simulate == newState.simulate and \
    alteredState.hide_tracks == newState.hide_tracks and \
    alteredState.hide_links == newState.hide_links and \
    alteredState.hide_helpers == newState.hide_helpers and \
    alteredState.show_dimension_figures == newState.show_dimension_figures:
    print("display settings were changed correctly")
# Restore previous settings
display_settings.DisplaySettingsBus(bus.Broadcast, 'SetSettingsState', existingState)
editor.EditorToolsApplicationRequestBus(bus.Broadcast, 'ExitNoPrompt')
| StarcoderdataPython |
3206581 | <gh_stars>0
from coopihc.observation.BaseObservationEngine import BaseObservationEngine
import copy
class CascadedObservationEngine(BaseObservationEngine):
    """Chain several observation engines into a single serial engine.

    Each engine observes the game state produced by the previous one:

        Gamestate --> Engine1 --> Engine2 --> ... --> EngineN --> Observation

    :param engine_list: observation engines to run, in order
    :type engine_list: list(:py:mod:`Observation Engine<coopihc.observation>`)
    """

    def __init__(self, engine_list, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.engine_list = engine_list

    def __content__(self):
        """__content__

        Custom class repr

        :return: custom repr
        :rtype: dictionnary
        """
        inner = {}
        for index, engine in enumerate(self.engine_list):
            inner["Engine{}".format(index)] = engine.__content__()
        return {self.__class__.__name__: inner}

    def observe(self, game_state):
        """observe

        Serial observations (i.e. output of an engine becomes input of the
        next one). The caller's game state is not mutated: a deep copy is
        taken before the cascade runs.

        :param game_state: game state
        :type game_state: `State<coopihc.base.State.State`
        :return: (observation, obs reward)
        :rtype: tuple(`State<coopihc.base.State.State`, float)
        """
        state = copy.deepcopy(game_state)
        total_reward = 0
        for engine in self.engine_list:
            observation, reward = engine.observe(state)
            state.update(observation)
            total_reward += reward
        return state, total_reward
| StarcoderdataPython |
4839286 | from bai_kafka_utils.events import (
BenchmarkDoc,
VisitedService,
FetcherBenchmarkEvent,
DownloadableContent,
FetcherPayload,
FileSystemObject,
)
BIG_FETCHER_JSON = """{
"date": "Thu May 02 16:15:42 UTC 2019",
"authenticated": false,
"payload": {
"toml": {
"descriptor_filename": "example_descriptor2.toml",
"sha1": "be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
"doc": "IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
"verified": true,
"contents": {
"spec_version": "0.1.0",
"data": {
"sources": [
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/train"
},
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/validation"
}
],
"id": "imagenet"
},
"env": {
"privileged": false,
"extended_shm": true,
"docker_image": "user/repo:tag"
},
"info": {
"task_name": "Example benchmark",
"scheduling": "single_run",
"description": " Full job description."
},
"hardware": {
"distributed": {
"num_instances": 3
},
"strategy": "horovod",
"instance_type": "p3.8xlarge"
},
"ml": {
"args": "--model=resnet50_v2 --batch-size=32",
"benchmark_code": "python /root/train.sh"
}
}
},
"datasets": [
{
"src": "s3://bucket/imagenet/train",
"path": "~/data/tf-imagenet/"
},
{
"src": "s3://bucket/imagenet/validation",
"path": "~/data/tf-imagenet/"
}
],
"models": [
{
"src": "s3://bucket/model/inception",
"path": "/models/inception",
"md5": "5d41402abc4b2a76b9719d911017c592"
},
{
"src": "s3://bucket/models/mnist",
"path": "/models/mnist"
}
],
"scripts": [
{
"dst": "s3://script-exchange/foo.tar"
}
]
},
"tstamp": 1556814924121,
"client_username": "bellgav",
"action_id": "ffea52eb-c24b-4dd0-b32e-61230db34ad5",
"visited": [
{
"svc": "baictl-client",
"tstamp": "@@TSTAMP@@",
"version": "0.1.0-481dad2"
},
{
"svc": "bai-bff",
"tstamp": 1556814924121,
"version": "0.0.2"
}
],
"message_id": "007bd9f8-f564-4edb-bb48-7380ee562ffc",
"client_sha1": "c05467317b6765535f1ec60f0aee812d39b35dd2",
"client_id": "97e7eb322342626974fb171fc5793514b0aea789",
"client_version": "0.1.0-481dad2",
"type": "BAI_APP_BFF"
}"""
# Expected parsed form of the "contents" object inside payload.toml above.
EXPECTED_FETCHER_CONTENTS = {
    "spec_version": "0.1.0",
    "data": {
        "sources": [
            {"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/train"},
            {"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/validation"},
        ],
        "id": "imagenet",
    },
    "env": {"privileged": False, "extended_shm": True, "docker_image": "user/repo:tag"},
    "info": {"task_name": "Example benchmark", "scheduling": "single_run", "description": " Full job description."},
    "hardware": {"distributed": {"num_instances": 3}, "strategy": "horovod", "instance_type": "p3.8xlarge"},
    "ml": {"args": "--model=resnet50_v2 --batch-size=32", "benchmark_code": "python /root/train.sh"},
}
# Expected payload.toml descriptor document.
EXPECTED_FETCHER_DOC = BenchmarkDoc(
    doc="IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
    sha1="be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
    contents=EXPECTED_FETCHER_CONTENTS,
    verified=True,
    descriptor_filename="example_descriptor2.toml",
)
# Expected service-visit trail (top-level "visited" array).
EXPECTED_FETCHER_VISITED = [
    VisitedService(svc="baictl-client", tstamp="@@TSTAMP@@", version="0.1.0-481dad2"),
    VisitedService(svc="bai-bff", tstamp=1556814924121, version="0.0.2"),
]
# Expected payload.datasets / payload.scripts / payload.models.
EXPECTED_FETCHER_DATASETS = [
    DownloadableContent("s3://bucket/imagenet/train", path="~/data/tf-imagenet/"),
    DownloadableContent("s3://bucket/imagenet/validation", path="~/data/tf-imagenet/"),
]
EXPECTED_FETCHER_SCRIPTS = [FileSystemObject(dst="s3://script-exchange/foo.tar")]
EXPECTED_FETCHER_MODELS = [
    DownloadableContent(
        "s3://bucket/model/inception", path="/models/inception", md5="5d41402abc4b2a76b9719d911017c592"
    ),
    DownloadableContent("s3://bucket/models/mnist", path="/models/mnist"),
]
# Full expected event assembled from the pieces above.
EXPECTED_FETCHER_EVENT = FetcherBenchmarkEvent(
    action_id="ffea52eb-c24b-4dd0-b32e-61230db34ad5",
    message_id="007bd9f8-f564-4edb-bb48-7380ee562ffc",
    client_id="97e7eb322342626974fb171fc5793514b0aea789",
    client_version="0.1.0-481dad2",
    client_username="bellgav",
    authenticated=False,
    tstamp=1556814924121,
    visited=EXPECTED_FETCHER_VISITED,
    type="BAI_APP_BFF",
    payload=FetcherPayload(
        datasets=EXPECTED_FETCHER_DATASETS,
        scripts=EXPECTED_FETCHER_SCRIPTS,
        models=EXPECTED_FETCHER_MODELS,
        toml=EXPECTED_FETCHER_DOC,
    ),
)
def test_big_fetcher_json():
    """The raw BFF JSON deserializes into the expected FetcherBenchmarkEvent."""
    event = FetcherBenchmarkEvent.from_json(BIG_FETCHER_JSON)
    # Fixed: removed two leftover debug print() calls that dumped both
    # events to stdout on every run.
    assert event == EXPECTED_FETCHER_EVENT
| StarcoderdataPython |
54019 | <filename>cannula/helpers.py
import os
import pkgutil
import sys
def get_root_path(import_name):
    """Returns the path to a package or cwd if that cannot be found.

    Inspired by [flask](https://github.com/pallets/flask/blob/master/flask/helpers.py)

    :param import_name: dotted module/package name to locate.
    :return: absolute directory containing the module's file, or the
        current working directory when the module cannot be located on disk.
    """
    # Module already imported and has a file attribute. Use that first.
    mod = sys.modules.get(import_name)
    if mod is not None and hasattr(mod, '__file__'):
        return os.path.dirname(os.path.abspath(mod.__file__))
    # An unloaded main module (interactive session) has no file: go with
    # the current working directory.
    if import_name == '__main__':
        return os.getcwd()
    # Next attempt: ask the import machinery for the module's spec.
    # Fixed: pkgutil.get_loader() is deprecated since Python 3.12 and
    # removed in 3.14; importlib.util.find_spec() is the supported API.
    import importlib.util
    try:
        spec = importlib.util.find_spec(import_name)
    except (ImportError, ValueError):
        spec = None
    # No spec, or a module without a real file (built-in / frozen /
    # namespace package): fall back to the current working directory.
    if spec is None or not spec.origin or spec.origin in ('built-in', 'frozen', 'namespace'):
        return os.getcwd()
    return os.path.dirname(os.path.abspath(spec.origin))
| StarcoderdataPython |
1676914 | <filename>ucsmsdk/mometa/extmgmt/ExtmgmtGatewayPing.py
"""This module contains the general information for ExtmgmtGatewayPing ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class ExtmgmtGatewayPingConsts:
    """Constants for the ExtmgmtGatewayPing managed object (none defined)."""
    pass
class ExtmgmtGatewayPing(ManagedObject):
    """This is ExtmgmtGatewayPing class.

    Auto-generated UCSM SDK managed-object definition for the external
    management gateway-ping policy (rn "gw-ping-policy"). The metadata
    tables below drive the generic ManagedObject machinery; do not edit
    them by hand -- they are produced from the UCS schema.
    """
    consts = ExtmgmtGatewayPingConsts()
    naming_props = set([])
    # MO-level metadata: class ids, rn, access mask, privileges, parents.
    mo_meta = MoMeta("ExtmgmtGatewayPing", "extmgmtGatewayPing", "gw-ping-policy", VersionMeta.Version141i, "InputOutput", 0x7f, [], ["admin", "ext-lan-config"], ['extmgmtIfMonPolicy'], [], ["Get"])
    # Per-property metadata: XML name, type, access, mask, length/value ranges.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141i, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "max_deadline_timeout": MoPropertyMeta("max_deadline_timeout", "maxDeadlineTimeout", "uint", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["5-15"]),
        "number_of_ping_requests": MoPropertyMeta("number_of_ping_requests", "numberOfPingRequests", "uint", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["1-5"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141i, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141i, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # Mapping from XML attribute names to Python property names.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "maxDeadlineTimeout": "max_deadline_timeout",
        "numberOfPingRequests": "number_of_ping_requests",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; kwargs set initial properties."""
        self._dirty_mask = 0
        self.child_action = None
        self.max_deadline_timeout = None
        self.number_of_ping_requests = None
        self.sacl = None
        self.status = None
        ManagedObject.__init__(self, "ExtmgmtGatewayPing", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
100336 | #!/usr/bin/env python
from scapy.all import *
import sys
import argparse
import math
def parse_args(argv=None):
    """Parse command-line options for the fragment-crafting tool.

    Generalized: accepts an explicit argument list so the parser can be
    driven (and tested) without touching sys.argv. Existing callers that
    invoke ``parse_args()`` keep the old behaviour (``sys.argv[1:]``).

    :param argv: optional list of argument strings; defaults to sys.argv[1:].
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("remoteIP", help="Remote IP")
    parser.add_argument("localIP", help="Local IP")
    parser.add_argument("-p", "--protocol", type=str, default='udp',
                        help="Layer 4 protocol", choices=['icmp', 'tcp', 'udp'])
    parser.add_argument("-o", "--order", type=str, default="1023",
                        help="Order of the fragments, index starting with 0")
    parser.add_argument("-d", "--data", type=str, default="Z"*20,
                        help="Payload")
    parser.add_argument("-f", "--fragsize", type=int, default=8,
                        help="Fragment Size")
    parser.add_argument("-s", "--size", type=int, default=0,
                        help="Payload Size")
    parser.add_argument("-t", "--tcp_syn", action='store_true',
                        help="Send TCP SYN")
    parser.add_argument("-l", "--overlap", action='store_true',
                        help="Send overlapping fragments")
    parser.add_argument("-c", "--duplicate", type=int, default=0,
                        help="identical/duplicate fragments count")
    if argv is None:
        argv = sys.argv[1:]
    return parser.parse_args(argv)
# NOTE(review): this script is Python 2 only (print statements, xrange).
# It also relies on `os` and `random` leaking out of `from scapy.all import *`
# rather than being imported explicitly -- confirm before porting.
args = parse_args()
# Scapy layer constructor source strings, eval'd below per --protocol.
METHOD_MAP = {
    'icmp': 'ICMP()',
    'tcp': 'TCP()',
    'udp': 'UDP()'
}
# Layer-4 header sizes in bytes, used to predict the fragment count.
HEADER_SIZE = {
    'icmp': 8,
    'tcp': 20,
    'udp': 8,
}
# --size generates a synthetic payload; otherwise --data is sent verbatim.
if args.size:
    payload = "Z" * args.size
else:
    payload = args.data
# Expected number of IP fragments = ceil((l4 header + payload) / fragsize).
no_of_frags = int(math.ceil((HEADER_SIZE[args.protocol] + len(payload))/float(args.fragsize)))
# Drop outgoing RSTs so the local kernel does not kill the crafted TCP
# conversation (requires root; rule is never removed afterwards).
os.system(
    'iptables -A OUTPUT -p tcp --tcp-flags RST RST -s %s -j DROP' %
    args.localIP)
# All fragments share one IP id so the receiver reassembles them together.
# NOTE(review): `id` shadows the builtin.
id = random.randint(0, 65535)
print "Using IP id %s" % (id)
# --overlap mode: hand-craft three 8-byte fragments whose offsets overlap,
# then exit without running the normal fragmentation path.
if args.overlap:
    payload1 = "This is "
    payload2 = "overlapp"
    payload3 = "ing frag"
    proto = 1
    ip=IP(id=id, dst=args.remoteIP, proto=proto, flags=1)
    icmp = ICMP(type=8, code=0, chksum=0xe3eb)
    packet=ip/icmp/payload1
    send(packet)
    ip = IP(id=id, dst=args.remoteIP, proto=proto, flags=1, frag=1)
    packet = ip/payload2
    send(packet)
    ip = IP(id=id, dst=args.remoteIP, proto=proto, flags=0, frag=2)
    packet = ip/payload3
    send(packet)
    exit()
ip=IP(id=id, dst=args.remoteIP)
# Instantiate the layer-4 header for the chosen protocol (trusted input:
# choices are restricted by argparse).
proto = eval(METHOD_MAP[args.protocol])
if args.tcp_syn:
    # Create SYN packet
    packet=ip/TCP(sport=8100, dport=8000, flags="S", seq=42)/(payload)
else:
    packet=ip/proto/(payload)
frag = fragment(packet, fragsize=args.fragsize)
# Sanity check: scapy must produce the predicted number of fragments,
# otherwise the --order indices would be meaningless.
if len(frag) != no_of_frags:
    print "Failure:No. of fragments mismatch for packet length %s, from scapy:%s, expected:%s" % (
        len(packet), len(frag), no_of_frags)
    exit()
#Send the same fragments args.duplicate times
for i in xrange(args.duplicate):
    send(frag[int(args.order[0])])
# Send fragments in the user-specified order (one digit per fragment index).
for c in args.order:
    send(frag[int(c)])
    print "==================================================="
    print "Sent fragment:"
    frag[int(c)].show()
#Send the remaining fragments if any
if len(frag) > len(args.order):
    for i in range(len(args.order),len(frag)):
        send(frag[i])
| StarcoderdataPython |
1681437 | <gh_stars>1-10
from PIL import Image
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
from skimage.exposure import equalize_hist
from skimage.transform import resize
import torch
from torchvision import transforms
def plot(image, f=None):
    """Render *image* in grayscale (display range clamped to [0, 1]) with
    the axes hidden.

    Shows the figure interactively when *f* is None; otherwise saves it to
    the path/file-handle *f* with a tight bounding box and no padding.
    """
    plt.axis("off")
    plt.imshow(image, cmap="gray", vmin=0.0, vmax=1.0)
    if f is not None:
        plt.savefig(f, bbox_inches='tight', pad_inches=0)
    else:
        plt.show()
def volume_viewer(volume, initial_position=None, slices_first=True):
    """Plot a volume of shape [x, y, slices]
    Useful for MR and CT image volumes

    Args:
        volume (torch.Tensor or np.ndarray): With shape [slices, h, w]
        initial_position (list or tuple of len 3): (Optional)
        slices_first (bool): If slices are first or last dimension in volume
    """
    def remove_keymap_conflicts(new_keys_set):
        # Unbind any default matplotlib shortcuts that collide with our
        # navigation keys, so key presses reach process_key instead.
        for prop in plt.rcParams:
            if prop.startswith('keymap.'):
                keys = plt.rcParams[prop]
                remove_list = set(keys) & new_keys_set
                for key in remove_list:
                    keys.remove(key)
    def previous_slice(ax):
        # Step the axis one slice "back" (index wraps around via modulo)
        # and refresh both the image and the slice-number label.
        volume = ax.volume
        d = volume.shape[0]
        ax.index = (ax.index + 1) % d
        ax.images[0].set_array(volume[ax.index])
        ax.texts.pop()
        ax.text(5, 15, f"Slice: {d - ax.index}", color="white")
    def next_slice(ax):
        # Step the axis one slice "forward" (mirror of previous_slice).
        volume = ax.volume
        d = volume.shape[0]
        ax.index = (ax.index - 1) % d
        ax.images[0].set_array(volume[ax.index])
        ax.texts.pop()
        ax.text(5, 15, f"Slice: {d - ax.index}", color="white")
    def process_key(event):
        # Keyboard dispatch: j/k axial, u/i coronal, h/l sagittal.
        fig = event.canvas.figure
        # Move axial (slices)
        if event.key == 'k':
            next_slice(fig.axes[0])
        elif event.key == 'j':
            previous_slice(fig.axes[0])
        # Move coronal (h)
        elif event.key == 'u':
            previous_slice(fig.axes[1])
        elif event.key == 'i':
            next_slice(fig.axes[1])
        # Move saggital (w)
        elif event.key == 'h':
            previous_slice(fig.axes[2])
        elif event.key == 'l':
            next_slice(fig.axes[2])
        fig.canvas.draw()
    def prepare_volume(volume, slices_first):
        # Normalize the input to a numpy array with slices on axis 0,
        # padded to at least square in (slices, h) and flipped for display.
        # Convert to numpy
        if isinstance(volume, torch.Tensor):
            volume = volume.numpy()
        # Omit batch dimension
        if volume.ndim == 4:
            volume = volume[0]
        # If image is not loaded with slices_first, put slices dimension first
        if not slices_first:
            volume = np.moveaxis(volume, 2, 0)
        # Pad slices
        if volume.shape[0] < volume.shape[1]:
            pad_size = (volume.shape[1] - volume.shape[0]) // 2
            pad = [(0, 0)] * volume.ndim
            pad[0] = (pad_size, pad_size)
            volume = np.pad(volume, pad)
        # Flip directions for display
        volume = np.flip(volume, (0, 1, 2))
        return volume
    def plot_ax(ax, volume, index, title):
        # Attach the (transposed) volume to the axis and draw the initial
        # slice; per-axis state lives in ax.volume / ax.index.
        ax.volume = volume
        shape = ax.volume.shape
        d = shape[0]
        ax.index = d - index
        aspect = shape[2] / shape[1]
        ax.imshow(ax.volume[ax.index], aspect=aspect, vmin=0., vmax=1.)
        ax.set_title(title)
        ax.text(5, 15, f"Slice: {d - ax.index}", color="white")
    plt.rcParams['image.cmap'] = 'gray'
    plt.rcParams['image.interpolation'] = 'nearest'
    remove_keymap_conflicts({'h', 'j', 'k', 'l'})
    volume = prepare_volume(volume, slices_first)
    if initial_position is None:
        initial_position = torch.tensor(volume.shape) // 2
    # Volume shape [slices, h, w]
    # NOTE(review): the trailing comments below disagree with the titles
    # ("coronal" vs "saggital" appear swapped) -- confirm which is intended.
    fig, ax = plt.subplots(1, 3, figsize=(12, 4))
    plot_ax(ax[0], np.transpose(volume, (0, 2, 1)), initial_position[2],
            "axial")  # axial [slices, h, w]
    plot_ax(ax[1], np.transpose(volume, (2, 0, 1)), initial_position[1],
            "coronal")  # saggital [h, slices, w]
    plot_ax(ax[2], np.transpose(volume, (1, 0, 2)), initial_position[0],
            "sagittal")  # coronal [w, slices, h]
    fig.canvas.mpl_connect('key_press_event', process_key)
    print("Plotting volume, navigate:"
          "\naxial with 'j', 'k'"
          "\ncoronal with 'u', 'i'"
          "\nsaggital with 'h', 'l'")
    plt.show()
def write_txt(path: str, msg: str) -> None:
    """Write *msg* to the text file at *path*, overwriting any existing file.

    Fixed: the encoding is now pinned to UTF-8 so the output no longer
    depends on the platform's default locale encoding.
    """
    with open(path, "w", encoding="utf-8") as f:
        f.write(msg)
def load_nii(path: str, size: int = None, primary_axis: int = 0,
             dtype: str = "float32"):
    """Load a neuroimaging file with nibabel, [w, h, slices]
    https://nipy.org/nibabel/reference/nibabel.html
    Args:
        path (str): Path to nii file
        size (int): Optional. Output size for h and w. Only supports rectangles
        primary_axis (int): Primary axis (the one to slice along, usually 2)
        dtype (str): Numpy datatype
    Returns:
        volume (np.ndarray): Of shape [w, h, slices]
        affine (np.ndarray): Affine coordinates (rotation and translation),
        shape [4, 4]
    """
    # Load file
    data = nib.load(path, keep_file_open=False)
    volume = data.get_fdata(caching='unchanged')  # [w, h, slices]
    affine = data.affine
    # Squeeze optional 4th dimension
    if volume.ndim == 4:
        volume = volume.squeeze(-1)
    # Resize if size is given and if necessary
    # NOTE(review): the condition checks only the first two dims, but the
    # resize targets a [size, size, size] cube -- the slice axis is resized
    # too. Confirm this is intended for anisotropic volumes.
    if size is not None and (volume.shape[0] != size or volume.shape[1] != size):
        volume = resize(volume, [size, size, size])
    # Convert
    volume = volume.astype(np.dtype(dtype))
    # Move primary axis to first dimension
    volume = np.moveaxis(volume, primary_axis, 0)
    return volume, affine
def save_nii(path: str, volume: np.ndarray, affine: np.ndarray = None,
             dtype: str = "float32", primary_axis: int = 0) -> None:
    """Save a neuroimaging file (.nii) with nibabel
    https://nipy.org/nibabel/reference/nibabel.html
    Args:
        path (str): Path to save file at
        volume (np.ndarray): Image as numpy array
        affine (np.ndarray): Affine transformation that determines the
        world-coordinates of the image elements
        dtype (str): Numpy dtype of saved image
        primary_axis (int): The primary axis. Needs to be put back in place
    """
    # Identity affine when none is supplied (no rotation/translation).
    if affine is None:
        affine = np.eye(4)
    # Undo the axis move performed by load_nii before writing to disk.
    volume = np.moveaxis(volume, 0, primary_axis)
    nib.save(nib.Nifti1Image(volume.astype(dtype), affine), path)
def histogram_equalization(volume):
    """Histogram-equalize *volume* over its foreground (strictly positive)
    values, keeping the background at exactly 0 and preserving the input
    dtype.
    """
    original_dtype = volume.dtype
    # Foreground mask: 1 where the volume is positive, 0 elsewhere
    # (same dtype as the input volume).
    foreground = np.zeros_like(volume)
    foreground[volume > 0] = 1
    # Equalize using only the masked voxels, then cast back to the
    # original dtype.
    equalized = equalize_hist(volume, nbins=256, mask=foreground).astype(original_dtype)
    # Re-zero the background, which equalization may have shifted.
    return equalized * foreground
def process_scan(path: str, size: int = None, equalize_hist: bool = False,
                 return_affine: bool = False) -> np.ndarray:
    """Load and pre-process a medical 3D scan.

    Args:
        path (str): Path to file
        size (int): Optional, spatial dimension (height / width)
        equalize_hist (bool): Perform histogram equalization
        return_affine (bool): Whether to also return the affine matrix

    Returns:
        volume (np.ndarray): Loaded and pre-processed scan; when
        return_affine is True, a (volume, affine) tuple instead.
    """
    volume, affine = load_nii(path=path, size=size, primary_axis=2, dtype="float32")
    # Optional intensity normalization over the foreground voxels.
    if equalize_hist:
        volume = histogram_equalization(volume)
    return (volume, affine) if return_affine else volume
def load_segmentation(path: str, size: int = None, bin_threshold: float = 0.4):
    """Load a segmentation volume and binarize it.

    Args:
        path (str): Path to file
        size (int): Optional, spatial dimension (height / width)
        bin_threshold (float): Threshold above which a voxel belongs to
            the segmentation

    Returns:
        np.ndarray of dtype ``np.short`` with values in {0, 1}.
    """
    raw, _ = load_nii(path, size=size, primary_axis=2, dtype='float32')
    # Everything strictly above the threshold is foreground (1).
    return (raw > bin_threshold).astype(np.short)
def load_image(path: str, img_size: int = None):
    """Load the image at *path* as a grayscale tensor, optionally resized
    so its smaller side equals *img_size*."""
    gray = Image.open(path).convert("L")
    if img_size is not None:
        gray = transforms.Resize(img_size)(gray)
    return transforms.ToTensor()(gray)
if __name__ == '__main__':
    # Manual smoke test: load one training scan and browse it interactively.
    size = 256
    # path = "/home/felix/datasets/MOOD/brain/test_label/pixel/00480_uniform_shift.nii.gz"
    # path = "/home/felix/datasets/MOOD/abdom/test_label/pixel/00330_slice_shuffle.nii.gz"
    # segmentation = load_segmentation(path, size=size)
    # path = "/home/felix/datasets/MOOD/brain/test/00480_uniform_shift.nii.gz"
    # path = "/home/felix/datasets/MOOD/abdom/test/00330_slice_shuffle.nii.gz"
    path = "/home/felix/datasets/MOOD/brain/train/00000.nii.gz"
    volume = process_scan(path, size=size, equalize_hist=False)
    print(volume.shape)
    volume_viewer(volume)
    # NOTE(review): leftover debug shell -- drops into IPython and exits.
    import IPython ; IPython.embed() ; exit(1)
| StarcoderdataPython |
1766504 | <filename>pycon_project/migrations/008_create_sessions_slots.py
def migrate():
    """Promote accepted proposals to schedule sessions and slot the
    PyCon 2011 tutorials.

    Steps, in order (order matters -- the sequence reset must follow the
    explicit-id inserts done by promote_proposal):
      1. Promote every accepted proposal to a schedule Session.
      2. Re-sync the schedule_session primary-key sequence via
         PostgreSQL setval().
      3. Create four tutorial Slots (Wed/Thu morning/afternoon, times in
         US Eastern) and attach each listed tutorial Session, matched by
         exact title, reporting misses.

    NOTE(review): Python 2 print statements; session_type=3 presumably
    identifies tutorial sessions -- confirm against the Session model.
    """
    from datetime import datetime
    from django.db import connections
    from review.models import ProposalResult, promote_proposal
    from schedule.models import Slot, Session
    accepted_proposals = ProposalResult.objects.filter(accepted=True)
    accepted_proposals = accepted_proposals.order_by("proposal")
    for result in accepted_proposals:
        promote_proposal(result.proposal)
    # PostgreSQL-specific: reset the id sequence after explicit-id inserts
    connections["default"].cursor().execute("SELECT setval('schedule_session_id_seq', (SELECT max(id) FROM schedule_session))")
    wed_morn_start = datetime(2011, 3, 9, 9, 0) # 9AM Eastern
    wed_morn_end = datetime(2011, 3, 9, 12, 0) # Noon Eastern
    wed_after_start = datetime(2011, 3, 9, 14, 0) # 2PM Eastern
    wed_after_end = datetime(2011, 3, 9, 17, 0) # 5PM Eastern
    thu_morn_start = datetime(2011, 3, 10, 9, 0) # 9AM Eastern
    thu_morn_end = datetime(2011, 3, 10, 12, 0) # Noon Eastern
    thu_after_start = datetime(2011, 3, 10, 14, 0) # 2PM Eastern
    thu_after_end = datetime(2011, 3, 10, 17, 0) # 5PM Eastern
    # Each entry maps one slot's start/end times to the exact titles of
    # the tutorial sessions that belong in it.
    slots = [
        {
            "start": wed_morn_start,
            "end": wed_morn_end,
            "titles": [
                "Python 101",
                "Pinax Solutions",
                "web2py secrets",
                "Scientific Python Tools not only for Scientists and Engineers",
                "Distributed and Cloud computing with Python",
                "Building your own tile server using OpenStreetMap",
                "Advanced Python I"
            ]
        },
        {
            "start": wed_after_start,
            "end": wed_after_end,
            "titles": [
                "Google App Engine workshop",
                "Python For Total Beginners Using \"Learn Python The Hard Way\"",
                "Mining and Visualizing Data from the Social Web with Python",
                "Advanced Python II",
                "Packet Crafting with Python",
                "Packaging, Documenting, and Distributing your Python Codebase",
                "Geospatial Computation and Visualization Cooperative Lab",
            ]
        },
        {
            "start": thu_morn_start,
            "end": thu_morn_end,
            "titles": [
                "Hands on Beginning Python",
                "Mastering Python 3 I/O",
                "Creating GUI Applications in Python using Qt I",
                "Python/Django deployment workshop",
                "Applied machine learning in python with scikit-learn",
                "Tutorial -- Doing Data Structures in Python",
                "(Re-)Introduction to C for Pythonistas",
            ]
        },
        {
            "start": thu_after_start,
            "end": thu_after_end,
            "titles": [
                "Hands on Intermediate Python",
                "Cooking with Python 3",
                "Creating GUI Applications in Python using Qt II",
                "Faster Python Programs through Optimization",
                "Writing Python extensions in C",
                "Deploying web applications to the cloud",
                "Documenting Your Project With Sphinx",
            ]
        }
    ]
    for slot in slots:
        s = Slot.objects.create(start=slot["start"], end=slot["end"])
        for title in slot["titles"]:
            try:
                # Titles must match exactly; misses are reported, not fatal
                session = Session.objects.get(title=title, session_type=3)
                session.slot = s
                session.save()
                print "Saved", title
            except Session.DoesNotExist:
                print "Missed", title
| StarcoderdataPython |
48607 | <reponame>Fragoso09/orstool-qgis
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ORStools
A QGIS plugin
QGIS client to query openrouteservice
-------------------
begin : 2017-02-01
git sha : $Format:%H$
copyright : (C) 2017 by <NAME>
email : <EMAIL>
***************************************************************************/
This plugin provides access to the various APIs from OpenRouteService
(https://openrouteservice.org), developed and
maintained by GIScience team at University of Heidelberg, Germany. By using
this plugin you agree to the ORS terms of service
(https://openrouteservice.org/terms-of-service/).
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from itertools import product
from PyQt5.QtCore import QVariant
from qgis.core import (QgsPointXY,
QgsGeometry,
QgsFeature,
QgsFields,
QgsField)
from ORStools.utils import convert
def get_request_point_features(route_dict, row_by_row):
    """
    Generate routing requests by pairing start and end point features.

    :param route_dict: all coordinates and ID field values of start and end point layers
    :type route_dict: dict

    :param row_by_row: Specifies whether row-by-row relation or all-by-all has been used.
    :type row_by_row: str

    :returns: tuple of coordinates and ID field value for each routing feature in route_dict
    :rtype: tuple
    """
    start = route_dict['start']
    end = route_dict['end']

    # Row-by-row pairs the layers one-to-one; otherwise take the full
    # cross product of start and end features.
    pair_up = zip if row_by_row == 'Row-by-Row' else product

    geometry_pairs = pair_up(start['geometries'], end['geometries'])
    value_pairs = pair_up(start['values'], end['values'])

    for geometries, values in zip(geometry_pairs, value_pairs):
        # Skip degenerate routes whose origin equals their destination
        if geometries[0] == geometries[-1]:
            continue
        yield (convert.build_coords(geometries), values)
def get_fields(from_type=QVariant.String, to_type=QVariant.String, from_name="FROM_ID", to_name="TO_ID", line=False):
    """
    Assemble the attribute schema for a directions response layer.

    :param from_type: field type for 'FROM_ID' field
    :type from_type: QVariant enum
    :param to_type: field type for 'TO_ID' field
    :type to_type: QVariant enum
    :param from_name: field name for 'FROM_ID' field
    :type from_name: str
    :param to_name: field name for 'TO_ID' field
    :type to_name: str
    :param line: Specifies whether the output feature is a line or a point
    :type line: boolean

    :returns: fields object to set attributes of output layer
    :rtype: QgsFields
    """
    field_specs = [
        ("DIST_KM", QVariant.Double),
        ("DURATION_H", QVariant.Double),
        ("PROFILE", QVariant.String),
        ("PREF", QVariant.String),
        ("AVOID_TYPE", QVariant.String),
        (from_name, from_type),
    ]
    # Point outputs additionally record the destination ID
    if not line:
        field_specs.append((to_name, to_type))

    fields = QgsFields()
    for field_name, field_type in field_specs:
        fields.append(QgsField(field_name, field_type))
    return fields
def get_output_feature(response, profile, preference, avoid=None, from_value=None, to_value=None, line=False):
    """
    Build output feature based on response attributes.

    :param response: API response object
    :type response: dict
    :param profile: Transportation mode being used
    :type profile: str
    :param preference: Cost being used, shortest or fastest.
    :type preference: str
    :param avoid: Avoidables being used.
    :type avoid: str
    :param from_value: value of 'FROM_ID' field
    :type from_value: any
    :param to_value: value of 'TO_ID' field (only written when line is False)
    :type to_value: any
    :param line: Specifies whether output feature is a line or a point
    :type line: boolean

    :returns: Output feature with attributes and geometry set.
    :rtype: QgsFeature
    """
    response_mini = response['features'][0]
    feat = QgsFeature()
    coordinates = response_mini['geometry']['coordinates']
    distance = response_mini['properties']['summary'][0]['distance']
    duration = response_mini['properties']['summary'][0]['duration']
    qgis_coords = [QgsPointXY(x, y) for x, y in coordinates]
    feat.setGeometry(QgsGeometry.fromPolylineXY(qgis_coords))
    # Build the attribute row once instead of duplicating the whole list in
    # both branches; point outputs carry the extra TO_ID column, mirroring
    # the schema produced by get_fields().
    attributes = ["{0:.3f}".format(distance / 1000),
                  "{0:.3f}".format(duration / 3600),
                  profile,
                  preference,
                  avoid,
                  from_value]
    if not line:
        attributes.append(to_value)
    feat.setAttributes(attributes)
    return feat
| StarcoderdataPython |
1659122 | from rest_framework import serializers
from stump.models import AppearanceType
class AppearanceTypeSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the AppearanceType model."""
    class Meta:
        # '__all__' keeps the serialized fields in sync with the model
        model = AppearanceType
        fields = '__all__'
| StarcoderdataPython |
98787 | <reponame>denisrmp/hacker-rank<filename>hacker-rank/implementation/new_year_chaos.py
# https://www.hackerrank.com/challenges/new-year-chaos
from collections import deque
def new_year_chaos(n, q):
    """Return the minimum number of bribes that produced queue state q,
    or 'Too chaotic' if anyone moved more than two places forward.

    Args:
        n: Number of people in the queue.
        q: Final queue, a sequence of the people's original 1-based positions.

    Returns:
        int total bribes (= number of inversions), or the string
        'Too chaotic' when any person advanced more than two positions.

    The original implementation repeatedly called list.index/list.remove
    (Theta(n^2) work on every input) and destructively consumed the
    caller's deque; this sweep counts the same inversions without mutating
    the input.
    """
    queue = list(q)  # O(1) indexing; leaves the caller's deque intact
    bribes = 0
    for i, person in enumerate(queue, start=1):
        # A person can bribe at most twice, so nobody can stand more than
        # two positions ahead of their original spot.
        if person - i > 2:
            return 'Too chaotic'
        # Count everyone currently ahead who overtook this person. A
        # briber ends up no earlier than one slot before their own
        # original position, so scanning from index person-2 (0-based)
        # suffices.
        for j in range(max(0, person - 2), i - 1):
            if queue[j] > person:
                bribes += 1
    return bribes
# Read T independent test cases from stdin: each case is a line with the
# queue length n followed by a line of n space-separated final positions.
T = int(input())
for _ in range(T):
    n = int(input())
    q = deque((int(i) for i in input().split()), n)
    print(new_year_chaos(n, q))
| StarcoderdataPython |
1649660 | import logging
import os
import re
import jinja2
from functools import total_ordering
import sqlalchemy as sa
from datadock.helpers import extract_type_annotations, extract_flag_comment, check_flag
logger = logging.getLogger(__name__)
@total_ordering
@total_ordering
class Statement:
    """A Jinja2-templated SQL statement backed by a .sql file.

    The statement name is the filename with an optional two-digit ordering
    prefix ("NN_") and the ".sql" suffix stripped. Instances compare and
    sort by filename, so files named "01_x.sql", "02_y.sql", ... run in
    the intended order when sorted.
    """

    def __init__(self, path: str, default_source_url: str = None, scalarize=False):
        self.path = path
        self.filename = os.path.basename(self.path)
        self.name = re.sub("^[0-9][0-9]_", "", re.sub(".sql$", "", self.filename))
        self.default_source_url = default_source_url
        # If True and the query yields single-column rows, run() returns a
        # flat list of scalars instead of one-tuples.
        self.scalarize = scalarize
        # Populated at runtime when run() is called
        self.sql = None
        self.source_url = None

    def __repr__(self):
        return f"{self.__class__.__name__}({self.path=}, {self.filename=}, {self.name=})"

    def __str__(self):
        return self.__repr__()

    def __call__(self, **kwargs):
        return self.run(**kwargs)

    def __gt__(self, other):
        # Order by filename; @total_ordering derives the remaining
        # comparisons from __gt__ and __eq__.
        if not isinstance(other, Statement):
            return NotImplemented
        return self.filename > other.filename

    def __eq__(self, other):
        if not isinstance(other, Statement):
            return NotImplemented
        return self.filename == other.filename

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ made Statement unhashable
        # (Python sets __hash__ to None); keep hash consistent with equality.
        return hash(self.filename)

    def has_ordering_tag(self):
        """Return True if the filename carries a "NN_..." ordering prefix."""
        return bool(re.findall("^[0-9][0-9].*_", self.filename))

    def load(self, **kwargs):
        """Render the file's Jinja2 template with kwargs and extract the
        optional '--source' flag comment naming the database URL."""
        with open(self.path, "r") as f:
            template = f.read()
        self.sql = jinja2.Template(template).render(kwargs)
        self.source_url = extract_flag_comment(self.sql, '--source')

    def run(self, **kwargs):
        """Render and execute the statement, returning the fetched rows.

        Recognised kwargs: dry=True renders and prints the SQL without
        executing (returns None); print=True echoes the rendered SQL. All
        kwargs are also forwarded to the Jinja2 template.

        Raises:
            RuntimeError: when neither a '--source' flag nor a
                default_source_url provides a database URL.
        """
        self.load(**kwargs)
        do_dry = 'dry' in kwargs and kwargs['dry']
        do_print = 'print' in kwargs and kwargs['print']
        if do_dry or do_print:
            print(self.sql)
        if do_dry:
            return
        if not self.source_url and not self.default_source_url:
            raise RuntimeError("No --source flag found in sql and no default_source_url specified.")
        # The per-file '--source' URL takes precedence over the default
        engine = sa.create_engine(self.source_url or self.default_source_url)
        logger.info(f"Running {self.filename}")
        with engine.connect() as conn:
            result = conn.execute(sa.text(self.sql))
            rows = result.fetchall()
        if rows and self.scalarize and len(rows[0]) == 1:
            rows = [r[0] for r in rows]
        return rows
#TODO
class StatementReturn:
    """A SQL statement whose annotated result set can be copied into a
    SQLite database.

    Flag comments embedded in the SQL drive the behaviour:
      --from <url>     source database URL
      --to <url>       destination database URL (SQLite)
      --name <name>    destination table name (overrides the one derived
                       from the filename in __init__)
      --return-data    when present, fetch rows and write them to the
                       destination; otherwise just execute the SQL

    Column types come from type annotations extracted out of the SQL by
    extract_type_annotations and are evaluated as sqlalchemy type
    expressions (see the injection warning in table_from_sql).
    """
    def __init__(self, filename: str):
        self.filename = filename
        # NOTE: this name may be overwritten by a --name flag in reload()
        self.name = re.sub("^[0-9][0-9]_", "", re.sub(".sql$", "", filename))
        self.reload()
    def __repr__(self):
        return f"{self.__class__.__name__}({self.filename=}, {self.name=})"
    def __str__(self):
        return self.__repr__()
    def __call__(self):
        self.run()
    def reload(self):
        """Re-read the SQL file and re-parse its flags, annotations and
        destination table definition."""
        with open(self.filename, "r") as f:
            self.sql = f.read()
        self.columns = extract_type_annotations(self.sql)
        self.from_url = extract_flag_comment(self.sql, '--from')
        self.to_url = extract_flag_comment(self.sql, '--to')
        self.name = extract_flag_comment(self.sql, '--name')
        self.return_data = check_flag(self.sql, '--return-data')
        self.table = self.table_from_sql()
    def table_from_sql(self):
        """Build a sqlalchemy Table from the column type annotations.

        Each annotation string is eval'd (via exec) in a namespace seeded
        with sqlalchemy.types, e.g. "Integer" or "String(32)".
        """
        cols = []
        for name, type_string in self.columns.items():
            mylocalenv = {}
            #############################################
            # Be careful, vulnerable to code injection. #
            #############################################
            exec(
                'typ = ' + type_string,
                sa.types.__dict__.copy(),
                mylocalenv
            )
            cols.append(sa.Column(name=name, type_=mylocalenv['typ']))
        meta = sa.MetaData()
        return sa.Table(
            self.name,
            meta,
            *cols
        )
    def get_rows(self):
        """Execute the SQL against the --from database, typed by the
        annotated columns, and return all rows as a list."""
        engine = sa.create_engine(self.from_url)
        stmt = sa.text(self.sql).columns(*self.table.c)
        with engine.connect() as conn:
            result = conn.execute(stmt)
            return list(result.fetchall())
    def run_basic_sql(self):
        """Execute the SQL for its side effects only (no fetch).

        NOTE(review): the result proxy is returned after the connection
        context closes -- confirm callers do not try to fetch from it.
        """
        engine = sa.create_engine(self.from_url)
        with engine.connect() as conn:
            result = conn.execute(sa.text(self.sql))
            return result
    def to_sqlite_db(self, rows, clobber=False):
        """Insert rows into the --to SQLite database, creating (and with
        clobber=True first dropping) the destination table."""
        # ':///' guards that the destination is a file-style URL (SQLite)
        assert ':///' in self.to_url
        assert len(self.table.metadata.tables) == 1
        rowdicts = [{k: v for k, v in zip(self.table.c, r)} for r in rows]
        engine = sa.create_engine(self.to_url)
        with engine.connect() as conn:
            if clobber:
                i = sa.inspect(conn)
                tablenames = i.get_table_names()
                if self.table.name in tablenames:
                    self.table.drop(conn)
            self.table.metadata.create_all(engine)
            stmt = sa.insert(self.table).values(rowdicts)
            conn.execute(stmt)
    def run(self):
        """Reload the SQL and either copy its result set to the --to
        database (--return-data present) or just execute it."""
        self.reload()
        if self.return_data:
            rows = self.get_rows()
            self.to_sqlite_db(rows)
        else:
            self.run_basic_sql()
| StarcoderdataPython |
3267562 | <filename>unitorch/cli/models/swin/__init__.py
# Copyright (c) FULIUCANSHENG.
# Licensed under the MIT License.
# pretrained infos
pretrained_swin_infos = {
"default-swin": {
"config": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/preprocessor_config.json",
},
"swin-tiny-patch4-window7-224": {
"config": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/preprocessor_config.json",
"weight": "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/pytorch_model.bin",
},
"swin-base-patch4-window7-224": {
"config": "https://huggingface.co/microsoft/swin-base-patch4-window7-224/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-base-patch4-window7-224/resolve/main/preprocessor_config.json",
"weight": "https://huggingface.co/microsoft/swin-base-patch4-window7-224/resolve/main/pytorch_model.bin",
},
"swin-large-patch4-window7-224": {
"config": "https://huggingface.co/microsoft/swin-large-patch4-window7-224/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-large-patch4-window7-224/resolve/main/preprocessor_config.json",
"weight": "https://huggingface.co/microsoft/swin-large-patch4-window7-224/resolve/main/pytorch_model.bin",
},
"swin-base-patch4-window7-224-in22k": {
"config": "https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k/resolve/main/preprocessor_config.json",
"weight": "https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k/resolve/main/pytorch_model.bin",
},
"swin-large-patch4-window7-224-in22k": {
"config": "https://huggingface.co/microsoft/swin-large-patch4-window7-224-in22k/resolve/main/config.json",
"vision_config": "https://huggingface.co/microsoft/swin-large-patch4-window7-224-in22k/resolve/main/preprocessor_config.json",
"weight": "https://huggingface.co/microsoft/swin-large-patch4-window7-224-in22k/resolve/main/pytorch_model.bin",
},
}
import unitorch.cli.models.swin.modeling
import unitorch.cli.models.swin.processing
| StarcoderdataPython |
3200445 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
FISCO BCOS/Python-SDK is a python client for FISCO BCOS2.0 (https://github.com/FISCO-BCOS/)
FISCO BCOS/Python-SDK is free software: you can redistribute it and/or modify it under the
terms of the MIT License as published by the Free Software Foundation. This project is
distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. Thanks for
authors and contributors of eth-abi, eth-account, eth-hash,eth-keys, eth-typing, eth-utils,
rlp, eth-rlp , hexbytes ... and relative projects
@author: kentzhang
@date: 2019-06
'''
from client.bcosclient import BcosClient
from client.datatype_parser import DatatypeParser
import uuid
import json
import threading
from utils.encoding import FriendlyJsonSerde
from client.channelpack import ChannelPack
from client.channel_push_dispatcher import ChannelPushHandler
class EventCallbackHandler:
    """Event callback interface.
    on_event receives the pushed logs already parsed into a JSON object,
    but not yet decoded against the contract ABI.
    Subclass EventCallbackHandler, implement on_event, and pass an
    instance when listening for a specific event.
    ** Note: callers are responsible for de-duplicating events.
    """
    def on_event(self, eventdata):
        # eventdata: dict parsed from the pushed JSON payload
        pass
class EventCallbackManager(ChannelPushHandler):
    """Manages EventCallbackHandler instances keyed by filterID.
    Receives AMOP push messages of type 0x1002 (EVENT_LOG_PUSH) and
    dispatches each one to the callback registered for its filterID.
    NOTE(review): callback_register and lock are class-level attributes,
    shared by every instance of this manager -- fine while it is used as
    a singleton, but worth confirming.
    """
    abiparser: DatatypeParser = None
    callback_register = dict()
    lock = threading.RLock()
    def set_callback(self, filterid, callback):
        # Register (or replace) the callback for a filterID.
        try:
            self.lock.acquire()
            #print("set callbackup ",filterid,callback)
            self.callback_register[filterid] = callback
        except Exception as e:
            # NOTE(review): message copied from the dispatcher code, and
            # self.logger is not defined on this class (presumably expected
            # from ChannelPushHandler -- verify before relying on this path)
            self.logger.error("channel push dispatcher add handler error", e)
        finally:
            self.lock.release()
    def remove_callback(self, filterid, callback):
        # Drop the callback for a filterID (the callback argument is unused).
        try:
            self.lock.acquire()
            if filterid in self.callback_register:
                self.callback_register.pop(filterid)
        except Exception as e:
            # NOTE(review): same copied message / self.logger concern as above
            self.logger.error("channel push dispatcher add handler error", e)
        finally:
            self.lock.release()
    def get_callback(self, filterid):
        # Look up the callback registered for filterid, or None.
        cb = None
        try:
            self.lock.acquire()
            if filterid in self.callback_register:
                cb = self.callback_register[filterid]
        except Exception as e:
            self.logger.error("get_callback error", e)
        finally:
            self.lock.release()
        return cb
    # on_push from channel_push_dispatcher: decode the pushed JSON and
    # dispatch to the callback registered under its filterID (if any).
    def on_push(self, packmsg: ChannelPack):
        # print("--------------------EventPushHandler: type {},result:{},len:{}".format(
        #    hex(packmsg.type), packmsg.result, packmsg.totallen))
        if packmsg.type != ChannelPack.EVENT_LOG_PUSH:
            print("WRONG TYPE:-EventPushHandler: type {},result:{},len:{}".format(
                hex(packmsg.type), packmsg.result, packmsg.totallen))
            return
        strmsg = packmsg.data.decode("utf-8")
        eventdata = json.loads(strmsg)
        filterid = eventdata["filterID"]
        # find callback implement by filterid
        eventcallback = self.get_callback(filterid)
        if eventcallback is None:
            return
        eventcallback.on_event(eventdata)
class BcosEventCallback:
    """Main class of this module: helper methods for registering contract
    event-log filters over the FISCO BCOS channel protocol.
    Typical usage:
        abifile = "contracts/" + contractname + ".abi"
        abiparser = DatatypeParser(abifile)
        eventcallback01 = EventCallbackImpl01()
        eventcallback01.abiparser = abiparser
        #---------
        bcos_event = BcosEventCallback()
        bcos_event.setclient(BcosClient())
        result = bcos_event.register_eventlog_filter(
            eventcallback01, abiparser, [address], event_name, indexed_value)
    """
    client: BcosClient = None
    ecb_manager = EventCallbackManager()
    def format_event_register_request(
            self,
            from_block,
            to_block,
            addresses,
            topics,
            groupid,
            filterid):
        '''Serialize an event-log registration request to JSON, e.g.:
        {
            "fromBlock": "latest",
            "toBlock": "latest",
            "addresses": [
                0xca5ed56862869c25da0bdf186e634aac6c6361ee
            ],
            "topics": [
                "0x91c95f04198617c60eaf2180fbca88fc192db379657df0e412a9f7dd4ebbe95d"
            ],
            "groupID": "1",
            "filterID": "bb31e4ec086c48e18f21cb994e2e5967"
        }'''
        request = dict()
        request["fromBlock"] = from_block
        request["toBlock"] = to_block
        request["addresses"] = addresses
        request["topics"] = topics
        request["groupID"] = groupid
        request["filterID"] = filterid
        requestJson = FriendlyJsonSerde().json_encode(request)
        return requestJson
    # Must be set through this method so the manager is registered as a
    # push handler at the same time; otherwise the manager would have to
    # be registered separately.
    def setclient(self, client):
        self.client = client
        self.add_channel_push_handler(self.ecb_manager)
    def add_channel_push_handler(self, eventHandler):
        # Hook the handler into the client's push dispatcher for
        # EVENT_LOG_PUSH messages (no-op if the channel is not set up).
        if self.client.channel_handler is not None:
            self.client.channel_handler.pushDispacher.add_handler(
                ChannelPack.EVENT_LOG_PUSH, eventHandler)
    # Main entry point: register an event-log filter.
    def register_eventlog_filter(
            self,
            eventcallback,
            abiparser,
            addresses,
            event_name,
            indexed_value=None,
            fromblock="latest",
            to_block="latest"):
        """Register eventcallback for event_name on the given contract
        addresses, optionally narrowed by indexed argument values.
        Builds the topic list from the event signature (topic0) plus any
        indexed values, sends the registration over the channel, and on
        success (result == 0) records the callback under the new filterID.
        Returns the decoded response object.
        """
        topics = []
        if event_name is not None:
            topic0 = abiparser.topic_from_event_name(event_name)
            topics.append(topic0)
            event_abi = abiparser.event_name_map[event_name]
            #print("event abi:", event_abi)
            if indexed_value is not None and len(indexed_value) > 0:
                # Collect (name, type) of the event's indexed inputs, then
                # encode each supplied value as a topic of matching type.
                indexedinput = []
                for event_input in event_abi["inputs"]:
                    if event_input["indexed"] is True:
                        indexedinput.append((event_input['name'], event_input['type']))
                # print(indexedinput)
                i = 0
                for v in indexed_value:
                    itype = indexedinput[i][1]
                    topic = DatatypeParser.topic_from_type(itype, v)
                    if not (topic is None):
                        topics.append(topic)
                    i = i + 1
        # create new filterid by uuid
        seq = uuid.uuid1()
        filterid = seq.hex
        requestJson = self.format_event_register_request(
            fromblock, to_block, addresses, topics, self.client.groupid, filterid)
        requestbytes = ChannelPack.pack_amop_topic_message("", requestJson)
        response = self.client.channel_handler.make_channel_request(
            requestbytes, ChannelPack.CLIENT_REGISTER_EVENT_LOG, ChannelPack.CLIENT_REGISTER_EVENT_LOG)
        (topic, result) = ChannelPack.unpack_amop_topic_message(response)
        dataobj = json.loads(result)
        # print(dataobj)
        if dataobj["result"] == 0:
            # Only bind the callback once the node accepted the filter
            self.ecb_manager.set_callback(filterid, eventcallback)
        return dataobj
| StarcoderdataPython |
4837155 | <filename>Week 8/try_week8.py
# Print every word in words.txt, one per line.
# Fix: use a context manager so the file handle is always closed (the
# original opened the file and never closed it).
with open('words.txt') as fin:
    for line in fin:
        word = line.strip()  # drop the trailing newline / surrounding whitespace
        print(word)
1761149 | """ Household microsynthesis """
import numpy as np
import pandas as pd
import ukcensusapi.Nomisweb as Api_ew
import ukcensusapi.NRScotland as Api_sc
import humanleague
import household_microsynth.utils as utils
import household_microsynth.seed as seed
class Household:
    """Household microsynthesis: builds a synthetic dwelling population
    for a region from UK census tables (Nomisweb / NRScotland)."""
    # Sentinel category values used throughout the synthesised table:
    # UNKNOWN marks attributes that exist but were not synthesised;
    # NOTAPPLICABLE marks attributes that do not apply to the dwelling
    # type (e.g. communal fields on ordinary households).
    UNKNOWN = -1
    NOTAPPLICABLE = -2
    # initialise, supplying geographical area and resolution , plus (optionally) a location to cache downloads
    def __init__(self, region, resolution, cache_dir="./cache"):
        """Download the census tables for region at the given resolution.
        Args:
            region: ONS/NRS region code; a leading "S" enables the
                Scottish data workarounds.
            resolution: geographical resolution of the output areas.
            cache_dir: directory used to cache census downloads.
        """
        self.api_ew = Api_ew.Nomisweb(cache_dir)
        self.api_sc = Api_sc.NRScotland(cache_dir)
        self.region = region
        # NOTE(review): original comment said "convert input string to
        # enum" but no conversion happens -- resolution is stored as-is
        self.resolution = resolution
        # Temporary workaround for unavailable Scottish data
        self.scotland = False
        if self.region[0] == "S":
            self.scotland = True
            print("Running in 'scotland mode'")
        # (down)load the census tables
        self.__get_census_data()
        # initialise table and index
        categories = ["Area", "LC4402_C_TYPACCOM", "QS420_CELL", "LC4402_C_TENHUK11", "LC4408_C_AHTHUK11", "CommunalSize",
                      "LC4404_C_SIZHUK11", "LC4404_C_ROOMS", "LC4405EW_C_BEDROOMS", "LC4408EW_C_PPBROOMHEW11",
                      "LC4402_C_CENHEATHUK11", "LC4605_C_NSSEC", "LC4202_C_ETHHUK11", "LC4202_C_CARSNO"]
        self.total_dwellings = sum(self.ks401.OBS_VALUE) + sum(self.communal.OBS_VALUE)
        # self.dwellings = pd.DataFrame(index=range(0, self.total_dwellings), columns=categories)
        self.dwellings = pd.DataFrame(columns=categories)
        self.index = 0
        # generate indices
        self.type_index = self.lc4402.C_TYPACCOM.unique()
        self.tenure_index = self.lc4402.C_TENHUK11.unique()
        self.ch_index = self.lc4402.C_CENHEATHUK11.unique()
        self.comp_index = self.lc4408.C_AHTHUK11.unique()
    def run(self):
        """Run the microsynthesis over every area.
        For each output area: synthesise occupied households, enumerate
        communal residences, then add unoccupied dwellings sampled from
        the occupied ones. Results accumulate in self.dwellings.
        """
        area_map = self.lc4404.GEOGRAPHY_CODE.unique()
        # construct seed disallowing states where B>R]
        # T R O B H (H=household type)
        # use 7 waves (2009-2015 incl)
        constraints = seed.get_survey_TROBH() #[1,2,3,4,5,6,7]
        # bedrooms removed for Scotland, so collapse the bedroom axis to size 1
        if self.scotland:
            constraints = np.expand_dims(np.sum(constraints, axis=3), 3)
        for area in area_map:
            print('.', end='', flush=True)  # progress indicator, one dot per area
            # 1. households
            self.__add_households(area, constraints)
            # add communal residences
            self.__add_communal(area)
            # # add unoccupied properties
            self.__add_unoccupied(area)
        # end area loop
        # temp fix - TODO remove this column?
        self.dwellings.LC4408EW_C_PPBROOMHEW11 = np.repeat(self.UNKNOWN, len(self.dwellings.LC4408EW_C_PPBROOMHEW11))
    def __add_households(self, area, constraints):
        """Microsynthesise occupied households for one area.
        Builds a joint distribution consistent with the census marginals
        (LC4404 tenure/rooms/occupants, LC4405 tenure/bedrooms/occupants,
        LC4408 tenure/household-type, LC4402 tenure/heating/accommodation,
        LC4202 tenure/ethnicity/cars, LC4605 tenure/NS-SEC) using
        humanleague QISI/QIS, seeded by the survey-based constraints,
        then appends one row per household to self.dwellings.
        In 'scotland mode' the tenure/bedroom detail missing from the
        Scottish tables is collapsed before synthesis.
        """
        # TODO use actual values from tables
        # TODO make members? # Dim (overall dim)
        tenure_map = self.lc4402.C_TENHUK11.unique() # 0
        rooms_map = self.lc4404.C_ROOMS.unique() # 1
        occupants_map = self.lc4404.C_SIZHUK11.unique() # 2
        bedrooms_map = self.lc4405.C_BEDROOMS.unique() # 3 [1,2,3,4] or [-1]
        hhtype_map = self.lc4408.C_AHTHUK11.unique() # 4
        #
        ch_map = self.lc4402.C_CENHEATHUK11.unique() # 1 (5)
        buildtype_map = self.lc4402.C_TYPACCOM.unique() # 2 (6)
        eth_map = self.lc4202.C_ETHHUK11.unique() # 3 (7)
        cars_map = self.lc4202.C_CARSNO.unique() # 4 (8)
        econ_map = self.lc4605.C_NSSEC.unique() # 5 (9)
        tenure_rooms_occ = self.lc4404.loc[self.lc4404.GEOGRAPHY_CODE == area].copy()
        # unmap indices
        # TODO might be quicker to unmap the entire table upfront?
        utils.unmap(tenure_rooms_occ.C_TENHUK11, tenure_map)
        utils.unmap(tenure_rooms_occ.C_ROOMS, rooms_map)
        utils.unmap(tenure_rooms_occ.C_SIZHUK11, occupants_map)
        m4404 = utils.unlistify(tenure_rooms_occ,
                                ["C_TENHUK11", "C_ROOMS", "C_SIZHUK11"],
                                [len(tenure_map), len(rooms_map), len(occupants_map)],
                                "OBS_VALUE")
        # no bedroom info in Scottish data
        tenure_beds_occ = self.lc4405.loc[self.lc4405.GEOGRAPHY_CODE == area].copy()
        # unmap indices
        utils.unmap(tenure_beds_occ.C_BEDROOMS, bedrooms_map)
        utils.unmap(tenure_beds_occ.C_TENHUK11, tenure_map)
        utils.unmap(tenure_beds_occ.C_SIZHUK11, occupants_map)
        m4405 = utils.unlistify(tenure_beds_occ,
                                ["C_TENHUK11", "C_BEDROOMS", "C_SIZHUK11"],
                                [len(tenure_map), len(bedrooms_map), len(occupants_map)],
                                "OBS_VALUE")
        # print(m4405.shape)
        tenure_accom = self.lc4408.loc[self.lc4408.GEOGRAPHY_CODE == area].copy()
        utils.unmap(tenure_accom.C_TENHUK11, tenure_map)
        utils.unmap(tenure_accom.C_AHTHUK11, hhtype_map)
        m4408 = utils.unlistify(tenure_accom,
                                ["C_TENHUK11", "C_AHTHUK11"],
                                [len(tenure_map), len(hhtype_map)],
                                "OBS_VALUE")
        #print(np.sum(m4404), np.sum(m4405), np.sum(m4408))
        # TODO relax IPF tolerance and maxiters when used within QISI?
        m4408dim = np.array([0, 4])
        # collapse m4408 dim for scotland
        if self.scotland:
            m4408 = np.sum(m4408, axis=0)
            m4408dim = np.array([4])
        # First-stage synthesis: tenure/rooms/occupants/bedrooms/hhtype
        # seeded by the survey constraints
        p0 = humanleague.qisi(constraints, [np.array([0, 1, 2]), np.array([0, 3, 2]), m4408dim], [m4404, m4405, m4408])
        # drop the survey seed if there are convergence problems
        # TODO check_humanleague_result needs complete refactoring
        if not isinstance(p0, dict) or not p0["conv"]:
            print("Dropping TROBH constraint due to convergence failure")
            p0 = humanleague.qisi(seed.get_impossible_TROBH(), [np.array([0, 1, 2]), np.array([0, 3, 2]), m4408dim], [m4404, m4405, m4408])
            utils.check_humanleague_result(p0, [m4404, m4405, m4408], seed.get_impossible_TROBH())
        else:
            utils.check_humanleague_result(p0, [m4404, m4405, m4408], constraints)
        #print("p0 ok")
        tenure_ch_accom = self.lc4402.loc[self.lc4402.GEOGRAPHY_CODE == area].copy()
        utils.unmap(tenure_ch_accom.C_CENHEATHUK11, ch_map)
        utils.unmap(tenure_ch_accom.C_TENHUK11, tenure_map)
        utils.unmap(tenure_ch_accom.C_TYPACCOM, buildtype_map)
        m4402 = utils.unlistify(tenure_ch_accom,
                                ["C_TENHUK11", "C_CENHEATHUK11", "C_TYPACCOM"],
                                [len(tenure_map), len(ch_map), len(buildtype_map)],
                                "OBS_VALUE")
        tenure_eth_car = self.lc4202.loc[self.lc4202.GEOGRAPHY_CODE == area].copy()
        utils.unmap(tenure_eth_car.C_ETHHUK11, eth_map)
        utils.unmap(tenure_eth_car.C_CARSNO, cars_map)
        utils.unmap(tenure_eth_car.C_TENHUK11, tenure_map)
        m4202 = utils.unlistify(tenure_eth_car,
                                ["C_TENHUK11", "C_ETHHUK11", "C_CARSNO"],
                                [len(tenure_map), len(eth_map), len(cars_map)],
                                "OBS_VALUE")
        econ = self.lc4605.loc[self.lc4605.GEOGRAPHY_CODE == area].copy()
        utils.unmap(econ.C_NSSEC, econ_map)
        utils.unmap(econ.C_TENHUK11, tenure_map)
        # econ counts often slightly lower, need to tweak
        ##econ = utils.adjust(econ, tenure_eth_car)
        m4605 = utils.unlistify(econ,
                                ["C_TENHUK11", "C_NSSEC"],
                                [len(tenure_map), len(econ_map)],
                                "OBS_VALUE")
        m4605_sum = np.sum(m4605)
        m4202_sum = np.sum(m4202)
        # Rescale the NS-SEC marginal so its total matches LC4202 before
        # joint synthesis
        if m4605_sum != m4202_sum:
            print("LC4402: %d LC4605: %d -> %d " % (np.sum(m4402), m4605_sum, m4202_sum), end="")
            tenure_4202 = np.sum(m4202, axis=(1, 2))
            nssec_4605_adj = humanleague.prob2IntFreq(np.sum(m4605, axis=0) / m4605_sum, m4202_sum)["freq"]
            # m4605_adj = humanleague.qisi(m4605.astype(float), [np.array([0]), np.array([1])], [tenure_4202, nssec_4605_adj])
            # Convergence problems can occur when e.g. one of the tenure rows is zero yet the marginal total is nonzero,
            # Can get round this by adding a small number to the seed
            # effectively allowing zero states to be occupied with a finite probability
            # if not m4605_adj["conv"]:
            m4605_adj = humanleague.qisi(m4605.astype(float) + 1.0/m4202_sum, [np.array([0]), np.array([1])], [tenure_4202, nssec_4605_adj])
            utils.check_humanleague_result(m4605_adj, [tenure_4202, nssec_4605_adj])
            m4605 = m4605_adj["result"]
        #print("econ adj ok")
        # print(np.sum(p0["result"], axis=(1,2,3,4)))
        # print(np.sum(m4402, axis=(1,2)))
        # print(np.sum(m4202, axis=(1,2)))
        # print(np.sum(m4605, axis=1))
        # Second stage: extend the first-stage population with heating,
        # accommodation type, ethnicity, cars and NS-SEC.
        # no seed constraint so just use QIS
        if self.scotland:
            # tenures not mappable in LC4202
            m4202 = np.sum(m4202, axis=0)
            m4605 = np.sum(m4605, axis=0)
            p1 = humanleague.qis([np.array([0, 1, 2, 3, 4]), np.array([0, 5, 6]), np.array([7, 8]), np.array([9])], [p0["result"], m4402, m4202, m4605])
            #p1 = humanleague.qis([np.array([0, 1, 2, 3]), np.array([0, 4, 5]), np.array([0, 6, 7])], [p0["result"], m4402, m4202])
        else:
            p1 = humanleague.qis([np.array([0, 1, 2, 3, 4]), np.array([0, 5, 6]), np.array([0, 7, 8]), np.array([0, 9])], [p0["result"], m4402, m4202, m4605])
            #p1 = humanleague.qis([np.array([0, 1, 2, 3]), np.array([0, 4, 5]), np.array([0, 6, 7])], [p0["result"], m4402, m4202])
        utils.check_humanleague_result(p1, [p0["result"], m4402, m4202, m4605])
        #print("p1 ok")
        # Expand the joint counts to one row per household and map the
        # category indices back to census codes.
        table = humanleague.flatten(p1["result"])
        chunk = pd.DataFrame(columns=self.dwellings.columns.values)
        chunk.Area = np.repeat(area, len(table[0]))
        chunk.LC4402_C_TENHUK11 = utils.remap(table[0], tenure_map)
        chunk.QS420_CELL = np.repeat(self.NOTAPPLICABLE, len(table[0]))
        chunk.LC4404_C_ROOMS = utils.remap(table[1], rooms_map)
        chunk.LC4404_C_SIZHUK11 = utils.remap(table[2], occupants_map)
        chunk.LC4405EW_C_BEDROOMS = utils.remap(table[3], bedrooms_map)
        chunk.LC4408_C_AHTHUK11 = utils.remap(table[4], hhtype_map)
        chunk.LC4402_C_CENHEATHUK11 = utils.remap(table[5], ch_map)
        chunk.LC4402_C_TYPACCOM = utils.remap(table[6], buildtype_map)
        chunk.CommunalSize = np.repeat(self.NOTAPPLICABLE, len(table[0]))
        chunk.LC4202_C_ETHHUK11 = utils.remap(table[7], eth_map)
        chunk.LC4202_C_CARSNO = utils.remap(table[8], cars_map)
        chunk.LC4605_C_NSSEC = utils.remap(table[9], econ_map)
        #print(chunk.head())
        self.dwellings = self.dwellings.append(chunk, ignore_index=True)
    def __add_communal(self, area):
        """Enumerate communal establishments for one area.
        No microsynthesis required: each census-counted establishment
        becomes one row, with the type's total occupants shared
        (integerised) evenly across its establishments.
        """
        # here we simply enumerate the census counts - no microsynthesis required
        area_communal = self.communal.loc[(self.communal.GEOGRAPHY_CODE == area) & (self.communal.OBS_VALUE > 0)]
        if len(area_communal) == 0:
            return
        num_communal = area_communal.OBS_VALUE.sum()
        chunk = pd.DataFrame(columns=self.dwellings.columns.values)
        chunk.Area = np.repeat(area, num_communal)
        chunk.LC4402_C_TENHUK11 = np.repeat(self.NOTAPPLICABLE, num_communal)
        chunk.LC4404_C_ROOMS = np.repeat(self.UNKNOWN, num_communal)
        chunk.LC4404_C_SIZHUK11 = np.repeat(self.UNKNOWN, num_communal)
        chunk.LC4405EW_C_BEDROOMS = np.repeat(self.UNKNOWN, num_communal)
        chunk.LC4408_C_AHTHUK11 = np.repeat(self.UNKNOWN, num_communal) # communal not considered separately to multi-person household
        chunk.LC4402_C_CENHEATHUK11 = np.repeat(2, num_communal) # assume all communal are centrally heated
        chunk.LC4402_C_TYPACCOM = np.repeat(self.NOTAPPLICABLE, num_communal)
        chunk.LC4202_C_ETHHUK11 = np.repeat(self.UNKNOWN, num_communal)
        chunk.LC4202_C_CARSNO = np.repeat(1, num_communal) # no cars (blanket assumption)
        index = 0
        #print(area, len(area_communal))
        for i in range(0, len(area_communal)):
            # average occupants per establishment - integerised (special case when zero occupants)
            establishments = area_communal.at[area_communal.index[i], "OBS_VALUE"]
            occupants = area_communal.at[area_communal.index[i], "CommunalSize"]
            if establishments == 1:
                occ_array = [occupants]
            else:
                occ_array = humanleague.prob2IntFreq(np.full(establishments, 1.0 / establishments), occupants)["freq"]
            for j in range(0, establishments):
                chunk.QS420_CELL.at[index] = area_communal.at[area_communal.index[i], "CELL"]
                chunk.CommunalSize.at[index] = occ_array[j]
                chunk.LC4605_C_NSSEC.at[index] = utils.communal_economic_status(area_communal.at[area_communal.index[i], "CELL"])
                index += 1
        #print(chunk.head())
        self.dwellings = self.dwellings.append(chunk, ignore_index=True)
# unoccupied, should be one entry per area
# sample from the occupied houses
def __add_unoccupied(self, area):
unocc = self.ks401.loc[(self.ks401.GEOGRAPHY_CODE == area) & (self.ks401.CELL == 6)]
if not len(unocc) == 1:
raise("ks401 problem - multiple unoccupied entries in table")
n_unocc = unocc.at[unocc.index[0], "OBS_VALUE"]
#print(n_unocc)
chunk = pd.DataFrame(columns=self.dwellings.columns.values)
chunk.Area = np.repeat(area, n_unocc)
chunk.LC4402_C_TENHUK11 = np.repeat(self.UNKNOWN, n_unocc)
chunk.LC4404_C_SIZHUK11 = np.repeat(0, n_unocc)
chunk.LC4408_C_AHTHUK11 = np.repeat(self.UNKNOWN, n_unocc)
chunk.LC4402_C_TYPACCOM = np.repeat(self.NOTAPPLICABLE, n_unocc)
chunk.LC4202_C_ETHHUK11 = np.repeat(self.UNKNOWN, n_unocc)
chunk.LC4202_C_CARSNO = np.repeat(1, n_unocc) # no cars
chunk.QS420_CELL = np.repeat(self.NOTAPPLICABLE, n_unocc)
chunk.CommunalSize = np.repeat(self.NOTAPPLICABLE, n_unocc)
chunk.LC4605_C_NSSEC = np.repeat(self.UNKNOWN, n_unocc)
occ = self.dwellings.loc[(self.dwellings.Area == area) & (self.dwellings.QS420_CELL == self.NOTAPPLICABLE)]
s = occ.sample(n_unocc, replace=True).reset_index()
chunk.LC4404_C_ROOMS = s.LC4404_C_ROOMS
chunk.LC4405EW_C_BEDROOMS = s.LC4405EW_C_BEDROOMS
chunk.LC4402_C_CENHEATHUK11 = s.LC4402_C_CENHEATHUK11
self.dwellings = self.dwellings.append(chunk, ignore_index=True)
def __get_census_data(self):
if self.region[0] == "E" or self.region[0] == "W":
return self.__get_census_data_ew()
elif self.region[0] == "S":
return self.__get_census_data_sc()
elif self.region[0] == "N":
raise NotImplementedError("NI census data not available")
else:
raise ValueError("invalid region code " + self.region)
    def __get_census_data_sc(self):
        """Load, and where necessary synthesise, the Scottish census tables.

        Scotland's release lacks several of the multivariate tables available
        for England & Wales, so equivalents (lc4404, lc4405, lc4408) are
        synthesised here from univariate tables using humanleague's
        quasirandom IPF (qis), constrained to the tenure marginal taken from
        LC4402SC.  The LC4402SC grand total acts as a checksum that every
        other table is expected to match.  Populates: lc4402, lc4404, lc4405,
        lc4408, lc1105, ks401, lc4202, lc4605 and communal.
        """
        # tenure x accommodation type x central heating (the anchor table)
        #print(self.api_sc.get_metadata("LC4404SC", self.resolution))
        self.lc4402 = self.api_sc.get_data("LC4402SC", self.region, self.resolution,
                                           category_filters={"LC4402SC_0_CODE": [2,3,5,6], "LC4402SC_1_CODE": [2,3,4,5], "LC4402SC_2_CODE": [1,2]})
        self.lc4402.rename({"LC4402SC_1_CODE": "C_TYPACCOM", "LC4402SC_2_CODE": "C_CENHEATHUK11", "LC4402SC_0_CODE": "C_TENHUK11" }, axis=1, inplace=True)
        #print(self.lc4402.head())

        # ensure counts are consistent across tables
        checksum = self.lc4402.OBS_VALUE.sum()

        # construct a tenure marginal for synthesis of other tables unavailable in Scottish dataset
        ngeogs = len(self.lc4402.GEOGRAPHY_CODE.unique())
        ntenures = len(self.lc4402.C_TENHUK11.unique())
        tenure_table = self.lc4402.groupby(["GEOGRAPHY_CODE", "C_TENHUK11"]).sum().reset_index().drop(["C_TYPACCOM", "C_CENHEATHUK11"], axis=1)
        m4402 = utils.unlistify(tenure_table, ["GEOGRAPHY_CODE", "C_TENHUK11"], [ngeogs, ntenures], "OBS_VALUE")

        # synthesise LC4404 from QS407 and QS406
        # LC4404SC room categories are: 1, 2-3, 4-5, 6+ so not very useful, using univariate tables instead
        #print(self.api_sc.get_metadata("QS407SC", self.resolution))
        qs407 = self.api_sc.get_data("QS407SC", self.region, self.resolution, category_filters={"QS407SC_0_CODE": range(1,10)})
        qs407.rename({"QS407SC_0_CODE": "C_ROOMS"}, axis=1, inplace=True)
        # cap at 6+ rooms to match the E&W categorisation
        qs407 = utils.cap_value(qs407, "C_ROOMS", 6, "OBS_VALUE")
        #print(qs407.head())
        assert qs407.OBS_VALUE.sum() == checksum

        #print(self.api_sc.get_metadata("QS406SC", self.resolution))
        qs406 = self.api_sc.get_data("QS406SC", self.region, self.resolution, category_filters={"QS406SC_0_CODE": range(1,9)})
        qs406.rename({"QS406SC_0_CODE": "C_SIZHUK11"}, axis=1, inplace=True)
        # cap household size at 4+ to match the E&W categorisation
        qs406 = utils.cap_value(qs406, "C_SIZHUK11", 4, "OBS_VALUE")
        #print(qs406.head())
        assert qs406.OBS_VALUE.sum() == checksum

        nrooms = len(qs407.C_ROOMS.unique())
        nsizes = len(qs406.C_SIZHUK11.unique())
        m407 = utils.unlistify(qs407, ["GEOGRAPHY_CODE", "C_ROOMS"], [ngeogs, nrooms], "OBS_VALUE")
        m406 = utils.unlistify(qs406, ["GEOGRAPHY_CODE", "C_SIZHUK11"], [ngeogs, nsizes], "OBS_VALUE")
        # QIS over (geog, tenure) x (geog, rooms) x (geog, size) marginals
        a4404 = humanleague.qis([np.array([0,1]), np.array([0,2]), np.array([0,3])], [m4402, m407, m406])
        utils.check_humanleague_result(a4404, [m4402, m407, m406])
        self.lc4404 = utils.listify(a4404["result"], "OBS_VALUE", ["GEOGRAPHY_CODE", "C_TENHUK11", "C_ROOMS", "C_SIZHUK11"])
        # map the synthesised array indices back onto the original category values
        self.lc4404.GEOGRAPHY_CODE = utils.remap(self.lc4404.GEOGRAPHY_CODE, qs406.GEOGRAPHY_CODE.unique())
        self.lc4404.C_TENHUK11 = utils.remap(self.lc4404.C_TENHUK11, tenure_table.C_TENHUK11.unique())
        self.lc4404.C_ROOMS = utils.remap(self.lc4404.C_ROOMS, qs407.C_ROOMS.unique())
        self.lc4404.C_SIZHUK11 = utils.remap(self.lc4404.C_SIZHUK11, qs406.C_SIZHUK11.unique())
        #print(self.lc4404.head())
        assert self.lc4404.OBS_VALUE.sum() == checksum

        # no bedroom info is available
        # for now randomly sample from survey on rooms
        # TODO microsynth using tenure/occs also?
        self.lc4405 = self.lc4404.copy()
        # self.lc4405.rename({"C_ROOMS": "C_BEDROOMS"}, axis=1, inplace=True)
        self.lc4405["C_BEDROOMS"] = Household.UNKNOWN
        room_bed_dist = np.sum(seed.get_survey_TROBH(), axis=(0,2,4))
        #print(room_bed_dist)
        # c = [1,2,3,4]
        # for i in range(0,6):
        #     p = room_bed_dist[i]/np.sum(room_bed_dist[i])
        #     n = len(self.lc4405[self.lc4405.C_ROOMS == i+1])
        #     #print(np.random.choice(c, n, p=p))
        #     self.lc4405.loc[self.lc4405.C_ROOMS == i+1, "C_BEDROOMS"] = np.random.choice(c, n, p=p)
        #assert len(self.lc4405[self.lc4405.C_BEDROOMS == Household.UNKNOWN]) == 0
        # sanity: bedrooms can never exceed rooms
        assert len(self.lc4405[self.lc4405.C_ROOMS < self.lc4405.C_BEDROOMS]) == 0
        self.lc4405.drop("C_ROOMS", axis=1, inplace=True)
        self.lc4405 = self.lc4405.groupby(["GEOGRAPHY_CODE", "C_TENHUK11", "C_SIZHUK11", "C_BEDROOMS"]).sum().reset_index()
        #print(self.lc4405)
        assert self.lc4405.OBS_VALUE.sum() == checksum

        # synthesise LC4408 (tenure x household type) from the tenure marginal and QS116
        #print(self.api_sc.get_metadata("QS116SC", self.resolution))
        # 1'One person household',
        # 2'Married couple household: No dependent children',
        # 3'Married couple household: With dependent children',
        # 4'Same-sex civil partnership couple household',
        # 5'Cohabiting couple household: No dependent children',
        # 6'Cohabiting couple household: With dependent children',
        # 7'Lone parent household: No dependent children',
        # 8'Lone parent household: With dependent children',
        # 9'Multi-person household: All full-time students',
        # 10'Multi-person household: Other']}}
        qs116 = self.api_sc.get_data("QS116SC", self.region, self.resolution, category_filters={"QS116SC_0_CODE": range(1,11)})
        qs116.rename({"QS116SC_0_CODE": "C_AHTHUK11"}, axis=1, inplace=True)
        # map to lower-resolution household types
        # 1 -> 1 (single)
        # (2,3,4) -> 2 (married/civil couple)
        # (5,6) -> 3 (cohabiting couple)
        # (7,8) -> 4 (single parent)
        # (9,10) -> 5 (mixed)
        qs116.loc[(qs116.C_AHTHUK11 == 2) | (qs116.C_AHTHUK11 == 3) | (qs116.C_AHTHUK11 == 4), "C_AHTHUK11"] = 2
        qs116.loc[(qs116.C_AHTHUK11 == 5) | (qs116.C_AHTHUK11 == 6), "C_AHTHUK11"] = 3
        qs116.loc[(qs116.C_AHTHUK11 == 7) | (qs116.C_AHTHUK11 == 8), "C_AHTHUK11"] = 4
        qs116.loc[(qs116.C_AHTHUK11 == 9) | (qs116.C_AHTHUK11 == 10), "C_AHTHUK11"] = 5
        # ...and consolidate
        qs116 = qs116.groupby(["GEOGRAPHY_CODE", "C_AHTHUK11"]).sum().reset_index()
        assert qs116.OBS_VALUE.sum() == checksum

        nhhtypes = len(qs116.C_AHTHUK11.unique())
        m116 = utils.unlistify(qs116, ["GEOGRAPHY_CODE", "C_AHTHUK11"], [ngeogs, nhhtypes], "OBS_VALUE")
        a4408 = humanleague.qis([np.array([0,1]), np.array([0,2])], [m4402, m116])
        utils.check_humanleague_result(a4408, [m4402, m116])
        self.lc4408 = utils.listify(a4408["result"], "OBS_VALUE", ["GEOGRAPHY_CODE", "C_TENHUK11", "C_AHTHUK11"])
        self.lc4408.GEOGRAPHY_CODE = utils.remap(self.lc4408.GEOGRAPHY_CODE, qs116.GEOGRAPHY_CODE.unique())
        self.lc4408.C_TENHUK11 = utils.remap(self.lc4408.C_TENHUK11, self.lc4402.C_TENHUK11.unique())
        self.lc4408.C_AHTHUK11 = utils.remap(self.lc4408.C_AHTHUK11, qs116.C_AHTHUK11.unique())
        #print(self.lc4408.head())
        assert self.lc4408.OBS_VALUE.sum() == checksum

        # LC1105 (residence type) from KS101SC
        #print(self.api_sc.get_metadata("KS101SC", self.resolution))
        self.lc1105 = self.api_sc.get_data("KS101SC", self.region, self.resolution, category_filters={"KS101SC_0_CODE": [3,4]})
        self.lc1105.rename({"KS101SC_0_CODE": "C_RESIDENCE_TYPE"}, axis=1, inplace=True)
        # 3->1, 4->2
        self.lc1105["C_RESIDENCE_TYPE"] = self.lc1105["C_RESIDENCE_TYPE"] - 2
        # NOTE(review): person-based total, so no checksum assertion here
        #print(self.lc1105.OBS_VALUE.sum(), checksum)

        # occupied vs unoccupied
        #print(self.api_sc.get_metadata("KS401SC", self.resolution))
        # 5'All household spaces: Occupied',
        # 6'All household spaces: Unoccupied: Second residence/holiday accommodation',
        # 7'All household spaces: Unoccupied: Vacant',
        self.ks401 = self.api_sc.get_data("KS401SC", self.region, self.resolution, category_filters={"KS401SC_0_CODE": [5,6,7]})
        self.ks401.rename({"KS401SC_0_CODE": "CELL"}, axis=1, inplace=True)
        # merge the two unoccupied categories (6 and 7) into a single cell 6
        self.ks401 = utils.cap_value(self.ks401, "CELL", 6, "OBS_VALUE")
        assert self.ks401[self.ks401.CELL == 5].OBS_VALUE.sum() == checksum

        #print(self.api_sc.get_metadata("LC4202SC", self.resolution))
        #{'table': 'LC4202SC', 'description': '', 'geography': 'OA11', 'fields': {'LC4202SC_1_CODE': [
        # 'All households:',
        # 'Owned:',
        # 'Social rented:',
        # 'Private rented or living rent free:'],
        # 'LC4202SC_2_CODE': [
        # 'Total',
        # 'Number of cars or vans in household: No cars or vans',
        # 'Number of cars or vans in household: One car or van',
        # 'Number of cars or vans in household:Two or more cars or vans'],
        # 'LC4202SC_0_CODE': [
        # 'All households',
        # 'White',
        # 'Mixed or multiple ethnic groups',
        # 'Asian Asian Scottish or Asian British',
        # 'African',
        # 'Caribbean or Black',
        # 'Other ethnic groups']}}
        self.lc4202 = self.api_sc.get_data("LC4202SC", self.region, self.resolution, category_filters={"LC4202SC_1_CODE": [1,2,3], "LC4202SC_2_CODE": [1,2,3], "LC4202SC_0_CODE": [1,2,3,4,5,6]})
        self.lc4202.rename({"LC4202SC_2_CODE": "C_CARSNO", "LC4202SC_1_CODE": "C_TENHUK11", "LC4202SC_0_CODE": "C_ETHHUK11"}, axis=1, inplace=True)
        # TODO how to map tenure 1->2/3?
        # remap Scottish tenure codes onto the E&W coding; the descending order
        # of these assignments matters (3->6 must run before 2->5 and 1->3)
        self.lc4202.loc[self.lc4202.C_TENHUK11 == 3, "C_TENHUK11"] = 6
        self.lc4202.loc[self.lc4202.C_TENHUK11 == 2, "C_TENHUK11"] = 5
        self.lc4202.loc[self.lc4202.C_TENHUK11 == 1, "C_TENHUK11"] = 3 # OR 2?
        assert self.lc4202.OBS_VALUE.sum() == checksum

        #print(self.api_sc.get_metadata("LC4605SC", self.resolution))
        #{'table': 'LC4605SC', 'description': '', 'geography': 'OA11', 'fields': {'LC4605SC_1_CODE': [
        # 'All HRPs aged 16 to 74',
        # 'Owned: Total',
        # 'Owned: Owned outright',
        # 'Owned: Owned witha mortgage or loan or shared ownership',
        # 'Rented or living rent free: Total',
        # 'Rented or living rent free: Social rented',
        # 'Rented or living rent free: Private rented or living rent free'],
        # 'LC4605SC_0_CODE': ['All HRPs aged 16 to 74',
        # '1. Higher managerial administrative and professional occupations',
        # '2. Lower managerial administrative and professional occupations',
        # '3. Intermediate occupations',
        # '4. Small employers and own account workers',
        # '5. Lower supervisory and technical occupations',
        # '6. Semi-routine occupations',
        # '7. Routine occupations',
        # '8. Never worked and long-term unemployed',
        # 'L15 Full-time students']}}
        self.lc4605 = self.api_sc.get_data("LC4605SC", self.region, self.resolution, category_filters={"LC4605SC_1_CODE": [2,3,5,6], "LC4605SC_0_CODE": range(1,10)})
        self.lc4605.rename({"LC4605SC_1_CODE": "C_TENHUK11", "LC4605SC_0_CODE": "C_NSSEC"}, axis=1, inplace=True)
        # TODO add retired?
        # NOTE(review): this total will not match the checksum until retired HRPs are included
        print(self.lc4605.OBS_VALUE.sum(), checksum, "TODO add retired")

        #print(self.api_sc.get_metadata("QS420SC", self.resolution))
        # communal-establishment cells of interest
        cats = [2,6,11,14,22,23,24,25,26,27,28,29,30,31,32,33]
        # merge the two communal tables (so we have establishment and people counts)
        self.communal = self.api_sc.get_data("QS420SC", self.region, self.resolution, category_filters={"QS420SC_0_CODE": cats}).rename({"QS420SC_0_CODE": "CELL"}, axis=1)
        qs421 = self.api_sc.get_data("QS421SC", self.region, self.resolution, category_filters={"QS421SC_0_CODE": cats}).rename({"OBS_VALUE": "CommunalSize"}, axis=1)
        #print(qs421.head())
        self.communal = self.communal.merge(qs421, left_on=["GEOGRAPHY_CODE", "CELL"], right_on=["GEOGRAPHY_CODE", "QS421SC_0_CODE"]).drop("QS421SC_0_CODE", axis=1)
        #print(self.communal.CommunalSize.sum())
def __get_census_data_ew(self):
"""
Retrieves census tables for the specified geography
checks for locally cached data or calls nomisweb API
"""
# convert input string to enum
resolution = self.api_ew.GeoCodeLookup[self.resolution]
if self.region in self.api_ew.GeoCodeLookup.keys():
region_codes = self.api_ew.GeoCodeLookup[self.region]
else:
region_codes = self.api_ew.get_lad_codes(self.region)
if not region_codes:
raise ValueError("no regions match the input: \"" + self.region + "\"")
area_codes = self.api_ew.get_geo_codes(region_codes, resolution)
# assignment does shallow copy, need to use .copy() to avoid this getting query_params fields
common_params = {"MEASURES": "20100",
"date": "latest",
"geography": area_codes}
# LC4402EW - Accommodation type by type of central heating in household by tenure
query_params = common_params.copy()
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["C_CENHEATHUK11"] = "1,2"
query_params["C_TYPACCOM"] = "2...5"
query_params["select"] = "GEOGRAPHY_CODE,C_TENHUK11,C_CENHEATHUK11,C_TYPACCOM,OBS_VALUE"
self.lc4402 = self.api_ew.get_data("LC4402EW", query_params)
# LC4404EW - Tenure by household size by number of rooms
query_params = common_params.copy()
query_params["C_ROOMS"] = "1...6"
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["C_SIZHUK11"] = "1...4"
query_params["select"] = "GEOGRAPHY_CODE,C_ROOMS,C_TENHUK11,C_SIZHUK11,OBS_VALUE"
self.lc4404 = self.api_ew.get_data("LC4404EW", query_params)
# LC4405EW - Tenure by household size by number of bedrooms
query_params = common_params.copy()
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["C_BEDROOMS"] = "1...4"
query_params["C_SIZHUK11"] = "1...4"
query_params["select"] = "GEOGRAPHY_CODE,C_SIZHUK11,C_TENHUK11,C_BEDROOMS,OBS_VALUE"
self.lc4405 = self.api_ew.get_data("LC4405EW", query_params)
# LC4408EW - Tenure by number of persons per bedroom in household by household type
query_params = common_params.copy()
#query_params["C_PPBROOMHEW11"] = "1...4"
query_params["C_PPBROOMHEW11"] = "0"
query_params["C_AHTHUK11"] = "1...5"
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["select"] = "GEOGRAPHY_CODE,C_AHTHUK11,C_TENHUK11,OBS_VALUE"
self.lc4408 = self.api_ew.get_data("LC4408EW", query_params)
# LC1105EW - Residence type by sex by age
query_params = common_params.copy()
query_params["C_SEX"] = "0"
query_params["C_AGE"] = "0"
query_params["C_RESIDENCE_TYPE"] = "1,2"
query_params["select"] = "GEOGRAPHY_CODE,C_RESIDENCE_TYPE,OBS_VALUE"
self.lc1105 = self.api_ew.get_data("LC1105EW", query_params)
# KS401EW - Dwellings, household spaces and accommodation type
# Household spaces with at least one usual resident / Household spaces with no usual residents
query_params = common_params.copy()
query_params["RURAL_URBAN"] = "0"
query_params["CELL"] = "5,6"
query_params["select"] = "GEOGRAPHY_CODE,CELL,OBS_VALUE"
self.ks401 = self.api_ew.get_data("KS401EW", query_params)
# NOTE: common_params is passed by ref so take a copy
self.communal = self.__get_communal_data_ew(common_params.copy())
# LC4202EW - Tenure by car or van availability by ethnic group of Household Reference Person (HRP)
query_params = common_params.copy()
query_params["C_CARSNO"] = "1...3"
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["C_ETHHUK11"] = "2...8"
query_params["select"] = "GEOGRAPHY_CODE,C_ETHHUK11,C_CARSNO,C_TENHUK11,OBS_VALUE"
self.lc4202 = self.api_ew.get_data("LC4202EW", query_params)
# LC4605EW - Tenure by NS-SeC - Household Reference Persons
query_params = common_params.copy()
query_params["C_TENHUK11"] = "2,3,5,6"
query_params["C_NSSEC"] = "1...9"
query_params["select"] = "GEOGRAPHY_CODE,C_TENHUK11,C_NSSEC,OBS_VALUE"
self.lc4605 = self.api_ew.get_data("LC4605EW", query_params)
def __get_communal_data_ew(self, query_params):
# TODO merge the tables rather than relying on the order being the same in both
query_params["RURAL_URBAN"] = 0
query_params["CELL"] = "2,6,11,14,22...34"
query_params["select"] = "GEOGRAPHY_CODE,CELL,OBS_VALUE"
# communal is qs420 plus qs421
communal = self.api_ew.get_data("QS420EW", query_params) # establishments
qs421 = self.api_ew.get_data("QS421EW", query_params) # people
# merge the two tables (so we have establishment and people counts)
communal["CommunalSize"] = qs421.OBS_VALUE
return communal
| StarcoderdataPython |
43064 | <reponame>joshuahlang/template-specialize
# Licensed under the MIT License
# https://github.com/craigahobbs/template-specialize/blob/master/LICENSE
from .main import main
# Run the CLI entry point when executed directly (e.g. "python -m template_specialize").
if __name__ == '__main__':
    main()  # pragma: no cover
| StarcoderdataPython |
3278068 | <gh_stars>0
import os
import sys
import inspect
import unittest
# Make the module under test importable when this file is run directly:
# resolve this test file's directory and add its parent to sys.path.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)  # NOTE(review): computed but unused below
sys.path.append(os.path.dirname(currentdir))
import image
class ImageContainerTest(unittest.TestCase):
    """Unit tests for image.ImageContainer add/remove/load behaviour."""

    def setUp(self):
        # fresh, empty container for every test
        self.ic = image.ImageContainer()

    def tearDown(self):
        del self.ic

    def testImageContainerAdd(self):
        self.assertEqual(len(self.ic), 0)
        self.ic.add(image.Image('image.jpg'))
        self.assertEqual(len(self.ic), 1)

    def testImageContainerRemove(self):
        first, second = image.Image('image.jpg'), image.Image('image.jpg')
        self.ic.add(first)
        self.ic.add(second)
        self.assertEqual(len(self.ic), 2)
        self.ic.remove(second)
        self.assertEqual(len(self.ic), 1)

    @unittest.skip('Not implemented')
    def testImageContainerLoad(self):
        self.ic.load('img')
        self.assertEqual(len(self.ic), 5)
# Allow running the tests directly without a test runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3368431 | <filename>peripteras/users/api/views.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from collections import Counter
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.db.models import Q, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import detail_route
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics, status, mixins, viewsets
from peripteras.kiosks.models import Kiosk, Item
from peripteras.public.api.serializers import KioskSerializer, ItemkSerializer
from peripteras.users.api.serializers import BasketSerializer
from peripteras.common.mixins import FilterMixin
class AddToBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for adding items to basket
    http://localhost:8001/user/api/basket/add/

    Orders live in the Django session under 'orders_holder' as a list of
    {'kiosk': <kiosk_id>, 'items': [<item_id>, ...]} dicts - one per kiosk.
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = ItemkSerializer

    def get(self, request):
        """Add ?item_id=...&kiosk_id=... to the session basket.

        Creates the per-kiosk order (and the 'orders_holder' list itself) on
        first use; otherwise appends the item to the matching kiosk's order.
        Returns 200 with a status message, or 406 when either query parameter
        is missing.
        """
        item_id = request.GET.get("item_id", None)
        kiosk_id = request.GET.get("kiosk_id", None)
        # basket = request.session.get('basket', None)
        kiosk_ids = []
        orders_holder = request.session.get('orders_holder', None)
        if item_id and kiosk_id:
            if orders_holder:
                # users has at least one order
                for order in orders_holder:
                    kiosk_ids.append(order['kiosk'])
                if kiosk_id in kiosk_ids:
                    for order in orders_holder:
                        if order['kiosk'] == kiosk_id:
                            # 'this kiosk has items, add item to item list'
                            order['items'].append(item_id)
                            # NOTE(review): session.get returns the same list
                            # object as orders_holder above, so append+remove
                            # effectively moves this order to the end of the
                            # list; reassigning the key marks the session as
                            # modified so the change is persisted.
                            orders_holder_tmp = request.session.get(
                                'orders_holder', None)
                            orders_holder_tmp.append(order)
                            request.session[
                                'orders_holder'] = orders_holder_tmp
                            # remove this order from session
                            orders_holder.remove(order)
                            data = {
                                'msg': 'Προστέθηκε στο καλάθι'
                            }
                            return Response(data, status=status.HTTP_200_OK)
                else:
                    # create new order for new kiosk
                    items_list = [item_id]
                    order = {
                        'kiosk': kiosk_id,
                        'items': items_list
                    }
                    tmp_orders_holder = orders_holder
                    tmp_orders_holder.append(order)
                    request.session['orders_holder'] = tmp_orders_holder
                    data = {
                        'msg': 'Νέα παραγελία. Μπήκε στο καλάθι'
                    }
                    return Response(data, status=status.HTTP_200_OK)
            else:
                # init the orders sesion holder
                request.session['orders_holder'] = []
                # create an order dict
                items_list = [item_id]
                order = {
                    'kiosk': kiosk_id,
                    'items': items_list
                }
                tmp_orders_holder = request.session.get('orders_holder', None)
                tmp_orders_holder.append(order)
                request.session['orders_holder'] = tmp_orders_holder
                data = {
                    'msg': 'Μπήκε το πρώτο αντικείμενο στο καλάθι'
                }
                return Response(data, status=status.HTTP_200_OK)
        else:
            data = {
                'msg': 'error no kiosk item id'
            }
            return Response(data, status=status.HTTP_406_NOT_ACCEPTABLE)
        # NOTE(review): everything below is unreachable - every branch above
        # returns - and appears to be leftovers from an older implementation.
        # order = {
        #     'kiosk':kiosk_id,
        #     'items':basket
        # }
        # # order_init = ['order1','order2']
        # # request.session['orders'] = order_init
        # tmp_orders = request.session.get('orders', None)
        # tmp_orders.append(order)
        # request.session['orders'] = tmp_orders
        # item_to_add = Item.objects.get(id=item_id)
        # if basket:
        #     basket = request.session['basket']
        #     basket.append(item_to_add.id)
        #     request.session['basket'] = basket
        # else:
        #     basket = []
        #     basket.append(item_to_add.id)
        #     request.session['basket'] = basket
        data = {
            'msg': 'whaat'
        }
        return Response(data, status=status.HTTP_200_OK)
class GetFromBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for fetching the basket
    http://localhost:8001/user/api/basket/get/
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = BasketSerializer

    def get(self, request):
        """Return the basket contents for ?kiosk_id=... as (item, count)
        pairs, with the kiosk's delivery fee appended as a pseudo-item.
        """
        kiosk_id = request.GET.get("kiosk_id", None)
        orders_holder = request.session.get('orders_holder', None)
        basket_items_ids = None
        if orders_holder:
            for order in orders_holder:
                if order['kiosk'] == kiosk_id:
                    basket_items_ids = order['items']
        # else:
        #     data = {
        #         'msg':'Άδειο καλάθι'
        #     }
        #     return Response(data, status=status.HTTP_200_OK)
        # basket_items_ids = request.session.get('basket', None)
        kiosk = Kiosk.objects.get(id=kiosk_id)
        basket_items = []
        ziped_data = []
        total_price = 0
        if basket_items_ids:
            for it_id in basket_items_ids:
                tmp_item = Item.objects.get(id=it_id)
                total_price += tmp_item.price  # NOTE(review): computed but never returned
                basket_items.append(tmp_item)
        # append the delivery fee as an unsaved pseudo-Item; id=False marks it
        # as not being a real database row
        # NOTE(review): the fee is appended even when the basket is empty -
        # confirm this is intended.
        fee = Item()
        fee.price = kiosk.delivery_fee
        fee.title = _(u'Έξοδα μεταφοράς')
        fee.id = False
        basket_items.append(fee)
        # collapse duplicate items into (item, count) pairs for the serializer
        unique_items = Counter(basket_items)
        ziped_data = zip(unique_items.keys(), unique_items.values())
        ziped_data = self.apply_filters(request, ziped_data)
        serializer = self.serializer_class(ziped_data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class RemoveFromBasket(FilterMixin, generics.CreateAPIView):
    """
    Endpoint for removing items from basket
    http://localhost:8001/user/api/basket/remove/
    """
    # authentication_classes = (SessionAuthentication, )
    # permission_classes = (IsAuthenticated, )
    # queryset = Item.objects.all()
    serializer_class = ItemkSerializer

    def get(self, request):
        """Remove one occurrence of ?item_id=... from the ?kiosk_id=... order
        held in the session basket.

        NOTE(review): when orders exist but none matches kiosk_id, the method
        falls off the end and implicitly returns None - confirm whether an
        explicit response is wanted there.
        """
        item_id = request.GET.get("item_id", None)
        kiosk_id = request.GET.get("kiosk_id", None)
        orders_holder = request.session.get('orders_holder', None)
        basket = request.session.get('basket', None)  # NOTE(review): unused legacy key
        # if item_id:
        #     item_id = int(item_id)
        # Here we get unicode, convert it to int to find inbasket type = (list)
        if orders_holder:
            for order in orders_holder:
                if order['kiosk'] == kiosk_id:
                    if item_id not in order['items']:
                        data = {
                            'msg': 'Δεν ηταν στο καλάθι'
                        }
                        return Response(data, status=status.HTTP_200_OK)
                    # remove a single occurrence of the item
                    order['items'].remove(item_id)
                    # NOTE(review): same move-to-end-of-list dance as in
                    # AddToBasket; the reassignment marks the session modified.
                    orders_holder_tmp = request.session.get(
                        'orders_holder', None)
                    orders_holder_tmp.append(order)
                    request.session['orders_holder'] = orders_holder_tmp
                    # remove this order from session
                    orders_holder.remove(order)
                    data = {
                        'msg': 'Aφαιρέθηκε από το καλάθι'
                    }
                    return Response(data, status=status.HTTP_200_OK)
        else:
            data = {
                'msg': 'Άδειο καλάθι'
            }
            return Response(data, status=status.HTTP_200_OK)
| StarcoderdataPython |
3393450 | <reponame>DaoDaoer/PaddleSeg
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
from . import colorMap
class Label:
    """A single annotation label: numeric id, display name and RGB colour."""

    def __init__(self, idx=None, name=None, color=None):
        self.idx, self.name, self.color = idx, name, color

    def __repr__(self):
        # space-separated "<idx> <name> <color>" - mirrors the on-disk format
        return " ".join(str(part) for part in (self.idx, self.name, self.color))
class LabelList(object):
    """Mutable, ordered collection of Label objects with plain-text
    import/export (one "<id> <name> [<r> <g> <b>]" record per line)."""

    def __init__(self, labels: dict = None):
        # NOTE(review): despite the annotation, `labels` is iterated as a
        # sequence of {"id": ..., "name": ..., "color": ...} dicts - confirm.
        self.labelList = []
        if labels is not None:
            for lab in labels:
                # fall back to an auto-assigned palette colour when none given
                color = lab.get("color", colorMap.get_color())
                self.add(lab["id"], lab["name"], color)

    def add(self, idx, name, color):
        # Append a new label; no uniqueness check is performed on idx.
        self.labelList.append(Label(idx, name, color))

    def remove(self, index):
        # Remove the first label whose idx equals `index` (lookup by label id,
        # not by list position).
        for idx, lab in enumerate(self.labelList):
            if lab.idx == index:
                del self.labelList[idx]
                break
        # del self.labelList[index]

    def clear(self):
        # Drop all labels.
        self.labelList = []

    def toint(self, seq):
        # Best-effort int conversion: converts list elements in place (leaving
        # non-numeric entries unchanged) or converts a scalar directly.
        if isinstance(seq, list):
            for i in range(len(seq)):
                try:
                    seq[i] = int(seq[i])
                except ValueError:
                    pass
        else:
            seq = int(seq)
        return seq

    def importLabel(self, path):
        # Load labels from a text file, replacing the current list.
        # Silently returns an empty list when the file does not exist.
        if not osp.exists(path):
            return []
        with open(path, "r", encoding="utf-8") as f:
            labels = f.readlines()
        labelList = []
        for lab in labels:
            lab = lab.replace("\n", "").strip(" ").split(" ")
            # valid records are "<id> <name>" or "<id> <name> <r> <g> <b>"
            if len(lab) != 2 and len(lab) != 5:
                print(f"{lab} 标签不合法")  # "label is invalid"
                continue
            label = Label(self.toint(lab[0]), str(lab[1]), self.toint(lab[2:]))
            labelList.append(label)
        self.labelList = labelList

    def exportLabel(self, path):
        # Write labels out in the "<id> <name> <r> <g> <b>" format read above.
        if not path or not osp.exists(osp.dirname(path)):
            print("label path don't exist")
            return
        with open(path, "w", encoding="utf-8") as f:
            for label in self.labelList:
                print(label.idx, end=" ", file=f)
                print(label.name, end=" ", file=f)
                for idx in range(3):
                    print(label.color[idx], end=" ", file=f)
                print(file=f)

    def getLabelById(self, labelIdx):
        # Linear search; implicitly returns None when no label matches.
        for lab in self.labelList:
            if lab.idx == labelIdx:
                return lab

    def __repr__(self):
        return str(self.labelList)

    def __getitem__(self, index):
        return self.labelList[index]

    def __len__(self):
        return len(self.labelList)

    @property
    def colors(self):
        # Colours of all labels, in label order.
        cols = []
        for lab in self.labelList:
            cols.append(lab.color)
        return cols
| StarcoderdataPython |
4810460 | import watertank
import burner
import shac
# Compose the watertank and burner hybrid automata into a single system, then
# compile the result.  (The exact semantics of COMPOSED/ABOF depend on the
# project's `shac` module - presumably "input is already composed" plus an
# analysis/codegen flag; TODO confirm against shac's documentation.)
ha = shac.comp([watertank.watertank, burner.burner])
shac.compile(ha, COMPOSED=True, ABOF=True)
| StarcoderdataPython |
1739910 | <reponame>cenkalti/hcloud-python
# -*- coding: utf-8 -*-
from hcloud.actions.client import BoundAction
from hcloud.core.client import BoundModelBase, ClientEntityBase, GetEntityByNameMixin
from hcloud.core.domain import add_meta_to_result
from hcloud.images.domain import Image
class BoundImage(BoundModelBase):
    """Client-bound view of an :class:`Image <hcloud.images.domain.Image>`:
    the image data plus a reference to the images client, so image actions
    can be invoked directly on the instance."""

    model = Image

    def __init__(self, client, data):
        from hcloud.servers.client import BoundServer

        # Promote raw server references in the API payload to lazily-completed
        # BoundServer objects before handing the data to the base class.
        source_server = data.get("created_from")
        if source_server is not None:
            data['created_from'] = BoundServer(client._client.servers, source_server, complete=False)

        bound_server_id = data.get("bound_to")
        if bound_server_id is not None:
            data['bound_to'] = BoundServer(client._client.servers, {"id": bound_server_id}, complete=False)

        super(BoundImage, self).__init__(client, data)

    def get_actions_list(self, sort=None, page=None, per_page=None, status=None):
        # type: (Optional[List[str]], Optional[int], Optional[int], Optional[List[str]]) -> PageResult[BoundAction, Meta]
        """Return one page of action objects for this image.

        :param sort: List[str] (optional)
            Sort specification: one of `id` `command` `status` `progress` `started` `finished`, each optionally suffixed `:asc`/`:desc`
        :param page: int (optional)
            Page to fetch
        :param per_page: int (optional)
            Results per page
        :param status: List[str] (optional)
            Only actions with these statuses. Choices: `running` `success` `error`
        :return: (List[:class:`BoundAction <hcloud.actions.client.BoundAction>`], :class:`Meta <hcloud.core.domain.Meta>`)
        """
        return self._client.get_actions_list(self, sort=sort, page=page, per_page=per_page, status=status)

    def get_actions(self, sort=None, status=None):
        # type: (Optional[List[str]], Optional[List[str]]) -> List[BoundAction]
        """Return all action objects for this image (all pages).

        :param sort: List[str] (optional)
            Sort specification: one of `id` `command` `status` `progress` `started` `finished`, each optionally suffixed `:asc`/`:desc`
        :param status: List[str] (optional)
            Only actions with these statuses. Choices: `running` `success` `error`
        :return: List[:class:`BoundAction <hcloud.actions.client.BoundAction>`]
        """
        return self._client.get_actions(self, status=status, sort=sort)

    def update(self, description=None, type=None, labels=None):
        # type: (Optional[str], Optional[str], Optional[Dict[str, str]]) -> BoundImage
        """Update this image: change the description, convert a backup image
        to a snapshot, or replace the labels.

        :param description: str (optional)
            New description of the image
        :param type: str (optional)
            Destination image type to convert to. Choices: snapshot
        :param labels: Dict[str, str] (optional)
            User-defined labels (key-value pairs)
        :return: :class:`BoundImage <hcloud.images.client.BoundImage>`
        """
        return self._client.update(self, description, type, labels)

    def delete(self):
        # type: () -> bool
        """Delete this image. Only snapshot and backup images can be deleted.

        :return: bool
        """
        return self._client.delete(self)

    def change_protection(self, delete=None):
        # type: (Optional[bool]) -> BoundAction
        """Change the protection configuration of this image (snapshots only).

        :param delete: bool
            If True, prevents the snapshot from being deleted
        :return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
        """
        return self._client.change_protection(self, delete)
class ImagesClient(ClientEntityBase, GetEntityByNameMixin):
results_list_attribute_name = 'images'
def get_actions_list(self,
image, # type: Image
sort=None, # type: Optional[List[str]]
page=None, # type: Optional[int]
per_page=None, # type: Optional[int]
status=None, # type: Optional[List[str]]
):
# type: (...) -> PageResults[List[BoundAction], Meta]
"""Returns a list of action objects for an image.
:param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.images.domain.Image>`
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `id:asc` `id:desc` `command` `command:asc` `command:desc` `status` `status:asc` `status:desc` `progress` `progress:asc` `progress:desc` `started` `started:asc` `started:desc` `finished` `finished:asc` `finished:desc`
:param page: int (optional)
Specifies the page to fetch
:param per_page: int (optional)
Specifies how many results are returned by page
:return: (List[:class:`BoundAction <hcloud.actions.client.BoundAction>`], :class:`Meta <hcloud.core.domain.Meta>`)
"""
params = {}
if sort is not None:
params["sort"] = sort
if status is not None:
params["status"] = status
if page is not None:
params["page"] = page
if per_page is not None:
params["per_page"] = per_page
response = self._client.request(url="/images/{image_id}/actions".format(image_id=image.id), method="GET", params=params)
actions = [BoundAction(self._client.actions, action_data) for action_data in response['actions']]
return add_meta_to_result(actions, response, 'actions')
def get_actions(self,
image, # type: Image
sort=None, # type: Optional[List[str]]
status=None, # type: Optional[List[str]]
):
# type: (...) -> List[BoundAction]
"""Returns all action objects for an image.
:param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.images.domain.Image>`
:param status: List[str] (optional)
Response will have only actions with specified statuses. Choices: `running` `success` `error`
:param sort: List[str] (optional)
Specify how the results are sorted. Choices: `id` `command` `status` `progress` `started` `finished` . You can add one of ":asc", ":desc" to modify sort order. ( ":asc" is default)
:return: List[:class:`BoundAction <hcloud.actions.client.BoundAction>`]
"""
return super(ImagesClient, self).get_actions(image, sort=sort, status=status)
def get_by_id(self, id):
    # type: (int) -> BoundImage
    """Fetch a single image by its numeric id.

    :param id: int
    :return: :class:`BoundImage <hcloud.images.client.BoundImage>`
    """
    response = self._client.request(
        url="/images/{image_id}".format(image_id=id),
        method="GET",
    )
    return BoundImage(self, response['image'])
def get_list(self,
             name=None,            # type: Optional[str]
             label_selector=None,  # type: Optional[str]
             bound_to=None,        # type: Optional[List[str]]
             type=None,            # type: Optional[List[str]]
             sort=None,            # type: Optional[List[str]]
             page=None,            # type: Optional[int]
             per_page=None,        # type: Optional[int]
             status=None           # type: Optional[List[str]]
             ):
    # type: (...) -> PageResults[List[BoundImage]]
    """Get one page of images.

    :param name: str (optional)
           Can be used to filter images by their name.
    :param label_selector: str (optional)
           Can be used to filter images by labels. The response will only contain images matching the label selector.
    :param bound_to: List[str] (optional)
           Server Id linked to the image. Only available for images of type backup
    :param type: List[str] (optional)
           Choices: system snapshot backup
    :param sort: List[str] (optional)
           Choices: id id:asc id:desc name name:asc name:desc created created:asc created:desc
    :param page: int (optional)
           Specifies the page to fetch
    :param per_page: int (optional)
           Specifies how many results are returned by page
    :param status: List[str] (optional)
           Can be used to filter images by their status. The response will only contain images matching the status.
    :return: (List[:class:`BoundImage <hcloud.images.client.BoundImage>`], :class:`Meta <hcloud.core.domain.Meta>`)
    """
    params = {}
    if name is not None:
        params['name'] = name
    if label_selector is not None:
        params['label_selector'] = label_selector
    if bound_to is not None:
        params['bound_to'] = bound_to
    if type is not None:
        params['type'] = type
    if sort is not None:
        params['sort'] = sort
    if page is not None:
        params['page'] = page
    if per_page is not None:
        params['per_page'] = per_page
    if status is not None:
        # BUG FIX: this previously assigned `per_page` to the status filter,
        # so filtering by status was silently broken.
        params['status'] = status
    response = self._client.request(url="/images", method="GET", params=params)
    images = [BoundImage(self, image_data) for image_data in response['images']]
    return self._add_meta_to_result(images, response)
def get_all(self,
            name=None,            # type: Optional[str]
            label_selector=None,  # type: Optional[str]
            bound_to=None,        # type: Optional[List[str]]
            type=None,            # type: Optional[List[str]]
            sort=None,            # type: Optional[List[str]]
            status=None,          # type: Optional[List[str]]
            ):
    # type: (...) -> List[BoundImage]
    """Fetch every image, transparently following pagination.

    :param name: str (optional)
           Filter images by name.
    :param label_selector: str (optional)
           Only return images matching this label selector.
    :param bound_to: List[str] (optional)
           Server Id linked to the image. Only available for images of type backup
    :param type: List[str] (optional)
           Choices: system snapshot backup
    :param status: List[str] (optional)
           Only return images in these states.
    :param sort: List[str] (optional)
           Sort keys: id name created; append ":asc" (default) or ":desc".
    :return: List[:class:`BoundImage <hcloud.images.client.BoundImage>`]
    """
    # The base class loops over get_list pages and concatenates the results.
    return super(ImagesClient, self).get_all(name=name,
                                             label_selector=label_selector,
                                             bound_to=bound_to,
                                             type=type,
                                             sort=sort,
                                             status=status)
def get_by_name(self, name):
    # type: (str) -> BoundImage
    """Fetch a single image by its name.

    :param name: str
           Name of the image to look up.
    :return: :class:`BoundImage <hcloud.images.client.BoundImage>`
    """
    # Base-class helper: performs a filtered get_list and returns the first hit.
    return super(ImagesClient, self).get_by_name(name)
def update(self, image, description=None, type=None, labels=None):
    # type:(Image, Optional[str], Optional[str], Optional[Dict[str, str]]) -> BoundImage
    """Update an image: change its description or labels, or convert a
    backup image into a snapshot.

    :param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.images.domain.Image>`
    :param description: str (optional)
           New description of the image.
    :param type: str (optional)
           Destination image type to convert to. Choices: snapshot
    :param labels: Dict[str, str] (optional)
           User-defined labels (key-value pairs).
    :return: :class:`BoundImage <hcloud.images.client.BoundImage>`
    """
    # Only send the fields the caller actually supplied.
    data = {}
    if description is not None:
        data["description"] = description
    if type is not None:
        data["type"] = type
    if labels is not None:
        data["labels"] = labels
    response = self._client.request(
        url="/images/{image_id}".format(image_id=image.id),
        method="PUT",
        json=data,
    )
    return BoundImage(self, response['image'])
def delete(self, image):
    # type: (Image) -> bool
    """Delete an image. Only images of type snapshot and backup can be deleted.

    :param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.images.domain.Image>`
    :return: bool
    """
    self._client.request(
        url="/images/{image_id}".format(image_id=image.id),
        method="DELETE",
    )
    # The API returns no action for image deletion; failures surface as an
    # APIException, so reaching this point always means success.
    return True
def change_protection(self, image, delete=None):
    # type: (Image, Optional[bool], Optional[bool]) -> BoundAction
    """Change the protection configuration of an image (snapshots only).

    :param image: :class:`BoundImage <hcloud.images.client.BoundImage>` or :class:`Image <hcloud.images.domain.Image>`
    :param delete: bool
           If true, prevents the snapshot from being deleted.
    :return: :class:`BoundAction <hcloud.actions.client.BoundAction>`
    """
    data = {}
    if delete is not None:
        data["delete"] = delete
    response = self._client.request(
        url="/images/{image_id}/actions/change_protection".format(image_id=image.id),
        method="POST",
        json=data,
    )
    return BoundAction(self._client.actions, response['action'])
| StarcoderdataPython |
101515 | <filename>sug-blog/unit4/account.py
import handler.handler as handler
import util.security as security
import util.validator as validator
from google.appengine.ext import db
class AccountHandler(handler.TemplateHandler):
    """Landing page for accounts: lets a visitor choose between signing up
    for a new account or logging into an existing one."""

    def get(self):
        # Static page; no data needed.
        self.render("accounts.html")
class SignupHandler(handler.TemplateHandler):
    """
    SignupHandler inherits from the handler.TemplateHandler class.
    It aggregates methods that offer users the possibility to create and
    persist an account.
    """

    def get(self):
        # Blank signup form.
        self.render("signup.html")

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        cfpassword = self.request.get('verify')
        user_email = self.request.get('email')
        comments = self.request.get('text')
        if validator.validate_user(username, password, cfpassword, user_email):
            # check if the input username is unique
            query = db.Query(Account)
            query.filter('username =', username)
            known_account = query.get()
            if known_account:
                # ask the user to choose a different username
                username_error = "Username already exists"
                self.render("signup.html", username=username,
                            username_error=username_error)
            else:
                # Create and persist an account for the user.
                # FIX: store the hashed password computed below; the original
                # source contained a corrupted "<PASSWORD>" placeholder here,
                # which was a syntax error.
                hashed_password = security.hash_password(password)
                new_account = Account(username=username,
                                      password=hashed_password,
                                      email=user_email,
                                      comments=comments)
                key = new_account.put()
                self.response.headers['Content-Type'] = 'text/plain'
                # Set a signed cookie with the username plus a short-lived
                # cookie carrying the datastore key as a fallback lookup.
                user_cookie = security.make_secure_val(username)
                self.response.set_cookie('user', str(user_cookie), max_age=3600, path='/')
                self.response.set_cookie('account_key', str(key), max_age=360, path='/')
                self.redirect('/account_created')
        else:
            # Re-render the form with a specific message for every invalid field.
            username_error = ""
            password_error = ""
            cfpassword_error = ""
            email_error = ""
            if not validator.is_username_valid(username):
                username_error = "Invalid username!"
            if not validator.is_password_valid(password):
                password_error = "Invalid password!"
            if not validator.is_cfpassword_valid(password, cfpassword):
                cfpassword_error = "Your passwords don't match!"
            if not validator.is_email_valid(user_email):
                email_error = "Invalid email!"
            self.render("signup.html",
                        username=username,
                        username_error=username_error,
                        password_error=password_error,
                        cfpassword_error=cfpassword_error,
                        email=user_email,
                        email_error=email_error,
                        comments=comments)
class WelcomeHandler(handler.TemplateHandler):
    """
    WelcomeHandler inherits from the handler.TemplateHandler class.
    Its sole purpose is to confirm user account signup, or redirect
    them to the signup page if the input information isn't valid.
    """

    def get(self):
        # Cookies set at signup/login time; 'user' is a signed value
        # (presumably "<name>|<hmac>" -- see security.check_secure_val).
        user_cookie = self.request.cookies.get('user')
        account_key = self.request.cookies.get('account_key')
        # retrieve username from cookie
        if user_cookie:
            username = security.check_secure_val(user_cookie)
            # retrieve user account by their username from the datastore
            query = db.Query(Account)
            query.filter('username =', username)
            account = query.get()
            # if the above attempt doesn't work, try retrieving by the account key
            if not account:
                if account_key:
                    account_key = db.Key(account_key)
                    account = db.get(account_key)
            # make sure we have a valid account before proceeding
            if account:
                self.render("account.html", username=account.username,
                            email=account.email,
                            creation_date=account.created,
                            comments=account.comments)
            else:
                # Stale cookies: clear them so the next request starts clean.
                if user_cookie:
                    # in case the user couldn't be found but a cookie with a username had been set
                    self.response.unset_cookie('user')
                if account_key:
                    # in case the user couldn't be found but a cookie with an account key had been set
                    self.response.unset_cookie('account_key')
                username_error = "Unknown username: " + username
                self.render("login.html", username_error=username_error)
        else:
            # No session cookie at all: the visitor must sign up first.
            self.redirect('/account_signup')

    def post(self):
        # when the logout button is clicked, redirect to the logout page
        self.redirect('/account_logout')
class LoginHandler(handler.TemplateHandler):
    """
    LoginHandler inherits from the handler.TemplateHandler class.
    It aggregates methods that let users sign into and view their
    account information.
    """

    def get(self):
        # Blank login form.
        self.render("login.html")

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')
        if username and password:
            # retrieve user from the datastore
            query = db.Query(Account)
            query.filter('username =', username)
            account = query.get()
            if account:
                # verify input password against the stored hash
                if security.check_password(password, account.password):
                    self.response.headers['Content-Type'] = 'text/plain'
                    # set a signed cookie with the username
                    user_cookie = security.make_secure_val(account.username)
                    self.response.set_cookie('user', str(user_cookie), max_age=3600, path='/')
                    self.redirect("/account_created")
                else:
                    # the input password is not valid
                    message = "Invalid password!"
                    self.render("login.html", password_error=message)
            else:
                # the input username is unknown
                message = "Invalid username!"
                self.render("login.html", username_error=message)
        else:
            # No username or password were input.
            # FIX: restore the user-facing messages -- the original source had a
            # corrupted "<PASSWORD>" placeholder for the password error and a
            # "usename" typo in the username error.
            self.render("login.html",
                        username_error="Please input valid username!",
                        password_error="Please input valid password!")
class LogoutHandler(handler.TemplateHandler):
    """
    LogoutHandler inherits from the handler.TemplateHandler class.
    It signs users out of their accounts by blanking the session cookies.
    (The original docstring misnamed this class "LoginHandler".)
    """

    def get(self):
        # Overwrite both session cookies with empty values; the emitted
        # headers are identical to the original "%s" % str() formulation.
        self.response.headers.add_header('Set-Cookie', 'user=; Path=/')
        self.response.headers.add_header('Set-Cookie', 'account_key=; Path=/')
        # all we ever do from here is go back to the general accounts page
        self.redirect('/account')
class Account(db.Model):
    """
    This class inherits from the GAE db.Model (entity) class, and represents a user account.
    A user account is made of the following properties:
    - username : the username chosen by the user
    - password : the hash of the password chosen by the user
    - email    : the user's email address
    - comments : the user's comments upon account creation
    - created  : creation date and time of user account
    """
    # Indexed so the login/welcome handlers can filter('username =', ...).
    username = db.StringProperty(required=True, indexed=True)
    # Holds the value produced by security.hash_password() -- never plain text.
    password = db.StringProperty(required=True)
    email = db.StringProperty()
    comments = db.TextProperty()
    # Stamped automatically when the entity is first stored.
    created = db.DateTimeProperty(auto_now_add=True)
| StarcoderdataPython |
3350455 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# PythonTidyWrapper.py
# 2007 Mar 06 . ccr
# 2010 Sep 08 . ccr . Add JAVA_STYLE_LIST_DEDENT.
# 2010 Mar 16 . ccr . Add KEEP_UNASSIGNED_CONSTANTS and PARENTHESIZE_TUPLE_DISPLAY.
# 2007 May 25 . ccr . Changed MAX_SEPS. Add WRAP_DOC_STRINGS and DOC_TAB_REPLACEMENT.
# 2007 May 01 . ccr . Added SINGLE_QUOTED_STRINGS.
"""Wrap PythonTidy.py with a configuration file.
The name of the file containing the input Python script may be
supplied. If it is \"-\" or omitted, the input will be read from
standard input.
The name of a file to contain the output Python script may be
supplied. If it is \"-\" or omitted, the output will be written to
standard output.
"""
from __future__ import division
import sys
import os
import optparse
PY_VER = sys.version
if PY_VER[:3] in ['2.5', '2.6', '2.7']: # 2009 Oct 26
import xml.etree.ElementTree
ElementTree = xml.etree.ElementTree
else:
import elementtree.ElementTree # from http://effbot.org/zone/element-index.htm
ElementTree = elementtree.ElementTree
import PythonTidy
ZERO = 0
SPACE = ' '
NULL = ''
NA = -1
class XmlFile(ElementTree.ElementTree):
    """XML document.

    Wraps ElementTree.ElementTree so that every element in the tree is an
    XmlList, and forwards the container-style helpers (count, sort, index,
    append) to the root element.
    NOTE(review): relies on Python 2-era APIs (basestring,
    ElementTree.XMLTreeBuilder) -- confirm before running under Python 3.
    """

    def __init__(
        self,
        file=None,
        tag='global',
        **extra
    ):
        # *file* may be a path string (tilde-expanded), an open stream, or
        # None to start a brand-new document.
        if isinstance(file, basestring):
            file = os.path.expanduser(file)
        if file is None:
            # New document: create an empty root element.
            top_level_elt = XmlList(tag=tag, **extra)
            top_level_elt.text = top_level_elt.tail = '\n'
            ElementTree.ElementTree.__init__(self,
                    element=top_level_elt)
        else:
            # Existing document: parse it, materializing elements as XmlList.
            ElementTree.ElementTree.__init__(self)
            self.parse(source=file,
                       parser=ElementTree.XMLTreeBuilder(target=ElementTree.TreeBuilder(XmlList)))
        return

    def count(self, tag=None):
        """Count children of the root with the given tag (all if None)."""
        return self.getroot().count(tag=tag)

    def write(self, file):
        """Serialize the tree to *file* (path is tilde-expanded)."""
        if isinstance(file, basestring):
            file = os.path.expanduser(file)
        return ElementTree.ElementTree.write(self, file)

    def append(self, xml_elt):
        """Append *xml_elt* as a child of the root element."""
        return self.getroot().append(xml_elt)

    def sort(self, tag=None, key_name='id'):
        """Delegate to XmlList.sort on the root element."""
        return self.getroot().sort(tag=tag, key_name=key_name)

    def index(self, tag=None, key_name='id'):
        """Delegate to XmlList.index on the root element."""
        return self.getroot().index(tag=tag, key_name=key_name)
class XmlElt(ElementTree._ElementInterface):
    """XML element that carries attrib, text, and tail.

    NOTE(review): ElementTree._ElementInterface is a Python 2-era name --
    confirm before running under Python 3.
    """

    def __init__(
        self,
        tag,
        attrib={},
        **extra
    ):
        # Copy before merging so neither the caller's dict nor the shared
        # default is ever mutated.
        merged = dict(attrib)
        merged.update(extra)
        ElementTree._ElementInterface.__init__(self, tag=tag,
                attrib=merged)
        return

    def __str__(self):
        # Serialize this element (and its subtree) to markup.
        return ElementTree.tostring(self)
class XmlList(XmlElt):
    """Subclass an XML element to perform summary statistics on and
    retrieve lists (or dicts) of its children.
    """

    def count(self, tag=None):
        """Return the number of children whose tag matches *tag* (all if None)."""
        result = ZERO
        for child in self:
            if tag in [None, child.tag]:
                result += 1
        return result

    def sort(self, tag=None, key_name='id'):
        """Return (key, child) pairs for matching children, sorted by the
        child's *key_name* attribute."""
        result = [(child.attrib[key_name], child) for child in self
                  if tag in [None, child.tag]]
        result.sort()
        return result

    def index(self, tag=None, key_name='id'):
        """Return a dict mapping each matching child's *key_name* attribute
        to the child element.

        BUG FIX: the original body called an undefined function
        ``insert(result, key, child)``, which raised NameError as soon as a
        child matched; a plain dict assignment is the intended behavior.
        If two children share a key, the later one wins.
        """
        result = {}
        for child in self:
            if tag in [None, child.tag]:
                result[child.attrib[key_name]] = child
        return result
class Config(XmlFile):
    """Configuration parameters.

    Bridges an XML configuration document and the module-level settings of
    PythonTidy: ``from_pythontidy_namespace`` dumps PythonTidy's current
    defaults into this tree, and ``to_pythontidy_namespace`` pushes values
    read from an XML file back onto the PythonTidy module.
    """

    def __init__(
        self,
        file=None,
        tag='config',
        **extra
    ):
        # Parse (or create) the underlying XML document, then cache its root.
        XmlFile.__init__(self, file=file, tag=tag, **extra)
        self.root = self.getroot()
        return

    def get_global(self, name):
        """Read the configuration global *name* from the PythonTidy module."""
        return getattr(PythonTidy, name)

    def set_global(self, name, value):
        """Write *value* onto the PythonTidy module as global *name*."""
        setattr(PythonTidy, name, value)
        return self

    def from_pythontidy_namespace(self):
        """Populate this XML tree from PythonTidy's current settings.

        Each entry in the repertoire is (name, description[, type[,
        replacement]]) and becomes one <parm> element; name scripts become
        <script>/<xform> elements; SUBSTITUTE_FOR becomes <substitute>.
        """
        repertoire = [
            ('COL_LIMIT', 'Width of output lines in characters.', 'int'
             ),
            ('INDENTATION', 'String used to indent lines.'),
            ('ASSIGNMENT',
             'This is how the assignment operator is to appear.'),
            ('FUNCTION_PARAM_ASSIGNMENT',
             '... but this is how function-parameter assignment should appear.'
             ),
            ('FUNCTION_PARAM_SEP',
             'This is how function parameters are separated.'),
            ('LIST_SEP', '... and this is how list items are separated.'
             ),
            ('SUBSCRIPT_SEP',
             '... and this is how subscripts are separated.'),
            ('DICT_COLON', 'This separates dictionary keys from values.'
             ),
            ('SLICE_COLON',
             '... but this separates the start:end indices of slices.'
             ),
            ('COMMENT_PREFIX',
             'This is the sentinel that marks the beginning of a commentary string.'
             ),
            ('SHEBANG',
             'Hashbang, a line-one comment naming the Python interpreter to Unix shells.'
             ),
            ('CODING', 'The output character encoding (codec).'),
            ('CODING_SPEC',
             """Source file encoding.
The %s in the value (if any) is replaced by the value of CODING.""",
             'replace', 'CODING'),
            ('BOILERPLATE',
             """Standard code block (if any).
This is inserted after the module doc string on output."""),
            ('BLANK_LINE',
             'This is how a blank line is to appear (up to the newline character).'
             ),
            ('KEEP_BLANK_LINES',
             'If true, preserve one blank where blank(s) are encountered.'
             , 'bool'),
            ('ADD_BLANK_LINES_AROUND_COMMENTS',
             'If true, set off comment blocks with blanks.', 'bool'),
            ('ADD_BLANK_LINE_AFTER_DOC_STRING',
             'If true, add blank line after doc strings.', 'bool'),
            ('MAX_SEPS_FUNC_DEF',
             'Split lines containing longer function definitions.',
             'int'),
            ('MAX_SEPS_FUNC_REF',
             'Split lines containing longer function calls.', 'int'),
            ('MAX_SEPS_SERIES',
             'Split lines containing longer lists or tuples.', 'int'),
            ('MAX_SEPS_DICT',
             'Split lines containing longer dictionary definitions.',
             'int'),
            ('MAX_LINES_BEFORE_SPLIT_LIT',
             'Split string literals containing more newline characters.'
             , 'int'),
            ('LEFT_MARGIN', 'This is how the left margin is to appear.'
             ),
            ('NORMALIZE_DOC_STRINGS',
             'If true, normalize white space in doc strings.', 'bool'),
            ('LEFTJUST_DOC_STRINGS',
             'If true, left justify doc strings.', 'bool'),
            ('WRAP_DOC_STRINGS',
             'If true, wrap doc strings to COL_LIMIT.', 'bool'),
            ('LEFTJUST_COMMENTS',
             'If true, left justify comments.', 'bool'),
            ('WRAP_COMMENTS',
             'If true, wrap comments to COL_LIMIT.', 'bool'),
            ('DOUBLE_QUOTED_STRINGS',
             'If true, use quotes instead of apostrophes for string literals.'
             , 'bool'),
            ('SINGLE_QUOTED_STRINGS',
             'If true, use apostrophes instead of quotes for string literals.'
             , 'bool'),
            ('RECODE_STRINGS',
             """If true, try to decode strings.
Attempt to use the character encoding specified in the input (if any).""",
             'bool'),
            ('OVERRIDE_NEWLINE',
             """This is how the newline sequence should appear.
Normally, the first thing that looks like a newline
sequence on input is captured and used at the end of every
line of output.  If this is not satisfactory, the desired
output newline sequence may be specified here."""),
            ('CAN_SPLIT_STRINGS',
             'If true, longer strings are split at the COL_LIMIT.',
             'bool'),
            ('DOC_TAB_REPLACEMENT',
             'This literal replaces tab characters in doc strings and comments.'
             ),
            ('KEEP_UNASSIGNED_CONSTANTS',
             """Optionally preserve unassigned constants so that code to be tidied
may contain blocks of commented-out lines that have been no-op'ed
with leading and trailing triple quotes.  Python scripts may declare
constants without assigning them to a variables, but PythonTidy
considers this wasteful and normally elides them.""",
             'bool'),
            ('PARENTHESIZE_TUPLE_DISPLAY',
             """Optionally omit parentheses around tuples, which are superfluous
after all.  Normal PythonTidy behavior will be still to include them
as a sort of tuple display analogous to list displays, dict
displays, and yet-to-come set displays.""",
             'bool'),
            ('JAVA_STYLE_LIST_DEDENT',
             """When PythonTidy splits longer lines because MAX_SEPS
are exceeded, the statement normally is closed before the margin is
restored.  The closing bracket, brace, or parenthesis is placed at the
current indent level.  This looks ugly to \"C\" programmers.  When
JAVA_STYLE_LIST_DEDENT is True, the closing bracket, brace, or
parenthesis is brought back left to the indent level of the enclosing
statement.""",
             'bool'),
            ]
        for parm in repertoire:
            self.set_parm_from_namespace(*parm)
        repertoire = [
            ('LOCAL_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter the local-variable names."""
             ),
            ('GLOBAL_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter the global-variable names."""
             ),
            ('CLASS_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter class names."""
             ),
            ('FUNCTION_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter function names."""
             ),
            ('FORMAL_PARAM_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter function-parameter names."""
             ),
            ('ATTR_NAME_SCRIPT',
             """The following are name-transformation functions used
on output to filter class-attribute names."""
             ),
            ]
        for parm in repertoire:
            self.set_script_from_namespace(*parm)
        for parm in PythonTidy.SUBSTITUTE_FOR.iteritems():
            self.set_substitutions_from_namespace(*parm)
        return self

    def set_parm_from_namespace(
        self,
        name,
        desc,
        type=None,
        replacement=None,
        ):
        """Encode one PythonTidy global as a <parm> element.

        Scalar values are wrapped in a literal 'int(...)'/'bool(...)'
        expression (or a 'str.replace(...)' expression for 'replace' parms)
        so that get_parm_to_namespace can round-trip them with eval().
        """
        value = self.get_global(name)
        if type is None:
            if value is None:
                value = 'None'
        elif type == 'int':
            value = 'int(%s)' % value
        elif type == 'bool':
            value = 'bool(%s)' % value
        elif type == 'replace':
            # Re-abstract the CODING substring so the spec survives a change
            # of CODING on reload.
            target = self.get_global(replacement)
            value = value.replace(target, '%s')
            value = 'str.replace(%s, "%%s", PythonTidy.%s)' \
                % (repr(value), replacement)
        else:
            raise NotImplementedError
        elt = XmlList(tag='parm', name=name, value=value)
        # The description rides along as the element's tail text.
        elt.tail = '''
%s
''' % desc.strip()
        self.append(elt)
        return self

    def set_script_from_namespace(self, name, desc):
        """Encode one name-transformation script (a list of functions) as a
        <script> element containing one <xform> child per function."""
        group = XmlList(tag='script', name=name)
        group.text = '''
%s
''' % desc.strip()
        group.tail = '''
'''
        value = self.get_global(name)
        if value is None:
            pass
        else:
            for function in value:
                # Functions are recorded by name and looked up again on load.
                elt = XmlList(tag='xform', name=function.__name__)
                elt.tail = '\n'
                group.append(elt)
        self.append(group)
        return self

    def set_substitutions_from_namespace(self, target, replacement):
        """Encode one SUBSTITUTE_FOR entry as a <substitute> element."""
        elt = XmlList(tag='substitute', target=target,
                      replacement=replacement)
        elt.tail = '\n'
        self.append(elt)
        return self

    def to_pythontidy_namespace(self):
        """Push every parameter in this XML tree onto the PythonTidy module."""
        for elt in self.root.findall('parm'):
            self.get_parm_to_namespace(elt)
        for elt in self.root.findall('script'):
            self.get_script_to_namespace(elt)
        substitutions = self.root.findall('substitute')
        if substitutions:
            # Replace (not merge with) the default substitution table.
            PythonTidy.SUBSTITUTE_FOR = {}
        for elt in substitutions:
            self.get_substitutions_to_namespace(elt)
        return self

    def get_parm_to_namespace(self, elt):
        """Decode one <parm> element and set the corresponding global.

        NOTE(review): values are eval()'ed, so a hostile configuration file
        can execute arbitrary code -- only load trusted config files.
        """
        name = elt.attrib['name']
        value = elt.attrib['value']
        if value.startswith('int('):
            value = eval(value)
        elif value.startswith('bool('):
            value = eval(value)
        elif value.startswith('str.replace('):
            value = eval(value)
        elif value == 'None':
            value = None
        self.set_global(name, value)
        return self

    def get_script_to_namespace(self, group):
        """Decode one <script> element into a list of PythonTidy functions,
        resolved by name, and install it as the corresponding global."""
        name = group.attrib['name']
        result = []
        self.set_global(name, result)
        for elt in group.findall('xform'):
            name = elt.attrib['name']
            result.append(self.get_global(name))
        return self

    def get_substitutions_to_namespace(self, elt):
        """Decode one <substitute> element into SUBSTITUTE_FOR."""
        target = elt.attrib['target']
        replacement = elt.attrib['replacement']
        PythonTidy.SUBSTITUTE_FOR[target] = replacement
        return self
def main():
    """Command-line entry point.

    Parses options, optionally dumps the default configuration or loads one
    from an XML "ini" file, then runs PythonTidy over the chosen
    input/output ("-" or omitted means stdin/stdout).
    """
    PARSER = optparse.OptionParser(usage='%prog [options] [input [output]]'
                                   , description=__doc__)
    PARSER.add_option('-u', '--ini_file',
                      help='''Read configuration parameters from an ini_file.'''
                      , default=None)
    PARSER.add_option('-U', '--dump',
                      help='''Dump default PythonTidy configuration parameters out to a file.'''
                      , default=None)
    (OPTS, ARGS) = PARSER.parse_args()
    if len(ARGS) > 2:
        PARSER.error('At most, only two arguments are allowed.')
    # Second positional arg (if any) is the output file; "-" means stdout.
    if len(ARGS) > 1:
        FILE_OUTPUT = ARGS[1]
    else:
        FILE_OUTPUT = '-'
    if FILE_OUTPUT in ['-']:
        FILE_OUTPUT = sys.stdout
    # First positional arg (if any) is the input file; "-" means stdin.
    if len(ARGS) > ZERO:
        FILE_INPUT = ARGS[ZERO]
    else:
        FILE_INPUT = '-'
    if FILE_INPUT in ['-']:
        FILE_INPUT = sys.stdin
    if OPTS.dump is None:
        pass
    else:
        # --dump: write the current defaults to a file and exit.
        CONFIG = Config()
        CONFIG.from_pythontidy_namespace()
        CONFIG.write(file=OPTS.dump)
        sys.exit('Dump complete!')
    if OPTS.ini_file is None:
        pass
    else:
        # --ini_file: overlay PythonTidy's globals with values from the file.
        CONFIG = Config(file=OPTS.ini_file)
        CONFIG.to_pythontidy_namespace()
        del CONFIG
    PythonTidy.tidy_up(FILE_INPUT, FILE_OUTPUT)
# Script entry point.
if __name__ == "__main__":
    main()
# Fin
| StarcoderdataPython |
import http.server
import socketserver

# Initializing the web server (original comment was in Portuguese).
# ============================================================================
PORT = 8000  # TCP port to listen on
HANDLER = http.server.SimpleHTTPRequestHandler  # serves files from the cwd

# Serve the current directory over HTTP until the process is interrupted.
with socketserver.TCPServer(("", PORT), HANDLER) as httpd:
    print("Servindo na porta:", PORT)
    httpd.serve_forever()
| StarcoderdataPython |
152036 | <reponame>DanNixon/PlayMusicCL<gh_stars>100-1000
from setuptools import setup

# Packaging metadata for the playmusiccl command-line client.
setup(
    name='playmusiccl',
    version='0.6.2',
    # Installs a `playmusiccl` console command that calls playmusiccl.run().
    entry_points = {
        'console_scripts': ['playmusiccl=playmusiccl:run'],
    },
    description='Text based command line client for Google Play Music',
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Topic :: Multimedia :: Sound/Audio :: Players',
    ],
    keywords='google play music command line',
    url='http://github.com/DanNixon/PlayMusicCL',
    # NOTE(review): author fields hold "<NAME>"/"<EMAIL>" placeholders left
    # by dataset anonymization; restore real values before publishing.
    author='<NAME>',
    author_email='<EMAIL>',
    license='Apache',
    packages=['playmusiccl'],
    install_requires=[
        'gmusicapi',  # Google Play Music API
        'pylast',     # Last.fm scrobbling
    ],
    include_package_data=True,
    zip_safe=False
)
| StarcoderdataPython |
3216363 | <gh_stars>10-100
from evaluation.fix_spans import _contiguous_ranges
import pandas as pd
import numpy as np
from ast import literal_eval
import os
import sys
from evaluation.metrics import f1
def get_spans_from_offsets(text, offsets):
    """Convert character-offset annotations into the text snippets they cover.

    *offsets* is a flat list of character indices; _contiguous_ranges groups
    them into inclusive (start, end) runs, and each run is sliced out of
    *text* (end is inclusive, hence the +1).
    """
    return [text[run[0]:run[1] + 1] for run in _contiguous_ranges(offsets)]
# Load the gold test spans; the "spans" column is stored as the repr of a
# Python list, so parse it back with literal_eval.
test_file = "./data/tsd_test_spans.csv"
test_df = pd.read_csv(test_file)
test_df["spans"] = test_df["spans"].apply(lambda x: literal_eval(x))
output_file = "./example_table.txt"

# Read every model's prediction file (one line per test example,
# "<id>\t<span list>") in sorted filename order so indices line up below.
all_preds = []
for predictions_file in sorted(os.listdir("results/test_predictions")):
    with open(os.path.join("results/test_predictions", predictions_file), "r") as f:
        all_preds.append(f.readlines())

# (Kept from the original: earlier approach that tracked the top-3 examples
# by F1 std-dev instead of the median one.)
# best_example_id = -1
# best_f1_std = 0
# second_example_id = -1
# second_f1_std = 0
# third_example_id = -1
# third_f1_std = 0

# For every test example, compute the std-dev of the per-model F1 scores;
# high variance means the models disagree most on that example.
example_f1_variance = {}
for example_id in range(2000):
    # example_id = int(sys.argv[1]) # something between 0 and 1999
    f1_scores = []
    example_text = test_df.iloc[example_id]["text"]
    gold = test_df.iloc[example_id]["spans"]
    for idx, predictions_file in enumerate(
        sorted(os.listdir("results/test_predictions"))
    ):
        # print(predictions_file)
        pred = all_preds[idx][example_id]
        # Offsets are deduplicated and sorted before scoring.
        pred = sorted(np.unique(literal_eval(pred.split("\t")[1])))
        f1_scores.append(f1(pred, gold))
    example_f1_variance[example_id] = np.std(f1_scores)
    # if np.std(f1_scores) > best_f1_std:
    #     third_example_id = second_example_id
    #     third_f1_std = second_f1_std
    #     second_example_id = best_example_id
    #     second_f1_std = best_f1_std
    #     best_example_id = example_id
    #     best_f1_std = np.std(f1_scores)
    # elif np.std(f1_scores) > second_f1_std:
    #     third_example_id = second_example_id
    #     third_f1_std = second_f1_std
    #     second_example_id = example_id
    #     second_f1_std = np.std(f1_scores)
    # elif np.std(f1_scores) > third_f1_std:
    #     third_example_id = example_id
    #     third_f1_std = np.std(f1_scores)
# from scipy.stats import mode

# Pick the example whose F1 variance equals the median variance.
# NOTE(review): with an even number of values np.median can return a mean of
# two values that matches no example, leaving example_id from the loop above.
val = np.median(np.array(list(example_f1_variance.values())))
print(val)
for k, v in example_f1_variance.items():
    if v == val:
        example_id = k
        break
# sorted_example_f1_variance = [
#     k for k, v in sorted(example_f1_variance.items(), key=lambda x: x[1], reverse=True)
# ]
# example_id = sorted_example_f1_variance[15]

# Build a table: the example text, the gold spans, then each model's spans.
example_text = test_df.iloc[example_id]["text"]
gold = test_df.iloc[example_id]["spans"]
example_spans = get_spans_from_offsets(example_text, gold)
file_names = ["text_" + str(example_id), "ground"]
spans = [example_text, example_spans]
for idx, predictions_file in enumerate(sorted(os.listdir("results/test_predictions"))):
    pred = all_preds[idx][example_id]
    pred = sorted(np.unique(literal_eval(pred.split("\t")[1])))
    text_spans = get_spans_from_offsets(example_text, pred)
    # Shorten the filename into a model label.
    file_names.append(
        predictions_file.replace("_spans-pred.txt", "").replace("original_test-", "")
    )
    spans.append(text_spans)
# Emit the table as LaTeX and as a GitHub-flavored markdown file.
df = pd.DataFrame.from_dict({"file_names": file_names, "spans": spans})
df.to_latex(output_file, index=False)
s = df.to_markdown(tablefmt="github")
with open("example_markdown_table.md", "w") as f:
    f.write(s)
163170 | #!/usr/bin/env python3
import argparse
import pytest
import datetime
import dateutil
from dateutil.parser import parse
import os
legal_days_of_week="MTWRF"
def mkdir_p(newdir):
    """Create *newdir* the way a good ``mkdir -p`` should.

    - already exists: silently complete
    - a regular file is in the way: raise OSError
    - missing parent directories: create them recursively

    Source: http://code.activestate.com/recipes/82465-a-friendly-mkdir/
    """
    if os.path.isdir(newdir):
        return
    if os.path.isfile(newdir):
        raise OSError("a file with the same name as the desired " \
                      "dir, '%s', already exists." % newdir)
    parent, leaf = os.path.split(newdir)
    # Build the parent chain first, then this level.
    if parent and not os.path.isdir(parent):
        mkdir_p(parent)
    if leaf:
        os.mkdir(newdir)
def validate_days_of_week(days_of_week):
    """Raise ValueError if *days_of_week* contains any character outside
    the legal set (MTWRF). Returns None on success."""
    for c in days_of_week:
        if c not in legal_days_of_week:
            # FIX: the original message was missing the space before the
            # legal-character list ("...chars fromMTWRF").
            raise ValueError("days_of_week should contain only chars from " +
                             legal_days_of_week)
def validate_date(date):
    """Parse *date* (any format dateutil understands) into a datetime."""
    return dateutil.parser.parse(date)
def validate_start_date(date):
    """Parse *date* and require that it falls on a Sunday."""
    parsed = validate_date(date)
    # datetime.weekday(): Monday == 0 ... Sunday == 6
    # (see https://docs.python.org/2/library/datetime.html)
    if parsed.weekday() != 6:
        raise ValueError("Start Date should be a Sunday")
    return parsed
def add_weeks(start_datetime, weeks):
    """
    Return a datetime that is start_datetime plus *weeks* weeks in the future.

    Example:
        2019-03-31, weeks=0 => 2019-03-31
        2019-03-31, weeks=1 => 2019-04-07
        2019-03-31, weeks=2 => 2019-04-14
    """
    return n_days_ahead(start_datetime, 7 * weeks)
def n_days_ahead(start_datetime, days):
    """Return *start_datetime* shifted *days* days into the future."""
    return start_datetime + datetime.timedelta(days=days)
def days_for_this_week(start_datetime, dates):
    """
    Return a list of len(dates) datetimes, one per lecture day letter.

    *start_datetime* is the Sunday that opens the week; each letter in
    *dates* is offset from it (+1 so "M" lands on the following Monday).
    Warning: does not account for holidays (see days_without_holidays).
    """
    return [n_days_ahead(start_datetime, day_to_num(d) + 1) for d in dates]
def days_without_holidays(days_of_week, holiday_list):
    """
    Given the candidate lecture datetimes for one week, return only those
    that do not clash with a holiday.

    *holiday_list* contains date strings, which are parsed before comparison.
    """
    holidays = [parse(h) for h in holiday_list]
    return [day for day in days_of_week if day not in holidays]
def day_to_num(day):
    """Map a weekday letter to its index: M->0, T->1, W->2, R->3, F->4."""
    offsets = {"M": 0, "T": 1, "W": 2, "R": 3, "F": 4}
    if day not in offsets:
        raise ValueError("day_to_num should contain only chars from" +
                         legal_days_of_week)
    return offsets[day]
def find_first_lect_datetime(start_datetime, days_of_week):
    """
    Note: no longer used.

    When start_datetime is not itself the first lecture date, walk forward
    one day at a time until the weekday matches the first letter of
    *days_of_week*, and return that date.
    """
    target = day_to_num(days_of_week[0])
    current = start_datetime
    while current.weekday() != target:
        current = n_days_ahead(current, 1)
    return current
def make_date_list(start_date, weeks, days_of_week, holiday_list):
    """
    Build the lecture dates for the whole term.

    Returns a list with one entry per week; each entry is a list of
    datetime objects for that week's lecture days, with holidays removed.
    (Note: despite the yyyy-mm-dd strings shown in the example below, the
    returned values are datetimes, not strings.)
    Example:
    TODO shouldn't the 3 below be 2? As this is for 2 weeks?
    make_date_list("2019-03-31",2,"MW",["2019-04-08","2019-04-09"]) =>
    ["2019-04-01","2019-04-03","2019-04-10"]
    """
    validate_days_of_week(days_of_week)
    start_datetime = validate_start_date(start_date)
    final_datetimes = []
    for i in range(weeks):
        # Move start_datetime forward i weeks, then expand to that week's
        # lecture days and strip any that clash with holidays.
        start_of_week_datetime = add_weeks(start_datetime,i)
        days_this_week_unsanitized = days_for_this_week(start_of_week_datetime, days_of_week)
        days_this_week_sanitized = days_without_holidays(days_this_week_unsanitized, holiday_list)
        final_datetimes.append(days_this_week_sanitized)
    return final_datetimes
def lecture_gen(path, start_date, weeks, days_of_week, holiday_list):
    """
    Create a _lectures directory under ``path`` populated with lecture stubs.

    One stub file is written per valid lecture date, where the dates are
    derived from ``start_date``, the number of ``weeks``, the lecture
    ``days_of_week`` (e.g. "MWF"), and a ``holiday_list`` of dates on which
    no lecture may be held.
    """
    # Create the _lectures directory; abort if that fails.
    directory_path = os.path.join(path, "_lectures")
    try:
        mkdir_p(directory_path)
    except OSError:
        print ("Creation of the directory %s failed" % directory_path)
        return
    else:
        print ("Successfully created the directory %s" % directory_path)
    # Compute the lecture dates, grouped by week, with holidays removed.
    valid_dates = make_date_list(start_date, weeks, days_of_week, holiday_list)
    lecture_num = 0
    for dates_by_week in valid_dates:
        for date in dates_by_week:
            lecture_num += 1  # first lecture num will be 1
            filename = "lecture" + str(lecture_num)
            # "with" guarantees the stub is closed even if a write fails
            # (the original left the handle open on exception).
            with open(os.path.join(directory_path, filename), "w+") as f:
                f.write("---\n")
                f.write("num: " + '"lect' + str(lecture_num) + '"\n')
                f.write("lecture_date: " + str(date.date()) + "\n")
                f.write("desc: " + '\n')
                f.write("ready: " + "false\n")
                f.write("pdfurl: " + "\n")
                f.write("---\n")
if __name__=="__main__":
    # Smoke tests: print the computed lecture dates for a few configurations.
    #lecture_gen(os.getcwd(), "2019-03-31",2,"MW",["2019-04-08","2019-04-09"])
    print("valid dates 1")
    valid_dates = make_date_list("2019-03-31",2,"MW",["2019-04-08","2019-04-09"])
    print(valid_dates)
    print("valid dates 2")
    valid_dates2 = make_date_list("2019-03-31",2,"MWF",["2019-04-08","2019-04-09"])
    print(valid_dates2)
    print("valid dates 3")
    valid_dates3 = make_date_list("2019-03-31",2,"TWR",["2019-04-08","2019-04-09"])
    print(valid_dates3)
| StarcoderdataPython |
1733700 | <reponame>RodrigoMSCruz/CursoEmVideo.com-Python<gh_stars>0
# Read 6 numbers and show their sum.  Any odd value entered is ignored;
# only even numbers are added.  (User-facing prompts kept in Portuguese.)
print('Digite 6 números para serem somados. Números ímpares serão desconsiderados.')
s = 0  # running sum of the even numbers
c = 0  # count of even numbers entered
for i in range(1, 7, 1):
    n = int(input('Digite um número: '))
    if n % 2 == 0:
        s += n
        c += 1
print('O somatório de {} números pares é: {}.'.format(c, s))
3257533 | <filename>samurai/settings.py
#!/usr/bin/env python3
from os import getenv
def get_env_debug_secret_hosts():
    """Read DEBUG, SECRET_KEY and ALLOWED_HOSTS from the environment.

    Returns:
        (debug, secret_key, allowed_hosts): debug as a bool, secret_key
        falling back to the baked-in default, allowed_hosts as a list of
        whitespace-separated entries.

    Raises:
        Exception: when SECRET_KEY is unset/empty while DEBUG is off, so the
        default key can never be shipped to production.
    """
    is_debug = bool(getenv("DEBUG"))
    has_secret = bool(getenv("SECRET_KEY"))
    if not (has_secret or is_debug):
        raise Exception("Won't allow you to use default secret key out of DEBUG.")
    secret = getenv("SECRET_KEY", "<KEY>")
    hosts = getenv("ALLOWED_HOSTS", "").split()
    return is_debug, secret, hosts
def get_env_databases(base_dir):
    """Build the Django DATABASES setting.

    Tries to parse a DATABASE_URL of the form
    ``scheme://user:password@host:port/name``; when it is absent or
    malformed, falls back to a SQLite database inside ``base_dir``.
    """
    scheme = pg_user = pg_password = pg_host = pg_port = pg_name = None
    postgres_url = getenv("DATABASE_URL")
    if postgres_url is not None:
        try:
            # Turning '@' and '/' into ':' leaves two empty fields after the
            # scheme, hence the two throwaway slots in the unpacking.
            pieces = postgres_url.replace("@", ":").replace("/", ":").split(":")
            scheme, _, _, pg_user, pg_password, pg_host, pg_port, pg_name = pieces
        except ValueError:
            scheme = pg_user = pg_password = pg_host = pg_port = pg_name = None
    # NOTE(review): setting a PG_NAME environment variable forces the SQLite
    # fallback even when DATABASE_URL parsed fine — confirm this is intentional.
    if not pg_name or getenv("PG_NAME"):
        default = {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir / "db.sqlite3"}
    else:
        default = {
            "ENGINE": "django.db.backends.postgresql_psycopg2",
            "NAME": pg_name,
            "USER": pg_user,
            "PASSWORD": pg_password,
            "HOST": pg_host,
            "PORT": pg_port,
        }
    return {"default": default}
def get_env_email():
    """Return the SMTP settings tuple (host, user, password, port, use_tls)."""
    host = getenv("EMAIL_HOST")
    user = getenv("EMAIL_HOST_USER")
    password = getenv("EMAIL_HOST_PASSWORD")
    port = getenv("EMAIL_PORT")
    use_tls = bool(getenv("EMAIL_USE_TLS"))
    return host, user, password, port, use_tls
| StarcoderdataPython |
153978 | <reponame>58563528/nonebot-hk-reporter
import nonebot
from nonebot.adapters.cqhttp import Bot as CQHTTPBot
# Empty command_start lets commands be invoked without a prefix character.
nonebot.init(command_start=[""])
app = nonebot.get_asgi()
driver = nonebot.get_driver()
# Register the CQHTTP (OneBot) adapter under the name 'cqhttp'.
driver.register_adapter('cqhttp', CQHTTPBot)
nonebot.load_builtin_plugins()
nonebot.load_plugin('nonebot_plugin_help')
nonebot.load_plugins('src/plugins')
if __name__ == "__main__":
    # Start the ASGI app only when run directly, not when imported.
    nonebot.run(app="bot:app")
| StarcoderdataPython |
1628242 | <reponame>nguyentientungduong/python_client<filename>sample/RemoveRowByRowkey.py
#!/usr/bin/python
import griddb_python as griddb
import sys
factory = griddb.StoreFactory.get_instance()
argv = sys.argv
containerName = "SamplePython_RemoveRowByRowKey"
rowCount = 5
nameList = ["notebook PC", "desktop PC", "keyboard", "mouse", "printer"]
numberList = [108, 72, 25, 45, 62 ]
try:
# Get GridStore object
gridstore = factory.get_store(host=argv[1], port=int(argv[2]), cluster_name=argv[3], username=argv[4], password=argv[5])
# When operations such as container creation and acquisition are performed, it is connected to the cluster.
gridstore.get_container("containerName")
print("Connect to Cluster")
conInfo = griddb.ContainerInfo(name=containerName,
column_info_list=
[["id", griddb.Type.INTEGER],
["productName", griddb.Type.STRING],
["count", griddb.Type.INTEGER]],
type=griddb.ContainerType.COLLECTION,
row_key=True)
col = gridstore.put_container(conInfo)
print("Create Collection name=", containerName)
# Register multiple rows
rowList = []
for i in range(0,rowCount):
rowList.append([i, nameList[i], numberList[i]])
col.multi_put(rowList)
print("Sample data generation: Put Rows count=", rowCount)
# Delete a row
# (1)Get the container
col1 = gridstore.get_container(containerName)
if col1 == None:
print( "ERROR Container not found. name=", containerName)
# (2)Delete row by row key
col1.remove(3)
print("Delete Row rowkey=3")
print("success!")
except griddb.GSException as e:
for i in range(e.get_error_stack_size()):
print("[", i, "]")
print(e.get_error_code(i))
print(e.get_location(i))
print(e.get_message(i))
| StarcoderdataPython |
3268888 | <reponame>curtjen/clither
#!/bin/env python
# ===== Usage =====
# --- Import ---
# import helpers
#
# --- create_directory("DIRECTORY_NAME") ---
#
# --- backup_file("FILE_PATH") ---
#
# --- create_symlink("FILE_PATH") ---
# This will create a symlink inside the $HOME directory for a given file path.
import calendar
import json
import os
import shutil
import sys
import time
from collections import namedtuple
from glob import glob
from shutil import copy
#TODO(xnz): figure out a way to make argparse play nice on libs.
# Global switch: when --dry_run is on the command line, the helpers below only
# print what they would do instead of touching the filesystem.
dry_run_flag = False
if '--dry_run' in sys.argv:
    dry_run_flag = True
# Build a shell "cd <path>" command string for run_cmd().
cd = lambda path: 'cd ' + path
# Any json we use should (for now) should not use spaces or - in the key
def dict_to_obj(blueprint_dict):
    """Convert a dict into a namedtuple so its keys become attributes.

    Keys must be valid Python identifiers (no spaces or dashes).
    """
    holder_type = namedtuple('dynamicHolder', list(blueprint_dict))
    return holder_type(**blueprint_dict)
# Repository root is the parent of the current working directory.
BASE_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# post 3.4 use:
# from pathlib import Path
# path = Path("/here/your/path/file.txt")
# print(path.parent)
#TODO(xnz): mk naming consistent.
# Well-known locations, declared relative to BASE_DIR ('' is BASE_DIR itself).
# NOTE(review): 'clither_repo' is a URL but is run through the same
# os.path.join flattening below, which mangles it — confirm intended usage.
file_paths = {
    'clither_repo': 'https://github.com/curtjen/clither.git',
    'base_dir': '', # + is temps
    'clither_path': '/clither',
    'clither_lib_path': '/clither/template_files',
    'clither_tmp_files_path': '/clither/lib',
    'clither_pather': '/clither/pather.py',
    'clither_run': '/clither/template_files/run.py',
    'clither_run_help': '/clither/lib/helpers.py',
    'clither_tmp_config': '/clither/template_files/config.json',
    'clither_default_json': '/clither/template_files/clither.config.json',
    'clither_default_addons_rc': '/clither/template_files/general_shell_materal',
    'custom_path': '/clither_custom/',
    'custom_lib_path': '/clither_custom/lib',
    'custom_rcs_path': '/clither_custom/rcs',
    'custom_addons_path': '/clither_custom/addons',
    'custom_bin_path': '/clither_custom/bins',
    'custom_bin_conflicts_path': '/clither_custom/bins/conflicts',
    'custom_configs_path': '/clither_custom/configs',
    'custom_overrides_path': '/clither_custom/configs/overrides',
    'custom_fallbacks_path': '/clither_custom/configs/fallbacks',
    'custom_lockfile_path': '/clither_custom/lockfiles',
    'custom_addons_config': '/clither_custom/config.json',
    'custom_default_addon_path': '/clither_custom/addons/clither_defaults',
    'custom_default_addon_rcs_path': '/clither_custom/addons/clither_defaults/rcs',
}
# Resolve every relative entry against BASE_DIR using OS-native separators.
file_paths = {
    key: os.path.join(BASE_DIR, *value.split('/'))
    for key, value in file_paths.items()}
# Attribute-style access to the same table, e.g. paths.custom_addons_path.
paths = dict_to_obj(file_paths)
def json_to_obj(json_file_path):
    """Experimental: load a JSON file and expose its top-level keys as attributes."""
    with open(json_file_path) as fp:
        parsed = json.load(fp)
    return dict_to_obj(parsed)
def create_directory(dir):
    """Create ``dir`` (including parents) unless it already exists or dry-run is on."""
    if os.path.exists(dir):
        return
    message = 'mkdir ' + dir
    if not dry_run(message):
        os.makedirs(dir)
        print(message)
def mk_clither_custom_dirs():
    """Create the clither_custom base directory and its standard subdirectories."""
    # '' stands for the base custom_path directory itself.
    for subdir in ('', 'rcs', 'addons', 'bins', 'bins/conflicts'):
        create_directory('{0}/{1}'.format(paths.custom_path, subdir))
def backup_file(file_path):
    """Move *file_path* into a sibling ``backups_rcs`` directory.

    The backup keeps the original name with an epoch timestamp appended
    (e.g. ``zshrc_1234567890``).  Does nothing when the file is missing.

    args:
        file_path: (str) Path to backup file.
    """
    if not os.path.isfile(file_path):
        return
    containing_dir = os.path.dirname(file_path)
    base_name = os.path.basename(file_path)
    # Keep backups next to the original, inside backups_rcs/.
    backup_dir = '{0}/backups_rcs'.format(containing_dir)
    create_directory(backup_dir)
    stamp = int(time.time())
    destination = '{0}/{1}_{2}'.format(backup_dir, base_name, stamp)
    msg = 'backup_file: cp {0} {1}'.format(file_path, destination)
    if dry_run(msg):
        return
    print(msg)
    os.rename(file_path, destination)
def clear_file(file_path):
    """Truncate *file_path* to empty (honors dry-run)."""
    msg = 'clear_file: ' + file_path
    if dry_run(msg):
        return
    #TODO(xnz): Check for better way to do this.
    print(msg)
    # Opening in "w" mode truncates; the explicit write keeps the intent clear.
    with open(file_path, 'w') as handle:
        handle.write('')
def append_to_file(file_path, string):
    """Append *string* plus a trailing newline to *file_path* (honors dry-run)."""
    msg = 'append_to_file: echo {0} >> {1}'.format(string, file_path)
    if dry_run(msg):
        return
    print(msg)
    with open(file_path, 'a') as handle:
        handle.write(string + '\n')
def dry_run(msg):
    """Return True and print msg if dry_run flag is set, else return False."""
    if dry_run_flag:
        print('dry_run: ' + msg)
        return True
    # Previously fell through with a bare "return" (None).  Return False so the
    # function matches its documented contract; callers only test truthiness,
    # so this is backward compatible.
    return False
def run_cmd(*cmd_list):
    """Join the given commands with "; " and run them in a shell.

    args:
        cmd_list: (str) The commands to run.  Empty/falsy entries count as
        nothing to run.
    """
    # Bug fix: in Python 3, filter() returns an iterator, which is always
    # truthy, so the old "if not filter(None, cmd_list)" guard never fired.
    # any() restores the intended "skip when there is nothing to run" check.
    if not any(cmd_list):
        return
    cmd = '; '.join(str(x) for x in cmd_list)
    msg = 'run cmd: ' + cmd
    if dry_run(msg):
        return
    #TODO(xnz): mv this to a subprocess
    print(msg)
    os.system(cmd)
def create_symlink(src, dst):
    """Create (or replace) a symlink at *dst* pointing to *src* (honors dry-run)."""
    msg = 'create symlink: ln -s {0} {1}'.format(src, dst)
    if dry_run(msg):
        return
    print(msg)
    # Bug fix: use lexists (not exists) so an existing but *broken* symlink at
    # dst is also removed; os.path.exists follows links and misses it, which
    # made the subsequent os.symlink raise FileExistsError.
    if os.path.lexists(dst):
        os.remove(dst)
    os.symlink(src, dst)
def copy_file(src_file, dst, write_over=False):
    """Copy *src_file* into directory *dst*, optionally overwriting.

    An existing target is left alone unless ``write_over`` is True, in which
    case it is removed first (the removal is skipped under dry-run).
    """
    target = os.path.join(dst, os.path.basename(src_file))
    if os.path.exists(target):
        if not write_over:
            print('copy_file: dst already exists: ' + target)
            return
        if not dry_run_flag:
            print('rm ' + target)
            os.remove(target)
    msg = 'cp {0} {1}'.format(src_file, target)
    if dry_run(msg):
        return
    print(msg)
    copy(src_file, target)
def get_dir_list(dir_path):
    """Return the entries of *dir_path*; under dry-run a missing dir yields [].

    Raises:
        OSError: when the directory cannot be listed and dry-run is off.
    """
    try:
        return os.listdir(dir_path)
    except OSError:
        if dry_run_flag:
            return []
        # Re-raise the original exception unchanged instead of wrapping it in a
        # fresh OSError, which discarded errno/filename and the traceback.
        raise
def get_globed_dirs(pattern):
    """Return the glob matches for *pattern* that are directories."""
    matches = glob(pattern)
    return [match for match in matches if os.path.isdir(match)]
def process_area(area_of_interest, process_func, missing_config_func):
    """For each addon, find its config and run process_func on one section.

    Looks for ``area_of_interest`` in, by priority: the per-addon override
    config, the addon's own clither.config.json, then the fallback config.
    When no config is found at all, ``missing_config_func(dir)`` is called.
    """
    def _mk_paths(path, config_file_name):
        # NOTE: ``dir`` is the enclosing loop variable (closure), so the
        # addon path is always <path>/<current addon dir>.
        addon_path = os.path.join(path, dir)
        config_path = os.path.join(addon_path, config_file_name)
        return addon_path, config_path
    def _process_config_area(addon_path, config_path):
        # Parse the config and hand the requested section to process_func.
        with open(config_path) as file:
            try:
                config = json.load(file)
            except ValueError:
                print('file format was not a json:', config_path) # mark better
                return
        # setdefault with no value inserts/returns None when the key is absent.
        result = config.setdefault(area_of_interest)
        if not result:
            msg = 'File does not have {0} section: {1}'
            print(msg.format(area_of_interest, config_path))
            return
        process_func(result, addon_path)
    addon_dirs = get_dir_list(paths.custom_addons_path)
    for dir in addon_dirs:
        # Priority 1: per-addon override config.
        addon_path, config_path = _mk_paths(paths.custom_overrides_path, dir)
        print('Run {0} on: {1}'.format(area_of_interest, dir))
        if os.path.exists(config_path):
            _process_config_area(addon_path, config_path)
            continue
        # Priority 2: the addon's own clither.config.json.
        addon_path, config_path = _mk_paths(paths.custom_addons_path, 'clither.config.json')
        if os.path.exists(config_path):
            _process_config_area(addon_path, config_path)
            continue
        # Priority 3: project-level fallback config.
        addon_path, config_path = _mk_paths(paths.custom_fallbacks_path, dir)
        if os.path.exists(config_path):
            _process_config_area(addon_path, config_path)
            continue
        print('No config found for ' + dir)
        missing_config_func(dir)
def get_epoc_time():
    """Return the current UTC epoch time in whole seconds, as a string."""
    now_utc = time.gmtime()
    return str(calendar.timegm(now_utc))
def clean_dir(path):
    """Recursively delete *path* if it exists; silently do nothing otherwise."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def get_new_path(path, parent_path, prefix):
    """Flatten *path* into a single prefixed filename under *parent_path*.

    Slashes in *path* are replaced with '+' so the result is one path
    component, then the prefix is prepended and the whole thing is joined
    under *parent_path*.
    """
    #TODO(xnz): not system portable... (only handles '/' separators)
    # Fix: removed a stray artifact that had been fused onto the return line,
    # which would have raised at runtime.
    flattened = prefix + path.replace('/', '+')
    return os.path.join(parent_path, flattened)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.