Dataset schema (one row per source file):

| column | type | range | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 1 – 1.03M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 3 – 239 | no |
| max_stars_repo_name | string | length 5 – 130 | no |
| max_stars_repo_head_hexsha | string | length 40 – 78 | no |
| max_stars_repo_licenses | list | length 1 – 10 | no |
| max_stars_count | int64 | 1 – 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 3 – 239 | no |
| max_issues_repo_name | string | length 5 – 130 | no |
| max_issues_repo_head_hexsha | string | length 40 – 78 | no |
| max_issues_repo_licenses | list | length 1 – 10 | no |
| max_issues_count | int64 | 1 – 67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 3 – 239 | no |
| max_forks_repo_name | string | length 5 – 130 | no |
| max_forks_repo_head_hexsha | string | length 40 – 78 | no |
| max_forks_repo_licenses | list | length 1 – 10 | no |
| max_forks_count | int64 | 1 – 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 1 – 1.03M | no |
| avg_line_length | float64 | 1 – 958k | no |
| max_line_length | int64 | 1 – 1.03M | no |
| alphanum_fraction | float64 | 0 – 1 | no |
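The rows below follow this schema. A minimal sketch of iterating such a dataset with the Hugging Face `datasets` library; the dataset name is a placeholder assumption, since the dump itself does not identify the dataset:

```python
# A minimal sketch of iterating rows with the schema above, assuming the
# dump comes from a Hugging Face dataset. The dataset name below is a
# placeholder assumption; any dataset with this schema works the same way.
from datasets import load_dataset

ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in ds.take(2):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
    print(row["content"][:120])  # first 120 characters of the source file
```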
Row 1 · hexsha 4a1347b02d3fc237367e2d4998338508f74966f5 · size 3,059 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | realworldrl_suite/utils/accumulators_test.py | Roryoung/realworldrl_suite | dfcbb700eff20c39c649e235e16d087393bf7e9a | ["Apache-2.0"] | 284 | 2020-03-23T17:10:46.000Z | 2022-03-27T08:35:12.000Z |
| max_issues | realworldrl_suite/utils/accumulators_test.py | dylanamiller/rwrl_pybullet | be7a51cffa7f5f9cb77a387c16bad209e0f851f8 | ["Apache-2.0"] | 2 | 2021-05-21T14:04:51.000Z | 2021-11-29T12:33:08.000Z |
| max_forks | realworldrl_suite/utils/accumulators_test.py | dylanamiller/rwrl_pybullet | be7a51cffa7f5f9cb77a387c16bad209e0f851f8 | ["Apache-2.0"] | 21 | 2020-07-29T03:23:31.000Z | 2022-01-16T11:37:49.000Z |

content:
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for accumulators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import numpy.testing as npt
import realworldrl_suite.environments as rwrl
class RandomAgent(object):
def __init__(self, action_spec):
self.action_spec = action_spec
def action(self):
return np.random.uniform(
self.action_spec.minimum,
self.action_spec.maximum,
size=self.action_spec.shape)
class AccumulatorsTest(parameterized.TestCase):
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def test_logging(self, domain_name, task_name):
temp_dir = self.create_tempdir()
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True},
multiobj_spec={
'enable': True,
'objective': 'safety',
'observed': False,
},
log_output=os.path.join(temp_dir.full_path, 'test.pickle'),
environment_kwargs=dict(log_safety_vars=True))
random_policy = RandomAgent(env.action_spec()).action
n_steps = 0
for _ in range(3):
timestep = env.step(random_policy())
constraints = (~timestep.observation['constraints']).astype('int')
n_steps += 1
while not timestep.last():
timestep = env.step(random_policy())
constraints += (~timestep.observation['constraints']).astype('int')
npt.assert_equal(
env.stats_acc.stat_buffers['safety_stats']['total_violations'][-1],
constraints)
env.write_logs()
with open(env.logs_path, 'rb') as f:
read_data = np.load(f, allow_pickle=True)
data = read_data['data'].item()
self.assertLen(data.keys(), 4)
self.assertIn('safety_vars_stats', data)
self.assertIn('total_violations', data['safety_stats'])
self.assertIn('per_step_violations', data['safety_stats'])
self.assertIn('episode_totals', data['multiobj_stats'])
self.assertIn('episode_totals', data['return_stats'])
self.assertLen(data['safety_stats']['total_violations'], n_steps)
self.assertLen(data['safety_vars_stats'], n_steps)
self.assertLen(data['multiobj_stats']['episode_totals'], n_steps)
self.assertLen(data['return_stats']['episode_totals'], n_steps)
if __name__ == '__main__':
absltest.main()
avg_line_length 33.988889 · max_line_length 77 · alphanum_fraction 0.700556
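For reference, the pickled log this test verifies can be read back outside the test the same way; a minimal sketch, where the path is illustrative and stands in for `env.logs_path`:

```python
# Minimal sketch of reading the pickled log back, mirroring the asserts
# in the test above; '/tmp/test.pickle' is an illustrative path.
import numpy as np

with open('/tmp/test.pickle', 'rb') as f:
    read_data = np.load(f, allow_pickle=True)
data = read_data['data'].item()
print(sorted(data.keys()))
print(data['safety_stats']['total_violations'][-1])
```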
Row 2 · hexsha 4a1348544e3864510af8295a9190f1b687ccc320 · size 91 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | python/teste2.py | viniciusRG1/salve | b009370430f2338d553301c9a04fdba71f0f68a3 | ["MIT"] | null | null | null |
| max_issues | python/teste2.py | viniciusRG1/salve | b009370430f2338d553301c9a04fdba71f0f68a3 | ["MIT"] | null | null | null |
| max_forks | python/teste2.py | viniciusRG1/salve | b009370430f2338d553301c9a04fdba71f0f68a3 | ["MIT"] | null | null | null |

content:
idade = int(input("enter your age: "))
if idade >= 18:
    print("you are of legal age")
avg_line_length 15.166667 · max_line_length 39 · alphanum_fraction 0.637363
Row 3 · hexsha 4a134967ffc8bd5254637cde815932708614fd78 · size 6,877 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | code/batch-ai.py | lu-project/batch-ai | ffd8d40f924f75a8efc76f0f359db1c2e183ae7e | ["MIT"] | null | null | null |
| max_issues | code/batch-ai.py | lu-project/batch-ai | ffd8d40f924f75a8efc76f0f359db1c2e183ae7e | ["MIT"] | null | null | null |
| max_forks | code/batch-ai.py | lu-project/batch-ai | ffd8d40f924f75a8efc76f0f359db1c2e183ae7e | ["MIT"] | null | null | null |

content:
######################################################################
# Lablup hackfest concept code for Azure Batch for AI
# https://docs.microsoft.com/en-us/azure/batch-ai/quickstart-python
######################################################################
# pip install azure-batch
# pip install azure-mgmt-scheduler # Install the latest Storage management library
# pip install --pre azure-mgmt-compute # will install only the latest Compute Management library
# pip install azure
# pip install --pre azure #We publish a preview version of this package, which you can access using the --pre flag:
# az provider register -n Microsoft.BatchAI
# az provider register -n Microsoft.Batch
# az ad sp create-for-rbac --name dwlablupapp --password "<password_here>"
# Retrying role assignment creation: 1/36
# {
# "appId": "<appid-here>",
# "displayName": "dwlablupapp",
# "name": "http://dwlablupapp",
# "password": "<password-here>",
# "tenant": "<tenant-here>"
# }
# credentials used for authentication
client_id = 'my_aad_client_id'
secret = 'my_aad_secret_key'
token_uri = 'my_aad_token_uri'
subscription_id = 'my_subscription_id'
# credentials used for storage
storage_account_name = 'my_storage_account_name'
storage_account_key = 'my_storage_account_key'
# specify the credentials used to remote login your GPU node
admin_user_name = 'my_admin_user_name'
admin_user_password = 'my_admin_user_password'
# Authentication
from azure.common.credentials import ServicePrincipalCredentials
import azure.mgmt.batchai as batchai
import azure.mgmt.batchai.models as models
creds = ServicePrincipalCredentials(
client_id=client_id, secret=secret, token_uri=token_uri)
batchai_client = batchai.BatchAIManagementClient(credentials=creds,
subscription_id=subscription_id
)
# Create resource group
from azure.mgmt.resource import ResourceManagementClient
resource_group_name = 'myresourcegroup'
resource_management_client = ResourceManagementClient(
credentials=creds, subscription_id=subscription_id)
resource = resource_management_client.resource_groups.create_or_update(
resource_group_name, {'location': 'eastus'})
# Create Azure File share
from azure.storage.file import FileService
azure_file_share_name = 'batchaiquickstart'
service = FileService(storage_account_name, storage_account_key)
service.create_share(azure_file_share_name, fail_on_exist=False)
# Create the dataset directory on the share and upload the MNIST sample files
mnist_dataset_directory = 'mnistcntksample'
service.create_directory(azure_file_share_name, mnist_dataset_directory, fail_on_exist=False)
for f in ['Train-28x28_cntk_text.txt', 'Test-28x28_cntk_text.txt', 'ConvNet_MNIST.py']:
service.create_file_from_path(
azure_file_share_name, mnist_dataset_directory, f, f)
# Create GPU container
cluster_name = 'mycluster'
relative_mount_point = 'azurefileshare'
parameters = models.ClusterCreateParameters(
# Location where the cluster will physically be deployed
location='eastus',
# VM size. Use NC or NV series for GPU
vm_size='STANDARD_NC6',
# Configure the ssh users
user_account_settings=models.UserAccountSettings(
admin_user_name=admin_user_name,
admin_user_password=admin_user_password),
# Number of VMs in the cluster
scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=1)
),
# Configure each node in the cluster
node_setup=models.NodeSetup(
# Mount shared volumes to the host
mount_volumes=models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=storage_account_key),
                    azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
                        storage_account_name, azure_file_share_name),
relative_mount_path = relative_mount_point)],
),
),
)
batchai_client.clusters.create(resource_group_name, cluster_name, parameters).result()
# Get cluster status
cluster = batchai_client.clusters.get(resource_group_name, cluster_name)
print('Cluster state: {0} Target: {1}; Allocated: {2}; Idle: {3}; '
'Unusable: {4}; Running: {5}; Preparing: {6}; leaving: {7}'.format(
cluster.allocation_state,
cluster.scale_settings.manual.target_node_count,
cluster.current_node_count,
cluster.node_state_counts.idle_node_count,
cluster.node_state_counts.unusable_node_count,
cluster.node_state_counts.running_node_count,
cluster.node_state_counts.preparing_node_count,
cluster.node_state_counts.leaving_node_count))
# Create training job
job_name = 'myjob'
parameters = models.job_create_parameters.JobCreateParameters(
# Location where the job will run
# Ideally this should be co-located with the cluster.
location='eastus',
# The cluster this job will run on
cluster=models.ResourceId(cluster.id),
# The number of VMs in the cluster to use
node_count=1,
# Override the path where the std out and std err files will be written to.
# In this case we will write these out to an Azure Files share
std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(relative_mount_point),
input_directories=[models.InputDirectory(
id='SAMPLE',
path='$AZ_BATCHAI_MOUNT_ROOT/{0}/{1}'.format(relative_mount_point, mnist_dataset_directory))],
# Specify directories where files will get written to
output_directories=[models.OutputDirectory(
id='MODEL',
path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(relative_mount_point),
path_suffix="Models")],
# Container configuration
container_settings=models.ContainerSettings(
models.ImageSourceRegistry(image='microsoft/cntk:2.1-gpu-python3.5-cuda8.0cudnn6.0')),
# Toolkit specific settings
cntk_settings = models.CNTKsettings(
python_script_file_path='$AZ_BATCHAI_INPUT_SAMPLE/ConvNet_MNIST.py',
command_line_args='$AZ_BATCHAI_INPUT_SAMPLE $AZ_BATCHAI_OUTPUT_MODEL')
)
# Create the job
batchai_client.jobs.create(resource_group_name, job_name, parameters).result()
# Monitor job
job = batchai_client.jobs.get(resource_group_name, job_name)
print('Job state: {0} '.format(job.execution_state.name))
# List stdout and stderr output
files = batchai_client.jobs.list_output_files(resource_group_name, job_name, models.JobsListOutputFilesOptions("stdouterr"))
for file in list(files):
print('file: {0}, download url: {1}'.format(file.name, file.download_url))
# Delete job
# batchai_client.jobs.delete(resource_group_name, job_name)
# Delete Cluster
# batchai_client.clusters.delete(resource_group_name, cluster_name)
avg_line_length 38.418994 · max_line_length 125 · alphanum_fraction 0.732151
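The concept code above prints the job state once; a small polling loop is the usual next step. A hedged sketch, reusing the `batchai_client`, `resource_group_name`, and `job_name` defined above (the terminal-state names are an assumption about the `ExecutionState` enum):

```python
# A polling sketch (not part of the original quickstart code); reuses
# batchai_client, resource_group_name and job_name from the file above.
import time

while True:
    job = batchai_client.jobs.get(resource_group_name, job_name)
    state = job.execution_state.name
    print('Job state: {0}'.format(state))
    if state in ('succeeded', 'failed'):  # assumed terminal states
        break
    time.sleep(30)
```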
Row 4 · hexsha 4a134992c4dbd7df5dd52a1ac840f4640bd6c175 · size 19,541 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | scout/dao/space.py | charlon/scout | 9788936c121ebb1022afac21eee497754648448c | ["Apache-2.0"] | null | null | null |
| max_issues | scout/dao/space.py | charlon/scout | 9788936c121ebb1022afac21eee497754648448c | ["Apache-2.0"] | null | null | null |
| max_forks | scout/dao/space.py | charlon/scout | 9788936c121ebb1022afac21eee497754648448c | ["Apache-2.0"] | null | null | null |

content:
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from uw_spotseeker import Spotseeker
from restclients_core.exceptions import DataFailureException
import datetime
import pytz
import random
OPEN_PERIODS = {
# 5am - 10:59am
'morning': {
'start': datetime.time(5, 0, 00, 0),
'end': datetime.time(11, 0, 0, 0)
},
# 11am - 2:59pm
'afternoon': {
'start': datetime.time(11, 0, 0, 0),
'end': datetime.time(15, 0, 0, 0)
},
# 3pm - 9:59pm
'evening': {
'start': datetime.time(15, 0, 0, 0),
'end': datetime.time(22, 0, 0, 0)
},
# 10pm - 4:59am (spans midnight)
'late_night': {
'start': datetime.time(22, 0, 0, 0),
'end': datetime.time(5, 0, 0, 0)
},
}
def get_spot_list(app_type=None, groups=[]):
spot_client = Spotseeker()
res = []
filters = []
filters.append(('limit', 0))
try:
if app_type:
filters.append(('extended_info:app_type', app_type))
else:
# study spots have no app_type, and must filter on something
filters.append(('open_now', 'true'))
for group in groups:
filters.append(('extended_info:group', group))
spots = spot_client.search_spots(filters)
for spot in spots:
spot = process_extended_info(spot)
if spot is not None:
res.append(spot)
except DataFailureException:
# TODO: consider logging on failure
pass
return res
def get_spots_by_filter(filters=[]):
spot_client = Spotseeker()
res = []
try:
spots = spot_client.search_spots(filters)
for spot in spots:
spot = process_extended_info(spot)
if spot is not None:
res.append(spot)
except DataFailureException:
# TODO: consider logging on failure
pass
return res
def get_building_list(campus, app_type=None):
spot_client = Spotseeker()
buildings = []
try:
buildings = spot_client.get_building_list(campus, app_type)
except DataFailureException:
pass
# Log the error?
return buildings
def get_filtered_spots(request, campus, app_type=None):
filters = _get_spot_filters(request)
# adding 'default' filter params
# if limit is not in the query tuple, add default
if "limit" not in dict(filters):
filters.append(('limit', 0))
filters.append(("extended_info:campus", campus))
if(app_type == "food"):
filters.append(('extended_info:app_type', 'food'))
elif(app_type == "tech"):
filters.append(('extended_info:app_type', 'tech'))
elif(app_type == "study"):
if ("open_at" not in dict(filters)) and (
"all_published" not in dict(filters)):
filters.append(('open_now', 'true'))
return get_spots_by_filter(filters)
def _get_spot_filters(request):
params = []
for param in request.GET:
if "type" in param:
params.append(("type", request.GET[param]))
if "food" in param:
params.append(
("extended_info:food_nearby", request.GET[param])
)
if "cuisine" in param:
params.append(
("extended_info:or_group:cuisine", request.GET[param])
)
if "payment" in param:
params.append(
("extended_info:or_group:payment", request.GET[param])
)
if "period" in param:
now = datetime.datetime.now()
params += get_period_filter(request.GET[param])
if "open_now" in param:
params.append(("open_now", "true"))
if "all_published" in param:
params.append(("all_published", "true"))
if "building" in param:
params.append(("building_name", request.GET[param]))
if "resources" in param:
params.append(
("extended_info:or_group:resources", request.GET[param])
)
if "noise" in param:
params.append(("extended_info:noise_level", request.GET[param]))
if "lighting" in param:
params.append(
("extended_info:or_group:lighting", request.GET[param])
)
if "reservation" in param:
params.append(("extended_info:reservable", "true"))
if "capacity" in param:
params.append(("capacity", request.GET[param]))
if "open_at" in param:
params.append(("open_at", request.GET[param]))
if "open_until" in param:
params.append(("open_until", request.GET[param]))
if "subcategory" in param:
params.append(("item:subcategory", request.GET[param]))
if "brand" in param:
params.append(
("item:extended_info:i_brand", request.GET[param])
)
if "item_is_active" in param:
params.append(("item:extended_info:i_is_active", "true"))
# distance, lat, long and limit are essential to distance sorting
if "distance" in param:
params.append(("distance", request.GET[param]))
if "latitude" in param:
params.append(("center_latitude", request.GET[param]))
if "longitude" in param:
params.append(("center_longitude", request.GET[param]))
if "limit" in param:
params.append(("limit", request.GET[param]))
return params
def get_period_filter(param):
now = datetime.datetime.now()
return _get_period_filter(param, now)
def _get_period_filter(param, now):
today = now.strftime("%A")
tomorrow = (now +
datetime.timedelta(days=1)).strftime("%A")
"""
adding 1 minute to start and subtracting 1 from end to prevent returning
spots where spot closes at filter open time or opens at filter close time
"""
end = OPEN_PERIODS[param]["end"]
end_time = adjust_time_by_offset(end, -1)
end_time_string = end_time.strftime("%H:%M")
start = OPEN_PERIODS[param]["start"]
start_time = adjust_time_by_offset(start, 1)
start_time_string = start_time.strftime("%H:%M")
start_string = "%s,%s" % (today, start_time_string)
if param == "late_night":
end_string = "%s,%s" % (tomorrow, end_time_string)
else:
end_string = "%s,%s" % (today, end_time_string)
return [("fuzzy_hours_start", start_string),
("fuzzy_hours_end", end_string)]
def adjust_time_by_offset(time, minutes):
converted_dt = datetime.datetime.combine(datetime.date(1, 1, 1), time)
converted_dt += datetime.timedelta(minutes=minutes)
return converted_dt.time()
def get_spot_by_id(spot_id):
spot_client = Spotseeker()
try:
res = spot_client.get_spot_by_id(int(spot_id))
except DataFailureException:
return None
return process_extended_info(res)
def process_extended_info(spot):
from scout.dao.item import add_item_info
is_hidden = _get_extended_info_by_key("is_hidden", spot.extended_info)
if is_hidden:
return None
spot = add_foodtype_names_to_spot(spot)
spot = add_cuisine_names(spot)
spot = add_payment_names(spot)
spot = add_additional_info(spot)
spot = add_study_info(spot)
spot = add_tech_info(spot)
spot = organize_hours(spot)
spot = add_item_info(spot)
now = datetime.datetime.now(
pytz.timezone(getattr(settings, 'TIME_ZONE', 'America/Los_Angeles'))
)
spot.is_open = get_is_spot_open(spot, now)
spot.open_periods = get_open_periods_by_day(spot, now)
return spot
def organize_hours(spot):
days_list = ('monday',
'tuesday',
'wednesday',
'thursday',
'friday',
'saturday',
'sunday')
hours_object = {}
raw_hours = list(spot.spot_availability)
for idx, day in enumerate(days_list):
today_hours = []
start_of_day = datetime.time(0, 0)
end_of_day = datetime.time(23, 59)
day_hours = [h for h in raw_hours if h.day == day]
# Add all the non-special-case hours
for hours in day_hours:
today_hours.append((hours.start_time, hours.end_time))
# Fixes SCOUT-237
today_hours.sort()
hours_object[day] = today_hours
spot.hours = hours_object
return spot
def get_open_periods_by_day(spot, now):
# defining 'late night' as any time not covered by another period
open_periods = {'morning': False,
'afternoon': False,
'evening': False,
'late_night': False}
hours = spot.hours[now.strftime("%A").lower()]
for opening in hours:
start = opening[0]
end = opening[1]
# spot spans midnight
if start > end:
end = datetime.time(23, 59, 59)
open_periods['late_night'] = True
# open for morning
morning = OPEN_PERIODS['morning']
if morning['start'] < end and morning['end'] > start:
open_periods['morning'] = True
# open for afternoon
afternoon = OPEN_PERIODS['afternoon']
if afternoon['start'] < end and afternoon['end'] > start:
open_periods['afternoon'] = True
# open for evening
evening = OPEN_PERIODS['evening']
if evening['start'] < end and evening['end'] > start:
open_periods['evening'] = True
# open late night
if start < morning['start'] or end > evening['end']:
open_periods['late_night'] = True
return open_periods
def get_is_spot_open(spot, now):
hours_today = spot.hours[now.strftime("%A").lower()]
yesterday = now - datetime.timedelta(days=1)
hours_yesterday = spot.hours[yesterday.strftime("%A").lower()]
for period in hours_yesterday:
open_time = period[0]
close_time = period[1]
if open_time > close_time and now.time() < close_time:
# has an opening past midnight yesterday and NOW is before close
return True
if len(hours_today) == 0:
# has no openings today
return False
for period in hours_today:
open_time = period[0]
close_time = period[1]
if open_time > close_time:
if open_time < now.time():
# Spot is open past midnight and open before now
return True
elif open_time <= now.time() < close_time:
return True
return False
def add_additional_info(spot):
# global extended_info (study & food)
spot.app_type = _get_extended_info_by_key("app_type", spot.extended_info)
spot.location_description = \
_get_extended_info_by_key("location_description",
spot.extended_info)
spot.campus = _get_extended_info_by_key("campus", spot.extended_info)
spot.hours_notes = _get_extended_info_by_key("hours_notes",
spot.extended_info)
spot.access_notes = _get_extended_info_by_key("access_notes",
spot.extended_info)
spot.has_alert = _get_extended_info_by_key("has_alert",
spot.extended_info)
spot.alert_notes = _get_extended_info_by_key("alert_notes",
spot.extended_info)
# new extended_info (food only)
spot.description = \
_get_extended_info_by_key("s_description", spot.extended_info)
spot.s_has_reservation = _get_extended_info_by_key("s_has_reservation",
spot.extended_info)
spot.s_reservation_notes = _get_extended_info_by_key("s_reservation_notes",
spot.extended_info)
spot.menu_url = _get_extended_info_by_key("s_menu_url",
spot.extended_info)
spot.has_coupon = _get_extended_info_by_key("s_has_coupon",
spot.extended_info)
spot.coupon_expiration = _get_extended_info_by_key("s_coupon_expiration",
spot.extended_info)
spot.coupon_url = _get_extended_info_by_key("s_coupon_url",
spot.extended_info)
spot.phone = _get_extended_info_by_key("s_phone",
spot.extended_info)
spot.email = _get_extended_info_by_key("s_email",
spot.extended_info)
spot.website_url = _get_extended_info_by_key("s_website_url",
spot.extended_info)
if spot.app_type is None:
spot.app_type = "study"
return spot
def add_study_info(spot):
RESOURCE_MAPPING = {
"has_whiteboards": "Whiteboards",
"has_computers": "Computers",
"has_outlets": "Outlets",
"has_printing": "Printing",
"has_scanner": "Scanner",
"has_displays": "Displays",
"has_projector": "Projector"
}
spot.spot_resources = _get_names_for_extended_info("",
RESOURCE_MAPPING,
spot.extended_info)
spot.num_computers = _get_extended_info_by_key("num_computers",
spot.extended_info)
if (_get_extended_info_by_key("has_natural_light", spot.extended_info) ==
"true"):
spot.natural_light = True
spot.spot_noise = _get_extended_info_by_key("noise_level",
spot.extended_info)
spot.food_nearby = _get_extended_info_by_key("food_nearby",
spot.extended_info)
spot.reservable = _get_extended_info_by_key("reservable",
spot.extended_info)
spot.reservation_notes = _get_extended_info_by_key("reservation_notes",
spot.extended_info)
spot.labstats_id = _get_extended_info_by_key("labstats_id",
spot.extended_info)
spot.auto_labstats_total = _get_extended_info_by_key(
"auto_labstats_total",
spot.extended_info)
spot.auto_labstats_available = _get_extended_info_by_key(
"auto_labstats_available",
spot.extended_info)
if spot.auto_labstats_available is None\
or spot.auto_labstats_total is None:
spot.auto_labstats_total = 0
spot.auto_labstats_available = 0
return spot
def add_tech_info(spot):
spot.has_cte_techloan = _get_extended_info_by_key("has_cte_techloan",
spot.extended_info)
spot.cte_techloan_id = _get_extended_info_by_key("cte_techloan_id",
spot.extended_info)
return spot
def _get_extended_info_by_key(key, extended_info):
for info in extended_info:
if info.key == key:
return info.value
def _get_names_for_extended_info(prefix, mapping, info):
names = []
for obj in info:
if prefix in obj.key and obj.value:
try:
names.append(mapping[obj.key])
except KeyError:
pass
names.sort()
return names
def add_payment_names(spot):
PAYMENT_PREFIX = "s_pay"
PAYMENT_MAPPING = {
"s_pay_cash": "Cash",
"s_pay_visa": "Visa",
"s_pay_mastercard": "Mastercard",
"s_pay_husky": "Husky Card",
"s_pay_dining": "Dining Account",
}
spot.payment_names = _get_names_for_extended_info(PAYMENT_PREFIX,
PAYMENT_MAPPING,
spot.extended_info)
return spot
def add_cuisine_names(spot):
CUISINE_TYPE_PREFIX = "s_cuisine"
CUISINE_TYPE_MAPPING = {
"s_cuisine_american": "American",
"s_cuisine_bbq": "BBQ",
"s_cuisine_chinese": "Chinese",
"s_cuisine_hawaiian": "Hawaiian",
"s_cuisine_indian": "Indian",
"s_cuisine_italian": "Italian",
"s_cuisine_korean": "Korean",
"s_cuisine_mexican": "Mexican",
"s_cuisine_vietnamese": "Vietnamese",
}
spot.cuisinetype_names = _get_names_for_extended_info(CUISINE_TYPE_PREFIX,
CUISINE_TYPE_MAPPING,
spot.extended_info)
return spot
def add_foodtype_names_to_spot(spot):
FOOD_TYPE_PREFIX = "s_food_"
FOOD_TYPE_MAPPING = {
"s_food_burgers": "Burgers",
"s_food_breakfast": "Breakfast",
"s_food_curry": "Curry",
"s_food_desserts": "Desserts",
"s_food_entrees": "Entrees",
"s_food_espresso": "Espresso",
"s_food_frozen_yogurt": "Frozen Yogurt",
"s_food_groceries": "Groceries",
"s_food_pasta": "Pasta",
"s_food_pastries": "Pastries",
"s_food_pho": "Pho",
"s_food_pizza": "Pizza",
"s_food_salads": "Salads",
"s_food_sandwiches": "Sandwiches",
"s_food_smoothies": "Smoothies",
"s_food_sushi_packaged": "Sushi (packaged)",
"s_food_tacos": "Tacos",
}
spot.foodtype_names = _get_names_for_extended_info(FOOD_TYPE_PREFIX,
FOOD_TYPE_MAPPING,
spot.extended_info)
return spot
def group_spots_by_building(spots):
grouped_spots = {}
for spot in spots:
if spot.building_name in grouped_spots:
grouped_spots[spot.building_name].append(spot)
else:
grouped_spots[spot.building_name] = [spot]
list_structure = []
for name in grouped_spots:
building_dict = {"name": name,
"spots": grouped_spots[name]}
list_structure.append(building_dict)
return add_latlng_to_building(list_structure)
def add_latlng_to_building(building_list):
for building in building_list:
building['latitude'], building['longitude'] = \
get_avg_latlng_for_spots(building['spots'])
return building_list
def get_avg_latlng_for_spots(spots):
avg_lat = 0
avg_lng = 0
count = len(spots)
for spot in spots:
avg_lat += spot.latitude
avg_lng += spot.longitude
return avg_lat/count, avg_lng/count
def validate_detail_info(spot, campus, app_type):
if spot:
if not spot.app_type:
spot.app_type = "study"
if spot.campus != campus or spot.app_type != app_type:
spot = []
return spot
def get_random_limit_from_spots(spot_list, count):
# return <count> spots using reservoir sampling
result = []
i = 0
for item in spot_list:
i += 1
if len(result) < count:
result.append(item)
else:
s = int(random.random() * i)
if s < count:
result[s] = item
return result
avg_line_length 34.770463 · max_line_length 79 · alphanum_fraction 0.580062
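The ±1-minute adjustment in `_get_period_filter` is easiest to see on a concrete call. A minimal sketch with a fixed "now", assuming the module above is importable as `scout.dao.space` (the values in the comment were checked by hand, not by the original tests):

```python
# Illustrative call of _get_period_filter with a fixed "now" (a Wednesday
# noon); assumes the module above is importable as scout.dao.space.
import datetime
from scout.dao.space import _get_period_filter

now = datetime.datetime(2022, 3, 9, 12, 0)
print(_get_period_filter('late_night', now))
# [('fuzzy_hours_start', 'Wednesday,22:01'),
#  ('fuzzy_hours_end', 'Thursday,04:59')]
```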
Row 5 · hexsha 4a134a8ea4e0c9ae382c8eb53866f43949857919 · size 30,003 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | galini/sdp/cuts_generator.py | cog-imperial/galini | b27e62b4e981818624f8dc315f0cadee2f7cbed2 | ["Apache-2.0"] | 14 | 2020-02-17T14:24:16.000Z | 2022-03-18T03:21:32.000Z |
| max_issues | galini/sdp/cuts_generator.py | JackaChou/galini | b27e62b4e981818624f8dc315f0cadee2f7cbed2 | ["Apache-2.0"] | 7 | 2020-09-26T19:52:59.000Z | 2022-01-10T21:00:44.000Z |
| max_forks | galini/sdp/cuts_generator.py | JackaChou/galini | b27e62b4e981818624f8dc315f0cadee2f7cbed2 | ["Apache-2.0"] | 5 | 2020-03-13T16:00:49.000Z | 2022-02-10T10:19:10.000Z |

content:
# Copyright 2019 Radu Baltean-Lugojan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SDP Cuts generator."""
import platform
from ctypes import cdll, c_double
from itertools import combinations_with_replacement, chain
from operator import itemgetter
from os import path
import numpy as np
import pyomo.environ as pe
from coramin.relaxations import relaxation_data_objects
from coramin.relaxations.mccormick import PWMcCormickRelaxation
from coramin.relaxations.univariate import PWXSquaredRelaxation
from networkx import enumerate_all_cliques, from_numpy_matrix, Graph
from pyomo.core.expr.current import SumExpression
from suspect.pyomo.quadratic import QuadraticExpression
from galini.config import CutsGeneratorOptions, NumericOption, EnumOption
from galini.cuts import CutsGenerator
from galini.math import is_close
from galini.quantities import relative_bound_improvement
from galini.timelimit import current_time, seconds_elapsed_since
class SdpCutsGenerator(CutsGenerator):
"""
Implements the low-dimensional semidefinite (sdp) cuts developed in
'Selecting cutting planes for quadratic semidefinite outer-approximation via trained neural networks',
Baltean-Lugojan, Radu and Bonami, Pierre and Misener, Ruth and Tramontani, Andrea, 2018,
http://www.optimization-online.org/DB_HTML/2018/11/6943.html
Scaling for the purpose of neural net evaluation is done at each B&B node according to Appendix A
from 'Globally solving nonconvex quadratic programming problems with box
constraints via integer programming methods',
Bonami, Pierre and Gunluk, Oktay and Linderoth, Jeff,
Mathematical Programming Computation, 1-50, 2018, Springer).
"""
name = 'sdp'
description = 'SDP cuts powered by machine learning'
def __init__(self, galini, config):
super().__init__(galini, config)
self.galini = galini
self._telemetry = self.galini.telemetry
self.logger = galini.get_logger(__name__)
self._domain_eps = config['domain_eps']
self._sel_size = config['selection_size']
self._thres_sdp_viol = config['thres_sdp_viol']
self._max_sdp_cuts = config['max_sdp_cuts_per_round']
self._min_sdp_cuts = config['min_sdp_cuts_per_round']
self._dim = config['dim']
self._thres_min_opt_sel = config['thres_min_opt_sel']
self._cut_sel = config['cut_sel_strategy']
self._big_m = config['big_m']
self._thres_min_coeff_comb = config['thres_min_coeff_comb']
self._cuts_relative_tolerance = config['convergence_relative_tolerance']
self._preprocess_time_limit = config['preprocess_time_limit']
assert (0 <= self._sel_size), \
"Selection size (how many cuts) needs to be a positive proportion/absolute number!"
self._possible_dim = [2, 3, 4, 5]
assert (self._dim in self._possible_dim), \
"The dimensionality of SDP cuts evaluated by neural networks is between 2 and 5!"
# *** Problem info related to SDP cuts associated with the entire B&B tree
self._variables = None
self._var_idx_map = None
self._objective = None
self._constraints = None
self._num_vars = 0 # number of variables in problem
self._aux_var_map = None
# Flag for neural nets/ optimality cut selection being used
self._nn_used = self._cut_sel not in ["FEAS", "RANDOM"]
# Hold trained neural networks functions
self._nns = None
        # Global variables to speed up eigen-decomposition by pre-forming the matrix to be decomposed
self._mat = None
self._indices = None
# Data structure holding SDP decomposition (involving objectives and constraints)
self._agg_list = None
# *** Problem info related to SDP cuts associated with every node of B&B
self._agg_list_rescaled = None
self._cut_round = 0
self._lower_bounds = None
self._upper_bounds = None
self._domains = None # difference in bounds
@staticmethod
def cuts_generator_options():
return CutsGeneratorOptions(SdpCutsGenerator.name, [
NumericOption('domain_eps',
default=1e-3,
description="Minimum domain length for each variable to consider cut on"),
NumericOption('selection_size',
default=0.1,
description="Cut selection size as a % of all cuts or as absolute number of cuts"),
NumericOption('thres_sdp_viol',
default=-1e-15,
description="Violation (negative eigenvalue) threshold for separation of SDP cuts"),
NumericOption('min_sdp_cuts_per_round',
default=1e1,
description="Min number of SDP cuts to be added to relaxation at each cut round"),
NumericOption('max_sdp_cuts_per_round',
default=5e3,
description="Max number of SDP cuts to be added to relaxation at each cut round"),
NumericOption('dim',
default=3,
description="Dimension of SDP decomposition/cuts - min=2, max=5"),
EnumOption('cut_sel_strategy',
default="COMB_ONE_CON",
values=["OPT", "FEAS", "RANDOM", "COMB_ONE_CON", "COMB_ALL_CON"]),
NumericOption('big_m',
default=10e3,
description="Big M constant value for combined optimality/feasibility cut selection strategy"),
NumericOption('thres_min_opt_sel',
default=0,
description="Threshold of minimum optimality measure to select cut in "
"combined optimality/feasibility cut selection strategy"),
NumericOption('thres_min_coeff_comb',
default=1e-5,
description="Threshold for the min absolute value of a valid coefficient when combining "
"constraints based on Lagrange multipliers for optimality cut selection"),
NumericOption('convergence_relative_tolerance',
default=1e-3,
description='Termination criteria on lower bound improvement between '
'two consecutive cut rounds <= relative_tolerance % of '
'lower bound improvement from cut round'),
NumericOption('preprocess_time_limit',
default=5,
description="Time limit for the pre processing step at the root node. If pre processing "
"does not finish in time, no cuts will be generated."),
])
def before_start_at_root(self, problem, relaxed_problem):
# self._nb_vars = problem.num_variables
# self._add_missing_squares(problem)
if self._nn_used:
self._nns = self._load_neural_nets()
if self._agg_list is None:
self._agg_list = self._get_sdp_decomposition(problem, relaxed_problem)
if not self._agg_list:
return
self._mat = [np.zeros((i + 1, i + 1)) for i in self._possible_dim]
for i in range(len(self._possible_dim)):
self._mat[i][0, 0] = 1
self._indices = [(np.array([1 + x for x in np.triu_indices(dim, 0, dim)[0]]),
np.array([1 + x for x in np.triu_indices(dim, 0, dim)[1]]))
for dim in self._possible_dim]
self.before_start_at_node(problem, relaxed_problem)
def after_end_at_root(self, problem, relaxed_problem, solution):
self.after_end_at_node(problem, relaxed_problem, solution)
def before_start_at_node(self, problem, relaxed_problem):
# We need duals for this, so ask pyomo to import them.
if not hasattr(relaxed_problem, 'dual'):
relaxed_problem.dual = pe.Suffix(direction=pe.Suffix.IMPORT)
lower_bounds = np.zeros(self._num_vars)
upper_bounds = np.zeros(self._num_vars)
domains = np.zeros(self._num_vars)
var_idx_map = self._var_idx_map
for var in self._variables:
var_idx = var_idx_map[var]
lower_bounds[var_idx] = var.lb
upper_bounds[var_idx] = var.ub
if var.has_lb() and var.has_ub():
domains[var_idx] = var.ub - var.lb
else:
domains[var_idx] = np.inf
self._lower_bounds = lower_bounds
self._upper_bounds = upper_bounds
self._domains = domains
self._agg_list_rescaled = self._rescale_coeffs_for_cut_selection() if self._nn_used else self._agg_list
self._cut_round = 0
def after_end_at_node(self, problem, relaxed_problem, solution):
self._lower_bounds = None
self._upper_bounds = None
self._domains = None
self._agg_list_rescaled = None
def has_converged(self, state):
"""Termination criteria for cut generation loop."""
if not self._agg_list:
return True
if not np.all(np.isfinite(self._domains)):
return True
if (state.first_solution is None or
state.previous_solution is None or
state.latest_solution is None):
return False
return relative_bound_improvement(
state.first_solution,
state.previous_solution,
state.latest_solution,
mc=self.galini.mc
) <= self._cuts_relative_tolerance
def generate(self, problem, linear_problem, solution, tree, node):
if not self._agg_list:
return
if not np.all(np.isfinite(self._domains)):
return
relaxation_data = node.storage.relaxation_data
model_to_relaxation_var_map = relaxation_data.original_to_new_var_map
rank_list = self._get_sdp_selection(problem, linear_problem, solution, model_to_relaxation_var_map)
agg_list = self._agg_list_rescaled
nb_sdp_cuts = 0
# Interpret selection size as % or absolute number and threshold the maximum number of SDP cuts per round
if self._sel_size <= 1:
nb_cuts = int(np.floor(self._sel_size * len(rank_list)))
else:
nb_cuts = int(np.floor(self._sel_size))
max_sdp_cuts = \
int(min(max(self._min_sdp_cuts, nb_cuts),
min(self._max_sdp_cuts, len(rank_list))))
cuts = []
# Generate and add selected cuts up to (sel_size) in number
for ix in range(0, max_sdp_cuts):
(idx, obj_improve, x_vals, X_slice, dim_act) = rank_list[ix]
dim_act = len(x_vals)
eigvals, evecs = self._get_eigendecomp(dim_act, x_vals, X_slice, True)
if eigvals[0] < self._thres_sdp_viol:
evect = evecs.T[0]
evect = np.where(abs(evect) <= -self._thres_sdp_viol, 0, evect)
evect_arr = [evect[idx1] * evect[idx2] * 2 if idx1 != idx2 else evect[idx1] * evect[idx2]
for idx1 in range(dim_act + 1) for idx2 in range(max(idx1, 1), dim_act + 1)]
x_vars = [self._variables[i] for i in agg_list[idx][0]]
# Construct SDP cut involving only auxiliary variables in the upper triangular matrix of a slice
quad_v1_gen = chain.from_iterable([[x_var for _ in range(dim_act - x_idx)] for x_idx, x_var in enumerate(x_vars)])
quad_v2_gen = chain.from_iterable([x_vars[i:] for i in range(dim_act)])
quad_coef = evect_arr[dim_act:]
sum_expr = 0.0
for v1, v2, coef in zip(quad_v1_gen, quad_v2_gen, quad_coef):
v1 = linear_problem.find_component(v1.getname(fully_qualified=True))
v2 = linear_problem.find_component(v2.getname(fully_qualified=True))
sum_expr += v1 * v2 * float(coef)
lin_coef = evect_arr[0:dim_act]
sum_expr += evect[0] * evect[0]
for v, coef in zip(x_vars, lin_coef):
v = linear_problem.find_component(v.getname(fully_qualified=True))
sum_expr += v * float(coef)
nb_sdp_cuts += 1
cut_expr = sum_expr >= 0
                # Avoid returning a wrong expression if there was an error generating the cut expr.
                if not isinstance(cut_expr, (bool, np.bool_)):
cuts.append(sum_expr >= 0)
self._cut_round += 1
return cuts
def _get_lifted_mat_values(self, problem, relaxed_problem, solution):
# Build matrix of lifted X values
nb_vars = self._num_vars
lifted_mat = np.zeros((nb_vars, nb_vars))
var_idx_map = self._var_idx_map
self._aux_var_map = dict()
for relaxation in relaxation_data_objects(relaxed_problem, active=True, descend_into=True):
if isinstance(relaxation, PWMcCormickRelaxation):
x, y = relaxation.get_rhs_vars()
w = relaxation.get_aux_var()
elif isinstance(relaxation, PWXSquaredRelaxation):
x, = relaxation.get_rhs_vars()
y = x
w = relaxation.get_aux_var()
else:
continue
self._aux_var_map[id(x), id(y)] = self._aux_var_map[id(y), id(x)] = w
x = problem.find_component(x.getname(fully_qualified=True))
x = var_idx_map[x]
y = problem.find_component(y.getname(fully_qualified=True))
y = var_idx_map[y]
value = pe.value(w, exception=False)
if value is None:
continue
lifted_mat[x, y] = value
lifted_mat[y, x] = value
return lifted_mat
def _get_sdp_selection(self, problem, relaxed_problem, solution, model_relaxation_var_map):
lifted_mat = self._get_lifted_mat_values(problem, relaxed_problem, solution)
agg_list = self._agg_list_rescaled
nns = self._nns
lower_bounds = self._lower_bounds
domains = self._domains
rank_list = []
if not hasattr(relaxed_problem, 'dual') and not isinstance(relaxed_problem.dual, pe.Suffix):
# pylint: disable=line-too-long
self.logger.warning('SDP Cuts Generator requires solution dual values but solver did not return them.')
return rank_list
# For each sub-problem rho
for idx, (clique, inputNNs) in enumerate(agg_list):
obj_improve = 0
dim_act = len(clique)
clique_vars = [
model_relaxation_var_map[self._variables[var_idx]] for var_idx in clique
]
x_vals = [solution.variables[var] for var in clique_vars]
if any(v is None for v in x_vals):
continue
cl_idxs = list(combinations_with_replacement(clique, 2))
cl_vars = [
(model_relaxation_var_map[self._variables[var1]], model_relaxation_var_map[self._variables[var2]])
for var1, var2 in cl_idxs
]
X_slice = np.asarray(itemgetter(*cl_idxs)(lifted_mat))
# Combined selections with optimality measure (=neural net estimation of objective improvement)
# computed taking into account quadratic objective & constraints.
# Can be implemented considering 1, 2, 3, ..., all subsets of quad objective & constraints at a time.
if self._nn_used:
# If the domain of any variable involved in the cut is very small, don't consider cut
if any(domains[i] <= self._domain_eps for i in clique):
continue
# If neural net evaluations are used, rescale solution to [0, 1] using bounds
X_slice_rs = X_slice + np.asarray([
(lower_bounds[i] * solution.variables[vj] + lower_bounds[j] * solution.variables[vi] - lower_bounds[i] * lower_bounds[j])/domains[i]/domains[j]
for (i, j), (vi, vj) in zip(cl_idxs, cl_vars)
])
x_vals_rs = [(x_vals[x_idx] - lower_bounds[i]) / domains[i] for x_idx, i in enumerate(clique)]
# Get eigenvalues if not optimality-only cut selection
if self._cut_sel != "OPT":
eigval = self._get_eigendecomp(dim_act, x_vals, X_slice, False)[0]
else:
eigval = self._thres_sdp_viol
if eigval <= self._thres_sdp_viol:
# Flag indicating whether valid cut can be selected by the optimality measure
sel_by_opt = False
# One constraint at a time (includes optimality-only cut selection which is guaranteed to converge
# only for instances with no quadratic constraints) to get coefficients/input for neural nets
if self._cut_sel in ["OPT", "COMB_ONE_CON"]:
for idx2, (input_nn, max_elem, con_idx) in enumerate(inputNNs):
# Lagrange multiplier (1 if objective)
if con_idx >= 0:
cons = self._constraints[con_idx]
cons = relaxed_problem.find_component(cons.getname(fully_qualified=True))
mu = relaxed_problem.dual[cons]
else:
mu = 1.0
# If mu negative reverse sense of constraint coefficients inputted in a neural net
# since an improvement in the objective is linked to positive mu
if mu > 0:
input_nn = np.sign(mu) * input_nn
estim = nns[dim_act - 2][0](nns[dim_act - 2][1](*x_vals_rs, *input_nn.tolist())) - \
np.matmul(input_nn, X_slice_rs)
if estim > self._thres_min_opt_sel or self._cut_sel == "OPT":
obj_improve += estim * max_elem * abs(mu)
sel_by_opt = True
# Combine all constraints to get coefficients/input for neural nets
elif self._cut_sel == "COMB_ALL_CON":
input_nn_sum = np.zeros(len(cl_idxs))
# Sum up coefficient of X_rho variables from all relevant constraints
# (accounting for mu and rescaling)
for idx2, (input_nn, max_elem, con_idx) in enumerate(inputNNs):
# Lagrange multiplier (1 if objective)
if con_idx >= 0:
cons = self._constraints[con_idx]
cons = relaxed_problem.find_component(cons.getname(fully_qualified=True))
mu = relaxed_problem.dual[cons]
else:
mu = 1.0
input_nn_sum += input_nn * max_elem * mu
# Bound domains of eigenvalues/coefficients to [-1,1] via Lemma 4.1.2
# TODO(fra): this looks wrong. input_nn is used but it's a leftover from the loop.
max_elem = len(clique) * abs(max(input_nn_sum, key=abs))
if max_elem > self._thres_min_coeff_comb:
input_nn_sum = input_nn_sum / max_elem
estim = nns[dim_act - 2][0](nns[dim_act - 2][1](*x_vals_rs, *input_nn_sum.tolist())) - \
np.matmul(input_nn, X_slice_rs)
if estim > self._thres_min_opt_sel:
obj_improve += estim * max_elem
sel_by_opt = True
# In combined optimality+feasibility selection,
# prioritize cuts selected by optimality then those selected by feasibility
if self._cut_sel != "OPT":
if sel_by_opt: # If strong cut is selected by an optimality measure
obj_improve += self._big_m
else: # If cut not strong but valid
obj_improve = -eigval
# TODO(fra): this one seems to be at the wrong indentation level.
# Feasibility selection by absolute value of largest negative eigenvalue
elif self._cut_sel == "FEAS":
obj_improve = - self._get_eigendecomp(dim_act, x_vals, X_slice, False)[0]
# Random selection
elif self._cut_sel == "RANDOM":
obj_improve = np.random.random_sample()
rank_list.append((idx, obj_improve, x_vals, X_slice, dim_act))
# Sort sub-problems by measure for selection
rank_list.sort(key=lambda tup: tup[1], reverse=True)
return rank_list
def _rescale_coeffs_for_cut_selection(self):
agg_list = self._agg_list
agg_list_rescaled = [0] * len(agg_list)
domains = self._domains
for idx, (clique, inputNNs) in enumerate(agg_list):
clique_size = len(clique)
rescale_vec = np.asarray([
domains[el1] * domains[el2] for el1, el2 in combinations_with_replacement(clique, 2)
])
inputNNs_rescaled = [0] * len(inputNNs)
for idx2, (input_nn, max_elem, con_idx) in enumerate(inputNNs):
# Rescale input coefficients according to bounds
input_nn = input_nn / rescale_vec
# Rescale coefficients to be in [0,1]
max_elem = clique_size * abs(max(input_nn, key=abs))
inputNNs_rescaled[idx2] = (input_nn / max_elem, max_elem, con_idx)
agg_list_rescaled[idx] = (clique, inputNNs_rescaled)
return agg_list_rescaled
def _load_neural_nets(self):
"""Load trained neural networks (from /neural_nets/NNs.dll) up to the sub-problem dimension needed for an SDP
decomposition. These neural networks estimate the expected objective improvement for a
particular sub-problem at the current solution point.
"""
self._nns = []
dirname = path.dirname(__file__)
if platform.uname()[0] == "Windows":
nn_library = path.join(dirname, 'NNs.dll')
elif platform.uname()[0] == "Linux":
nn_library = path.join(dirname, 'NNs.so')
else: # Mac OSX
raise ValueError('The neural net library for SDP cuts is compiled only for '
'Linux/Win! (OSX needs compiling)')
# nn_library = 'neural_nets/NNs.dylib' - Not compiled for OSX, will throw error
nn_library = cdll.LoadLibrary(nn_library)
for d in range(2, self._dim + 1): # (d=|rho|) - each subproblem rho has a neural net depending on its size
func_dim = getattr(nn_library, "neural_net_%dD" % d) # load each neural net
func_dim.restype = c_double # return type from each neural net is a c_double
# c_double array input: x_rho (the current point) and Q_rho (upper triangular part since symmetric)
type_dim = (c_double * (d * (d + 3) // 2))
self._nns.append((func_dim, type_dim))
return self._nns
def _get_sdp_decomposition(self, problem, relaxed_problem):
start_time = current_time()
time_limit = self._preprocess_time_limit
dim = self._dim
agg_list = []
variables = [
var for var in problem.component_data_objects(pe.Var, active=True, descend_into=True)
]
self._variables = variables
num_vars = len(variables)
self._num_vars = num_vars
var_idx_map = pe.ComponentMap([(var, idx) for idx, var in enumerate(variables)])
self._var_idx_map = var_idx_map
constraints = [
constraint
for constraint in problem.component_data_objects(pe.Constraint, active=True, descend_into=True)
]
self._constraints = constraints
objective = next(problem.component_data_objects(pe.Objective, active=True, descend_into=True))
self._objective = objective
quad_terms_per_con = [
[]
for _ in range(1 + len(constraints))
]
if seconds_elapsed_since(start_time) > time_limit:
return []
# Find all quadratic terms (across all objectives + constraints) and form an adjacency matrix for their indices
adj_mat = np.zeros((num_vars, num_vars))
for con_idx, constraint in enumerate([objective, *constraints]):
if isinstance(constraint, pe.Objective):
root_expr = constraint.expr
else:
root_expr = constraint.body
quadratic_expr = None
if isinstance(root_expr, QuadraticExpression):
quadratic_expr = root_expr
elif isinstance(root_expr, SumExpression):
for arg in root_expr.args:
if isinstance(arg, QuadraticExpression):
quadratic_expr = arg
break
if seconds_elapsed_since(start_time) > time_limit:
return []
if quadratic_expr is not None:
for term in quadratic_expr.terms:
if not is_close(term.coefficient, 0.0, atol=self.galini.mc.epsilon):
idx_var1 = var_idx_map[term.var1]
idx_var2 = var_idx_map[term.var2]
adj_mat[idx_var1, idx_var2] = 1
adj_mat[idx_var2, idx_var1] = 1
quad_terms_per_con[con_idx].append((idx_var1, idx_var2, term.coefficient))
        # Get only cliques up to the dimension of the SDP decomposition
all_cliques_iterator = enumerate_all_cliques(from_numpy_matrix(adj_mat))
for clique in all_cliques_iterator:
if len(clique) < 2:
continue
elif len(clique) <= dim:
agg_list.append(set(clique))
else:
break
# Eliminate cliques that are subsets of other cliques
agg_list = [(x, []) for x in agg_list if not any(x <= y for y in agg_list if x is not y)]
# Look in each constraint at a time for cliques up to dim in size
nb_objs = 1
for con_idx, constraint in enumerate([objective, *constraints]):
if seconds_elapsed_since(start_time) > time_limit:
return []
adj_mat_con = np.zeros((num_vars, num_vars))
coeff_mat_con = np.zeros((num_vars, num_vars))
G = Graph()
for (idx_var1, idx_var2, term_coeff) in quad_terms_per_con[con_idx]:
adj_mat_con[idx_var1, idx_var2] = 1
adj_mat_con[idx_var2, idx_var1] = 1
G.add_edge(idx_var1, idx_var2)
coeff_mat_con[idx_var1, idx_var2] = term_coeff
coeff_mat_con[idx_var2, idx_var1] = term_coeff
        # Get only cliques up to the dimension of the SDP decomposition
agg_list_con = []
for clique in enumerate_all_cliques(G):
if seconds_elapsed_since(start_time) > time_limit:
return []
if len(clique) < 2:
continue
elif len(clique) <= dim:
agg_list_con.append(set(clique))
else:
break
# Eliminate cliques that are subsets of other cliques
agg_list_con = [x for x in agg_list_con if not any(x <= y for y in agg_list_con if x is not y)]
# Aggregate coefficient info (input_nn) used as input for neural networks for each constraint
for agg_idx, (clique, _) in enumerate(agg_list):
for clique_con in agg_list_con:
if clique_con <= clique and len(clique_con.intersection(clique)) > 1:
mat_idxs = list(combinations_with_replacement(sorted(clique), 2))
input_nn = itemgetter(*mat_idxs)(coeff_mat_con)
agg_list[agg_idx][1].append((np.asarray(input_nn), 1, con_idx-nb_objs))
# Sort clique elements after done with them as sets (since neural networks are not invariant on order)
agg_list = [(sorted(clique), _) for (clique, _) in agg_list]
return agg_list
def _get_eigendecomp(self, dim_subpr, x_vals, X_slice, ev_yes):
"""Get eigen-decomposition of a matrix of type [1, x^T; x, X] where x=(x_vals), X=(X_slice),
with/(out) eigenvectors (ev_yes)
"""
mat = self._mat
mat[dim_subpr - 2][0, 1:] = x_vals
mat[dim_subpr - 2][self._indices[dim_subpr - 2]] = X_slice
# Eigenvalues are returned in ascending order
if ev_yes:
return np.linalg.eigh(mat[dim_subpr - 2], "U")
else:
return np.linalg.eigvalsh(mat[dim_subpr - 2], "U")
avg_line_length 45.597264 · max_line_length 163 · alphanum_fraction 0.594974
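The `_get_eigendecomp` docstring describes a bordered matrix of the form [1, x^T; x, X]. A standalone numpy sketch of that construction (the numeric values are made up for illustration):

```python
# Standalone illustration of the bordered matrix [[1, x^T], [x, X]] that
# _get_eigendecomp builds; values are made up, not from the solver.
import numpy as np

x_vals = np.array([0.3, 0.7])                # current point x_rho
X_slice = np.array([[0.10, 0.21],
                    [0.21, 0.49]])           # lifted values X_rho
mat = np.block([[np.ones((1, 1)), x_vals[None, :]],
                [x_vals[:, None], X_slice]])
eigvals = np.linalg.eigvalsh(mat)            # ascending order
print(eigvals[0])  # a negative smallest eigenvalue signals a violated cut
```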
Row 6 · hexsha 4a134b0771b44df4880a61a366e81adfcc0b5639 · size 691 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | setup.py | ananswam/bootstrapping | 3dd412917751b4ea9295311881fe79851a9552b1 | ["MIT"] | null | null | null |
| max_issues | setup.py | ananswam/bootstrapping | 3dd412917751b4ea9295311881fe79851a9552b1 | ["MIT"] | null | null | null |
| max_forks | setup.py | ananswam/bootstrapping | 3dd412917751b4ea9295311881fe79851a9552b1 | ["MIT"] | null | null | null |

content:
from distutils.core import setup
from Cython.Build import cythonize
from numpy import get_include
from distutils.extension import Extension
import os
numpyInclude = [get_include(), '.']
sourceFiles = ['random.pyx', 'lib.pyx']
ext_options = {}
ext_options['language'] = 'c'
ext_options['include_dirs'] = numpyInclude
extra_compile_args = []
# building part
modulename = 'bootstrapping'
extensions = [Extension(modulename+'.'+s.split('.')[0],[s], **ext_options) for s in sourceFiles]
setup(
name = modulename,
packages = [modulename],
package_dir = {modulename : '.'},
package_data = {modulename: ['random.pxd', 'lib.pxd']},
ext_modules = cythonize(extensions)
)
avg_line_length 21.59375 · max_line_length 96 · alphanum_fraction 0.704776
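For reference, extensions declared this way are normally compiled in place with `python setup.py build_ext --inplace` before the package is importable from the source tree (standard distutils/Cython usage; the command is not part of the file itself).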
Row 7 · hexsha 4a134b8bd16bb3b8ce8e74f100ea76054fd432d5 · size 342 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | tests/conftest.py | mambocab/simpletimeit | 85aecc6543c6f4683fd899ae391278d82fa71925 | ["MIT"] | 3 | 2015-01-05T23:54:45.000Z | 2015-08-15T17:46:29.000Z |
| max_issues | tests/conftest.py | mambocab/rumble | 85aecc6543c6f4683fd899ae391278d82fa71925 | ["MIT"] | 3 | 2015-01-13T18:21:18.000Z | 2015-01-27T17:49:37.000Z |
| max_forks | tests/conftest.py | mambocab/rumble | 85aecc6543c6f4683fd899ae391278d82fa71925 | ["MIT"] | null | null | null |

content:
import pytest
skip_slow_string = '--skipslow'
def pytest_addoption(parser):
parser.addoption(skip_slow_string, action='store_true',
help='skip slow integration tests')
def pytest_runtest_setup(item):
    if 'slow' in item.keywords and item.config.getoption(skip_slow_string):
pytest.skip('disabled by --skipslow option')
avg_line_length 24.428571 · max_line_length 71 · alphanum_fraction 0.722222
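For reference, a test that this hook would skip under `--skipslow` looks like the following sketch (illustrative; not part of the original conftest):

```python
# Example of a test the hook above would skip when --skipslow is passed.
import pytest

@pytest.mark.slow
def test_expensive_integration():
    assert sum(range(1_000_000)) == 499999500000
```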
Row 8 · hexsha 4a134bbbf049de84796eb8add2dfbede5400dd1b · size 741 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | manage.py | CynthiaOuma12673/pitches-app | cd7160f69fd382bab2ddfb777823be155b74f1b4 | ["Unlicense", "MIT"] | null | null | null |
| max_issues | manage.py | CynthiaOuma12673/pitches-app | cd7160f69fd382bab2ddfb777823be155b74f1b4 | ["Unlicense", "MIT"] | null | null | null |
| max_forks | manage.py | CynthiaOuma12673/pitches-app | cd7160f69fd382bab2ddfb777823be155b74f1b4 | ["Unlicense", "MIT"] | null | null | null |

content:
from app import create_app,db
from flask_script import Manager, Server
from app.models import User,Pitch,Upvote,Downvote,Comment
from flask_migrate import MigrateCommand,Migrate
app = create_app('development')
manager = Manager(app)
manager.add_command('server',Server)
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.command
def test():
"""This function will run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
return dict(app = app,db = db,User=User,Pitch=Pitch,Upvote=Upvote,Downvote=Downvote,Comment=Comment)
if __name__ == '__main__':
manager.run()
avg_line_length 27.444444 · max_line_length 104 · alphanum_fraction 0.755735
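Typical invocations for this Flask-Script manager, matching the commands registered above: `python manage.py server` to start the development server, `python manage.py db migrate` and `python manage.py db upgrade` for migrations, and `python manage.py test` to run the unit tests.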
Row 9 · hexsha 4a134d9de47aa92b2c91450a94afa8b7fce13a5a · size 3,042 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | mars/tests/test_config.py | haijohn/mars | 672b3a33a70565f01b1a3f508908445491d85acf | ["Apache-2.0"] | 1 | 2021-06-10T02:43:01.000Z | 2021-06-10T02:43:01.000Z |
| max_issues | mars/tests/test_config.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | ["Apache-2.0"] | null | null | null |
| max_forks | mars/tests/test_config.py | JeffroMF/mars | 2805241ac55b50c4f6319baa41113fbf8c723832 | ["Apache-2.0"] | null | null | null |

content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import threading
import pytest
from mars.config import options, option_context, is_integer, is_string, Config
def test_config_context():
with pytest.raises(AttributeError):
_ = options.a.b.c
options.register_option('c.d.e', 'a', is_string)
assert 'c' in dir(options)
assert 'd' in dir(options.c)
try:
with option_context() as ctx:
ctx.register_option('a.b.c', 1, validator=is_integer)
assert ctx.a.b.c == 1
ctx.a.b.c = 2
assert ctx.a.b.c == 2
with pytest.raises(ValueError):
ctx.a.b.c = 'a'
assert ctx.c.d.e == 'a'
ctx.c.d.e = 'b'
assert options.c.d.e == 'a'
options.c.d.e = 'c'
assert options.c.d.e == 'c'
with pytest.raises(AttributeError):
_ = options.a.b.c # noqa: F841
finally:
options.unregister_option('c.d.e')
def test_multi_thread_config():
options.register_option('a.b.c', 1)
class T(threading.Thread):
def __init__(self, is_first, condition):
super().__init__()
self.is_first = is_first
self.condition = condition
def run(self):
self.condition.acquire()
if self.is_first:
options.a.b.c = 2
self.condition.notify()
else:
self.condition.wait()
assert options.a.b.c == 1
self.condition.release()
try:
cond = threading.Condition()
a = T(True, cond)
b = T(False, cond)
b.start()
a.start()
a.join()
b.join()
finally:
options.unregister_option('a.b.c')
def test_config_copy():
cfg = Config()
cfg.register_option('a.b.c', 1)
cfg.redirect_option('a.c', 'a.b.c')
target_cfg = Config()
target_cfg.register_option('a.b.c', -1)
target_cfg.redirect_option('a.c', 'a.b.c')
src_cfg_dict = cfg.to_dict()
assert src_cfg_dict == {'a.b.c': 1}
target_cfg.update(src_cfg_dict)
assert target_cfg.a.b.c == 1
def test_pickle_config():
    cfg = Config()
    cfg.register_option('a.b.c', 1)
    cfg.redirect_option('a.c', 'a.b.c')
    s = pickle.dumps(cfg)
    new_cfg = pickle.loads(s)
    assert new_cfg.a.b.c == 1
    assert new_cfg.a.c == 1
avg_line_length 26.452174 · max_line_length 78 · alphanum_fraction 0.591716
Row 10 · hexsha 4a134e8d1b9ffce97a50980125d1226510d6c84c · size 7,921 bytes · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | powerline/lib/watcher/inotify.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | 15 | 2017-10-02T06:09:07.000Z | 2020-01-17T07:53:58.000Z |
| max_issues | powerline/lib/watcher/inotify.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | null | null | null |
| max_forks | powerline/lib/watcher/inotify.py | elventear/powerline | cddfc364c1bf269f721dfdbd765cde2649a3410d | ["MIT"] | 6 | 2017-10-03T15:48:12.000Z | 2021-08-28T18:07:29.000Z |

content:
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import errno
import os
import ctypes
from threading import RLock
from powerline.lib.inotify import INotify
from powerline.lib.monotonic import monotonic
from powerline.lib.path import realpath
class INotifyFileWatcher(INotify):
def __init__(self, expire_time=10):
super(INotifyFileWatcher, self).__init__()
self.watches = {}
self.modified = {}
self.last_query = {}
self.lock = RLock()
self.expire_time = expire_time * 60
def expire_watches(self):
now = monotonic()
for path, last_query in tuple(self.last_query.items()):
			if now - last_query > self.expire_time:
self.unwatch(path)
def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW):
			# We missed some INOTIFY events, so we don't
			# know the state of any tracked files.
for path in tuple(self.modified):
if os.path.exists(path):
self.modified[path] = True
else:
self.watches.pop(path, None)
self.modified.pop(path, None)
self.last_query.pop(path, None)
return
for path, num in tuple(self.watches.items()):
if num == wd:
if mask & self.IGNORED:
self.watches.pop(path, None)
self.modified.pop(path, None)
self.last_query.pop(path, None)
else:
if mask & self.ATTRIB:
# The watched file could have had its inode changed, in
# which case we will not get any more events for this
# file, so re-register the watch. For example by some
# other file being renamed as this file.
try:
self.unwatch(path)
except OSError:
pass
try:
self.watch(path)
except OSError as e:
if getattr(e, 'errno', None) != errno.ENOENT:
raise
else:
self.modified[path] = True
else:
self.modified[path] = True
def unwatch(self, path):
''' Remove the watch for path. Raises an OSError if removing the watch
fails for some reason. '''
path = realpath(path)
with self.lock:
self.modified.pop(path, None)
self.last_query.pop(path, None)
wd = self.watches.pop(path, None)
if wd is not None:
if self._rm_watch(self._inotify_fd, wd) != 0:
self.handle_error()
def watch(self, path):
''' Register a watch for the file/directory named path. Raises an OSError if path
does not exist. '''
path = realpath(path)
with self.lock:
if path not in self.watches:
bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
flags = self.MOVE_SELF | self.DELETE_SELF
buf = ctypes.c_char_p(bpath)
# Try watching path as a directory
wd = self._add_watch(self._inotify_fd, buf, flags | self.ONLYDIR)
if wd == -1:
eno = ctypes.get_errno()
if eno != errno.ENOTDIR:
self.handle_error()
# Try watching path as a file
flags |= (self.MODIFY | self.ATTRIB)
wd = self._add_watch(self._inotify_fd, buf, flags)
if wd == -1:
self.handle_error()
self.watches[path] = wd
self.modified[path] = False
def is_watching(self, path):
with self.lock:
return realpath(path) in self.watches
def __call__(self, path):
''' Return True if path has been modified since the last call. Can
raise OSError if the path does not exist. '''
path = realpath(path)
with self.lock:
self.last_query[path] = monotonic()
self.expire_watches()
if path not in self.watches:
				# Try to re-add the watch; it will fail if the file does not
				# exist or you don't have permission
self.watch(path)
return True
self.read(get_name=False)
if path not in self.modified:
# An ignored event was received which means the path has been
# automatically unwatched
return True
ans = self.modified[path]
if ans:
self.modified[path] = False
return ans
def close(self):
with self.lock:
for path in tuple(self.watches):
try:
self.unwatch(path)
except OSError:
pass
super(INotifyFileWatcher, self).close()
class NoSuchDir(ValueError):
pass
class BaseDirChanged(ValueError):
pass
class DirTooLarge(ValueError):
def __init__(self, bdir):
ValueError.__init__(self, 'The directory {0} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches'.format(bdir))
class INotifyTreeWatcher(INotify):
is_dummy = False
def __init__(self, basedir, ignore_event=None):
super(INotifyTreeWatcher, self).__init__()
self.basedir = realpath(basedir)
self.watch_tree()
self.modified = True
self.ignore_event = (lambda path, name: False) if ignore_event is None else ignore_event
def watch_tree(self):
self.watched_dirs = {}
self.watched_rmap = {}
try:
self.add_watches(self.basedir)
except OSError as e:
if e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
def add_watches(self, base, top_level=True):
''' Add watches for this directory and all its descendant directories,
recursively. '''
base = realpath(base)
		# A symbolic link here could lead to an endless add_watches loop
		# or to exceeding the maximum recursion depth
if not top_level and base in self.watched_dirs:
return
try:
is_dir = self.add_watch(base)
except OSError as e:
if e.errno == errno.ENOENT:
# The entry could have been deleted between listdir() and
# add_watch().
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
if e.errno == errno.EACCES:
				# We silently ignore entries for which we don't have permission,
# unless they are the top level dir
if top_level:
raise NoSuchDir('You do not have permission to monitor {0}'.format(base))
return
raise
else:
if is_dir:
try:
files = os.listdir(base)
except OSError as e:
if e.errno in (errno.ENOTDIR, errno.ENOENT):
# The dir was deleted/replaced between the add_watch()
# and listdir()
if top_level:
raise NoSuchDir('The dir {0} does not exist'.format(base))
return
raise
for x in files:
self.add_watches(os.path.join(base, x), top_level=False)
elif top_level:
# The top level dir is a file, not good.
raise NoSuchDir('The dir {0} does not exist'.format(base))
def add_watch(self, path):
bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
wd = self._add_watch(
self._inotify_fd,
ctypes.c_char_p(bpath),
# Ignore symlinks and watch only directories
self.DONT_FOLLOW | self.ONLYDIR |
self.MODIFY | self.CREATE | self.DELETE |
self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO |
self.ATTRIB | self.DELETE_SELF
)
if wd == -1:
eno = ctypes.get_errno()
if eno == errno.ENOTDIR:
return False
			raise OSError(eno, 'Failed to add watch for: {0}: {1}'.format(path, os.strerror(eno)))
self.watched_dirs[path] = wd
self.watched_rmap[wd] = path
return True
def process_event(self, wd, mask, cookie, name):
if wd == -1 and (mask & self.Q_OVERFLOW):
			# We missed some INOTIFY events, so we don't
			# know the state of any tracked dirs.
self.watch_tree()
self.modified = True
return
path = self.watched_rmap.get(wd, None)
if path is not None:
if not self.ignore_event(path, name):
self.modified = True
if mask & self.CREATE:
# A new sub-directory might have been created, monitor it.
try:
if not isinstance(path, bytes):
name = name.decode(self.fenc)
self.add_watch(os.path.join(path, name))
except OSError as e:
if e.errno == errno.ENOENT:
# Deleted before add_watch()
pass
elif e.errno == errno.ENOSPC:
raise DirTooLarge(self.basedir)
else:
raise
if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
raise BaseDirChanged('The directory %s was moved/deleted' % path)
def __call__(self):
self.read()
ret = self.modified
self.modified = False
return ret
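# Usage sketch (Linux only; assumes the ctypes inotify bindings in
# powerline.lib.inotify are functional on this system):
#   watcher = INotifyFileWatcher()
#   watcher('/tmp/some_file')  # True on the first query (watch just added)
#   ...                        # file is modified elsewhere
#   watcher('/tmp/some_file')  # True, and resets the modified flag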
| 29.446097
| 152
| 0.678576
|
4a134ed4c94c9dc785fbe7a8e86192bff43511c4
| 1,993
|
py
|
Python
|
distributed_social_network/api/serializers.py
|
CMPUT404F21-TEAM-PROJECT/cmput404-group-project
|
5fc929f6bd22d41dc73734d34b1563bcfdc87f27
|
[
"Apache-2.0"
] | 1
|
2022-02-10T05:50:19.000Z
|
2022-02-10T05:50:19.000Z
|
distributed_social_network/api/serializers.py
|
CMPUT404F21-TEAM-PROJECT/cmput404-group-project
|
5fc929f6bd22d41dc73734d34b1563bcfdc87f27
|
[
"Apache-2.0"
] | 43
|
2022-02-08T00:59:49.000Z
|
2022-03-14T00:10:01.000Z
|
distributed_social_network/api/serializers.py
|
CMPUT404F21-TEAM-PROJECT/cmput404-group-project
|
5fc929f6bd22d41dc73734d34b1563bcfdc87f27
|
[
"Apache-2.0"
] | 1
|
2022-03-17T22:11:38.000Z
|
2022-03-17T22:11:38.000Z
|
from rest_framework import serializers
from .models import Author, FollowRequest, Post, Comment, User, Inbox, Like
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
extra_kwargs = {
'password': {'write_only': True}
}
def create(self, validated_data):
password = validated_data.pop('password', None)
instance = self.Meta.model(**validated_data)
if password:
# set_password() is a function provided by django to hash the password
instance.set_password(password)
instance.save()
return instance
class AuthorSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='author')
class Meta:
model = Author
fields = '__all__'
class LikeSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='Like')
class Meta:
model = Like
fields = '__all__'
class FollowRequestSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='Follow')
class Meta:
model = FollowRequest
fields = '__all__'
class PostSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='post')
class Meta:
model = Post
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='comment')
class Meta:
model = Comment
fields = '__all__'
class InboxSerializer(serializers.ModelSerializer):
    type = serializers.CharField(read_only=True, default='inbox')
    posts = PostSerializer(many=True)
    likes = LikeSerializer(many=True)
    follow_requests = FollowRequestSerializer(many=True)
    comments = CommentSerializer(many=True)
class Meta:
model = Inbox
fields = '__all__'
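# Usage sketch (hypothetical payload; the required fields depend on the models
# imported above):
#   serializer = AuthorSerializer(data=payload)
#   if serializer.is_valid():
#       author = serializer.save()
#   else:
#       errors = serializer.errors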
| 32.672131
| 82
| 0.67135
|
4a134f2f18d4737fd81fd76974f5d9215a4b6feb
| 29,061
|
py
|
Python
|
Model Accuracy under Different Scenarios/util.py
|
jiekeshi/CovTesting_Replication
|
fd647f4f4a455857476366571af6c7afb50ddab0
|
[
"MIT"
] | 11
|
2020-10-28T01:26:48.000Z
|
2021-10-09T02:40:00.000Z
|
Model Accuracy under Different Scenarios/util.py
|
jiekeshi/CovTesting_Replication
|
fd647f4f4a455857476366571af6c7afb50ddab0
|
[
"MIT"
] | 1
|
2022-03-14T09:21:07.000Z
|
2022-03-14T09:32:03.000Z
|
Model Accuracy under Different Scenarios/util.py
|
jiekeshi/CovTesting_Replication
|
fd647f4f4a455857476366571af6c7afb50ddab0
|
[
"MIT"
] | 7
|
2020-11-15T16:21:03.000Z
|
2021-10-21T02:20:48.000Z
|
from __future__ import absolute_import
from __future__ import print_function
import os
import multiprocessing as mp
from subprocess import call
import warnings
import numpy as np
import scipy.io as sio
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import scale
import keras.backend as K
from keras.datasets import mnist, cifar10
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, AveragePooling2D
from keras.regularizers import l2
import tensorflow as tf
from scipy.spatial.distance import pdist, cdist, squareform
from keras import regularizers
from sklearn.decomposition import PCA
# Gaussian noise scale sizes that were determined so that the average
# L-2 perturbation size is equal to that of the adversarial samples
# mnist roughly L2_difference/20
# cifar roughly L2_difference/54
# svhn roughly L2_difference/60
# Be very careful with these settings: tune so that noisy/adv samples have the
# same L2-norm, otherwise the artifact will lose its accuracy
# STDEVS = {
# 'mnist': {'fgsm': 0.264, 'bim-a': 0.111, 'bim-b': 0.184, 'cw-l2': 0.588},
# 'cifar': {'fgsm': 0.0504, 'bim-a': 0.0087, 'bim-b': 0.0439, 'cw-l2': 0.015},
# 'svhn': {'fgsm': 0.1332, 'bim-a': 0.015, 'bim-b': 0.1024, 'cw-l2': 0.0379}
# }
# fined tuned again when retrained all models with X in [-0.5, 0.5]
STDEVS = {
'mnist': {'fgsm': 0.271, 'bim-a': 0.111, 'bim-b': 0.167, 'cw-l2': 0.207},
'cifar': {'fgsm': 0.0504, 'bim-a': 0.0084, 'bim-b': 0.0428, 'cw-l2': 0.007},
'svhn': {'fgsm': 0.133, 'bim-a': 0.0155, 'bim-b': 0.095, 'cw-l2': 0.008}
}
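def estimate_noise_scale(X_test, X_test_adv, divisor=20.0):
    """Hypothetical helper (not part of the original pipeline): derive a
    Gaussian scale so noisy samples roughly match the average L2 perturbation
    of the adversarial samples, following the mnist L2/20, cifar L2/54,
    svhn L2/60 heuristic noted above. Tune `divisor` per dataset."""
    diffs = (X_test_adv - X_test).reshape(len(X_test), -1)
    mean_l2 = np.mean(np.linalg.norm(diffs, axis=1))
    return mean_l2 / divisor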
CLIP_MIN = 0.0
CLIP_MAX = 1.0
# CLIP_MIN = -0.5
# CLIP_MAX = 0.5
PATH_DATA = "data/"
# Set random seed
np.random.seed(0)
def color_preprocessing(x_test):
x_test = x_test.astype('float32')
mean = [125.307, 122.95, 113.865]
std = [62.9932, 62.0887, 66.7048]
for i in range(3):
x_test[:, :, :, i] = (x_test[:, :, :, i] - mean[i]) / std[i]
return x_test
def get_data(dataset='mnist'):
"""
    Load and preprocess the requested dataset. Images scaled to [-0.5, 0.5]
    (instead of [0, 1]) suit the C&W attack and generally give better
    performance; note that the corresponding rescaling below is commented out.
:param dataset:
:return:
"""
assert dataset in ['mnist', 'cifar', 'svhn'], \
"dataset parameter must be either 'mnist' 'cifar' or 'svhn'"
if dataset == 'mnist':
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to (n_samples, 28, 28, 1)
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1, 28, 28, 1)
elif dataset == 'cifar':
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = color_preprocessing(X_train)
X_test = color_preprocessing(X_test)
else:
if not os.path.isfile(os.path.join(PATH_DATA, "svhn_train.mat")):
print('Downloading SVHN train set...')
call(
"curl -o ./data/svhn_train.mat "
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
shell=True
)
if not os.path.isfile(os.path.join(PATH_DATA, "svhn_test.mat")):
print('Downloading SVHN test set...')
call(
"curl -o ./data/svhn_test.mat "
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
shell=True
)
train = sio.loadmat(os.path.join(PATH_DATA,'svhn_train.mat'))
test = sio.loadmat(os.path.join(PATH_DATA, 'svhn_test.mat'))
X_train = np.transpose(train['X'], axes=[3, 0, 1, 2])
X_test = np.transpose(test['X'], axes=[3, 0, 1, 2])
# reshape (n_samples, 1) to (n_samples,) and change 1-index
# to 0-index
y_train = np.reshape(train['y'], (-1,)) - 1
y_test = np.reshape(test['y'], (-1,)) - 1
# cast pixels to floats, normalize to [0, 1] range
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# X_train = (X_train/255.0) - (1.0 - CLIP_MAX)
# X_test = (X_test/255.0) - (1.0 - CLIP_MAX)
# one-hot-encode the labels
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
print("X_train:", X_train.shape)
print("Y_train:", Y_train.shape)
print("X_test:", X_test.shape)
print("Y_test", Y_test.shape)
return X_train, Y_train, X_test, Y_test
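# Example (loads, and for svhn downloads, the data on call, so kept as a comment):
#   X_train, Y_train, X_test, Y_test = get_data('cifar')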
def get_model(dataset='mnist', softmax=False):
"""
Takes in a parameter indicating which model type to use ('mnist',
'cifar' or 'svhn') and returns the appropriate Keras model.
:param dataset: A string indicating which dataset we are building
a model for.
:param softmax: if add softmax to the last layer.
:return: The model; a Keras 'Sequential' instance.
"""
assert dataset in ['mnist', 'cifar', 'svhn'], \
"dataset parameter must be either 'mnist' 'cifar' or 'svhn'"
if dataset == 'mnist':
# MNIST model: 0, 2, 7, 10
# layers = [
# Conv2D(64, (3, 3), padding='valid', input_shape=(28, 28, 1)), # 0
# Activation('relu'), # 1
# BatchNormalization(), # 2
# Conv2D(64, (3, 3)), # 3
# Activation('relu'), # 4
# BatchNormalization(), # 5
# MaxPooling2D(pool_size=(2, 2)), # 6
# Dropout(0.5), # 7
# Flatten(), # 8
# Dense(128), # 9
# Activation('relu'), # 10
# BatchNormalization(), # 11
# Dropout(0.5), # 12
# Dense(10), # 13
# ]
# ## lenet5
# layers = [
# Conv2D(filters=6, kernel_size=(5, 5), input_shape=(28, 28, 1)),
# Activation('relu'),
# MaxPooling2D(pool_size=(2, 2)),
# Conv2D(filters=16, kernel_size=(5, 5)),
# Activation('relu'),
# MaxPooling2D(pool_size=(2, 2)),
# Flatten(),
# Dense(units=120, activation='relu'),
# Dense(units=84, activation='relu'),
# Dense(units=10, activation='softmax'),
# ]
## lenet4
layers = [
Conv2D(filters=6, kernel_size=(5, 5), input_shape=(28, 28, 1)),
Activation('relu'),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=16, kernel_size=(5, 5)),
Activation('relu'),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(units=84, activation='relu'),
Dense(units=10, activation='softmax'),
]
# ## lenet1
# layers = [
# Conv2D(filters=4, kernel_size=(5, 5), input_shape=(28, 28, 1)),
# Activation('relu'),
# MaxPooling2D(pool_size=(2, 2)),
# Conv2D(filters=12, kernel_size=(5, 5)),
# Activation('relu'),
# MaxPooling2D(pool_size=(2, 2)),
# Flatten(),
# Dense(units=10, activation='softmax'),
# ]
elif dataset == 'cifar':
# CIFAR-10 model
layers = [
Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 3)), # 0
Activation('relu'), # 1
BatchNormalization(), # 2
Conv2D(32, (3, 3), padding='same'), # 3
Activation('relu'), # 4
BatchNormalization(), # 5
MaxPooling2D(pool_size=(2, 2)), # 6
Conv2D(64, (3, 3), padding='same'), # 7
Activation('relu'), # 8
BatchNormalization(), # 9
Conv2D(64, (3, 3), padding='same'), # 10
Activation('relu'), # 11
BatchNormalization(), # 12
MaxPooling2D(pool_size=(2, 2)), # 13
Conv2D(128, (3, 3), padding='same'), # 14
Activation('relu'), # 15
BatchNormalization(), # 16
Conv2D(128, (3, 3), padding='same'), # 17
Activation('relu'), # 18
BatchNormalization(), # 19
MaxPooling2D(pool_size=(2, 2)), # 20
Flatten(), # 21
Dropout(0.5), # 22
Dense(1024, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), # 23
Activation('relu'), # 24
BatchNormalization(), # 25
Dropout(0.5), # 26
Dense(512, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)), # 27
Activation('relu'), # 28
BatchNormalization(), # 29
Dropout(0.5), # 30
Dense(10), # 31
]
else:
# SVHN model
layers = [
Conv2D(64, (3, 3), padding='valid', input_shape=(32, 32, 3)), # 0
Activation('relu'), # 1
BatchNormalization(), # 2
Conv2D(64, (3, 3)), # 3
Activation('relu'), # 4
BatchNormalization(), # 5
MaxPooling2D(pool_size=(2, 2)), # 6
Dropout(0.5), # 7
Flatten(), # 8
Dense(512), # 9
Activation('relu'), # 10
BatchNormalization(), # 11
Dropout(0.5), # 12
Dense(128), # 13
Activation('relu'), # 14
BatchNormalization(), # 15
Dropout(0.5), # 16
Dense(10), # 17
]
model = Sequential()
for layer in layers:
model.add(layer)
if softmax:
model.add(Activation('softmax'))
return model
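# Quick sketch (kept as a comment to avoid building a model at import time):
#   model = get_model('mnist', softmax=True)  # LeNet-4 variant above
#   model.compile(optimizer='adam', loss='categorical_crossentropy',
#                 metrics=['accuracy'])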
def cross_entropy(y_true, y_pred):
return tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)
def lid_term(logits, batch_size=100):
"""Calculate LID loss term for a minibatch of logits
:param logits:
:return:
"""
# y_pred = tf.nn.softmax(logits)
y_pred = logits
# calculate pairwise distance
r = tf.reduce_sum(tf.square(y_pred), axis=1)
# turn r into column vector
r = tf.reshape(r, [-1, 1])
D = r - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(r)
# find the k nearest neighbor
D1 = tf.sqrt(D + 1e-9)
D2, _ = tf.nn.top_k(-D1, k=21, sorted=True)
D3 = -D2[:, 1:]
m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1) # to avoid nan
lids = -20 / v_log
## batch normalize lids
# lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)
return lids
def lid_adv_term(clean_logits, adv_logits, batch_size=100):
"""Calculate LID loss term for a minibatch of advs logits
:param logits: clean logits
:param A_logits: adversarial logits
:return:
"""
# y_pred = tf.nn.softmax(logits)
c_pred = tf.reshape(clean_logits, (batch_size, -1))
a_pred = tf.reshape(adv_logits, (batch_size, -1))
# calculate pairwise distance
r_a = tf.reduce_sum(tf.square(a_pred), axis=1)
# turn r_a into column vector
r_a = tf.reshape(r_a, [-1, 1])
r_c = tf.reduce_sum(tf.square(c_pred), axis=1)
# turn r_c into row vector
r_c = tf.reshape(r_c, [1, -1])
D = r_a - 2 * tf.matmul(a_pred, tf.transpose(c_pred)) + r_c
# find the k nearest neighbor
D1 = tf.sqrt(D + 1e-9)
D2, _ = tf.nn.top_k(-D1, k=21, sorted=True)
D3 = -D2[:, 1:]
m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1) # to avoid nan
lids = -20 / v_log
## batch normalize lids
lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)
return lids
def flip(x, nb_diff):
"""
Helper function for get_noisy_samples
:param x:
:param nb_diff:
:return:
"""
original_shape = x.shape
x = np.copy(np.reshape(x, (-1,)))
candidate_inds = np.where(x < CLIP_MAX)[0]
assert candidate_inds.shape[0] >= nb_diff
inds = np.random.choice(candidate_inds, nb_diff)
x[inds] = CLIP_MAX
return np.reshape(x, original_shape)
def get_noisy_samples(X_test, X_test_adv, dataset, attack):
"""
TODO
:param X_test:
:param X_test_adv:
:param dataset:
:param attack:
:return:
"""
if attack in ['jsma', 'cw-l0']:
X_test_noisy = np.zeros_like(X_test)
for i in range(len(X_test)):
# Count the number of pixels that are different
nb_diff = len(np.where(X_test[i] != X_test_adv[i])[0])
# Randomly flip an equal number of pixels (flip means move to max
# value of 1)
X_test_noisy[i] = flip(X_test[i], nb_diff)
else:
        warnings.warn("Important: using pre-set Gaussian scale sizes to craft noisy "
                      "samples. You will definitely need to manually tune the scale "
                      "according to the L2 print below, otherwise the result "
                      "will be inaccurate. In the future, scale sizes will be inferred "
                      "automatically. For now, manually tune the scales around "
                      "mnist: L2/20.0, cifar: L2/54.0, svhn: L2/60.0")
# Add Gaussian noise to the samples
# print(STDEVS[dataset][attack])
X_test_noisy = np.minimum(
np.maximum(
X_test + np.random.normal(loc=0, scale=STDEVS[dataset][attack],
size=X_test.shape),
CLIP_MIN
),
CLIP_MAX
)
return X_test_noisy
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
"""
TODO
:param model:
:param X:
:param nb_iter:
:param batch_size:
:return:
"""
output_dim = model.layers[-1].output.shape[-1].value
get_output = K.function(
[model.layers[0].input, K.learning_phase()],
[model.layers[-1].output]
)
def predict():
n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
output = np.zeros(shape=(len(X), output_dim))
for i in range(n_batches):
output[i * batch_size:(i + 1) * batch_size] = \
get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0]
return output
preds_mc = []
for i in tqdm(range(nb_iter)):
preds_mc.append(predict())
return np.asarray(preds_mc)
def get_deep_representations(model, X, batch_size=256):
"""
TODO
:param model:
:param X:
:param batch_size:
:return:
"""
# last hidden layer is always at index -4
output_dim = model.layers[-4].output.shape[-1].value
get_encoding = K.function(
[model.layers[0].input, K.learning_phase()],
[model.layers[-4].output]
)
n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
output = np.zeros(shape=(len(X), output_dim))
for i in range(n_batches):
output[i * batch_size:(i + 1) * batch_size] = \
get_encoding([X[i * batch_size:(i + 1) * batch_size], 0])[0]
return output
def get_layer_wise_activations(model, dataset):
"""
Get the deep activation outputs.
:param model:
:param dataset: 'mnist', 'cifar', 'svhn', has different submanifolds architectures
:return:
"""
assert dataset in ['mnist', 'cifar', 'svhn'], \
"dataset parameter must be either 'mnist' 'cifar' or 'svhn'"
if dataset == 'mnist':
# mnist model
acts = [model.layers[0].input]
acts.extend([layer.output for layer in model.layers])
elif dataset == 'cifar':
# cifar-10 model
acts = [model.layers[0].input]
acts.extend([layer.output for layer in model.layers])
else:
# svhn model
acts = [model.layers[0].input]
acts.extend([layer.output for layer in model.layers])
return acts
# lid of a single query point x
def mle_single(data, x, k=20):
data = np.asarray(data, dtype=np.float32)
x = np.asarray(x, dtype=np.float32)
# print('x.ndim',x.ndim)
if x.ndim == 1:
x = x.reshape((-1, x.shape[0]))
# dim = x.shape[1]
k = min(k, len(data)-1)
f = lambda v: - k / np.sum(np.log(v/v[-1]))
a = cdist(x, data)
a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
a = np.apply_along_axis(f, axis=1, arr=a)
return a[0]
# lid of a batch of query points X
def mle_batch(data, batch, k):
data = np.asarray(data, dtype=np.float32)
batch = np.asarray(batch, dtype=np.float32)
k = min(k, len(data)-1)
f = lambda v: - k / np.sum(np.log(v/v[-1]))
a = cdist(batch, data)
a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
a = np.apply_along_axis(f, axis=1, arr=a)
return a
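# Example (synthetic data): LID estimates for 5 query points against a
# reference batch; the closest match in each row is skipped, mirroring the
# self-exclusion used for in-sample queries.
#   ref = np.random.rand(200, 32).astype(np.float32)
#   qry = np.random.rand(5, 32).astype(np.float32)
#   lids = mle_batch(ref, qry, k=20)  # shape (5,)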
# mean distance of x to its k nearest neighbours
def kmean_batch(data, batch, k):
data = np.asarray(data, dtype=np.float32)
batch = np.asarray(batch, dtype=np.float32)
k = min(k, len(data)-1)
f = lambda v: np.mean(v)
a = cdist(batch, data)
a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
a = np.apply_along_axis(f, axis=1, arr=a)
return a
# mean distance of x to its k nearest neighbours
def kmean_pca_batch(data, batch, k=10):
data = np.asarray(data, dtype=np.float32)
batch = np.asarray(batch, dtype=np.float32)
a = np.zeros(batch.shape[0])
for i in np.arange(batch.shape[0]):
tmp = np.concatenate((data, [batch[i]]))
tmp_pca = PCA(n_components=2).fit_transform(tmp)
a[i] = kmean_batch(tmp_pca[:-1], tmp_pca[-1], k=k)
return a
def get_lids_random_batch(model, X, X_noisy, X_adv, dataset, k=10, batch_size=100):
"""
Get the local intrinsic dimensionality of each Xi in X_adv
estimated by k close neighbours in the random batch it lies in.
:param model:
:param X: normal images
:param X_noisy: noisy images
    :param X_adv: adversarial images
:param dataset: 'mnist', 'cifar', 'svhn', has different DNN architectures
:param k: the number of nearest neighbours for LID estimation
:param batch_size: default 100
:return: lids: LID of normal images of shape (num_examples, lid_dim)
lids_adv: LID of advs images of shape (num_examples, lid_dim)
"""
# get deep representations
funcs = [K.function([model.layers[0].input, K.learning_phase()], [out])
for out in get_layer_wise_activations(model, dataset)]
lid_dim = len(funcs)
print("Number of layers to estimate: ", lid_dim)
def estimate(i_batch):
start = i_batch * batch_size
end = np.minimum(len(X), (i_batch + 1) * batch_size)
n_feed = end - start
lid_batch = np.zeros(shape=(n_feed, lid_dim))
lid_batch_adv = np.zeros(shape=(n_feed, lid_dim))
lid_batch_noisy = np.zeros(shape=(n_feed, lid_dim))
for i, func in enumerate(funcs):
X_act = func([X[start:end], 0])[0]
X_act = np.asarray(X_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_act: ", X_act.shape)
X_adv_act = func([X_adv[start:end], 0])[0]
X_adv_act = np.asarray(X_adv_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_adv_act: ", X_adv_act.shape)
X_noisy_act = func([X_noisy[start:end], 0])[0]
X_noisy_act = np.asarray(X_noisy_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_noisy_act: ", X_noisy_act.shape)
# random clean samples
# Maximum likelihood estimation of local intrinsic dimensionality (LID)
lid_batch[:, i] = mle_batch(X_act, X_act, k=k)
# print("lid_batch: ", lid_batch.shape)
lid_batch_adv[:, i] = mle_batch(X_act, X_adv_act, k=k)
# print("lid_batch_adv: ", lid_batch_adv.shape)
lid_batch_noisy[:, i] = mle_batch(X_act, X_noisy_act, k=k)
# print("lid_batch_noisy: ", lid_batch_noisy.shape)
return lid_batch, lid_batch_noisy, lid_batch_adv
lids = []
lids_adv = []
lids_noisy = []
n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
for i_batch in tqdm(range(n_batches)):
lid_batch, lid_batch_noisy, lid_batch_adv = estimate(i_batch)
lids.extend(lid_batch)
lids_adv.extend(lid_batch_adv)
lids_noisy.extend(lid_batch_noisy)
# print("lids: ", lids.shape)
# print("lids_adv: ", lids_noisy.shape)
# print("lids_noisy: ", lids_noisy.shape)
lids = np.asarray(lids, dtype=np.float32)
lids_noisy = np.asarray(lids_noisy, dtype=np.float32)
lids_adv = np.asarray(lids_adv, dtype=np.float32)
return lids, lids_noisy, lids_adv
def get_kmeans_random_batch(model, X, X_noisy, X_adv, dataset, k=10, batch_size=100, pca=False):
"""
Get the mean distance of each Xi in X_adv to its k nearest neighbors.
:param model:
:param X: normal images
:param X_noisy: noisy images
    :param X_adv: adversarial images
:param dataset: 'mnist', 'cifar', 'svhn', has different DNN architectures
:param k: the number of nearest neighbours for LID estimation
:param batch_size: default 100
:param pca: using pca or not, if True, apply pca to the referenced sample and a
minibatch of normal samples, then compute the knn mean distance of the referenced sample.
:return: kms_normal: kmean of normal images (num_examples, 1)
             kms_noisy: kmean of noisy images (num_examples, 1)
kms_adv: kmean of adv images (num_examples, 1)
"""
# get deep representations
funcs = [K.function([model.layers[0].input, K.learning_phase()], [model.layers[-2].output])]
km_dim = len(funcs)
print("Number of layers to use: ", km_dim)
def estimate(i_batch):
start = i_batch * batch_size
end = np.minimum(len(X), (i_batch + 1) * batch_size)
n_feed = end - start
km_batch = np.zeros(shape=(n_feed, km_dim))
km_batch_adv = np.zeros(shape=(n_feed, km_dim))
km_batch_noisy = np.zeros(shape=(n_feed, km_dim))
for i, func in enumerate(funcs):
X_act = func([X[start:end], 0])[0]
X_act = np.asarray(X_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_act: ", X_act.shape)
X_adv_act = func([X_adv[start:end], 0])[0]
X_adv_act = np.asarray(X_adv_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_adv_act: ", X_adv_act.shape)
X_noisy_act = func([X_noisy[start:end], 0])[0]
X_noisy_act = np.asarray(X_noisy_act, dtype=np.float32).reshape((n_feed, -1))
# print("X_noisy_act: ", X_noisy_act.shape)
# Maximum likelihood estimation of local intrinsic dimensionality (LID)
if pca:
km_batch[:, i] = kmean_pca_batch(X_act, X_act, k=k)
else:
km_batch[:, i] = kmean_batch(X_act, X_act, k=k)
# print("lid_batch: ", lid_batch.shape)
if pca:
km_batch_adv[:, i] = kmean_pca_batch(X_act, X_adv_act, k=k)
else:
km_batch_adv[:, i] = kmean_batch(X_act, X_adv_act, k=k)
# print("lid_batch_adv: ", lid_batch_adv.shape)
if pca:
km_batch_noisy[:, i] = kmean_pca_batch(X_act, X_noisy_act, k=k)
else:
km_batch_noisy[:, i] = kmean_batch(X_act, X_noisy_act, k=k)
# print("lid_batch_noisy: ", lid_batch_noisy.shape)
return km_batch, km_batch_noisy, km_batch_adv
kms = []
kms_adv = []
kms_noisy = []
n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
for i_batch in tqdm(range(n_batches)):
km_batch, km_batch_noisy, km_batch_adv = estimate(i_batch)
kms.extend(km_batch)
kms_adv.extend(km_batch_adv)
kms_noisy.extend(km_batch_noisy)
# print("kms: ", kms.shape)
# print("kms_adv: ", kms_noisy.shape)
# print("kms_noisy: ", kms_noisy.shape)
kms = np.asarray(kms, dtype=np.float32)
kms_noisy = np.asarray(kms_noisy, dtype=np.float32)
kms_adv = np.asarray(kms_adv, dtype=np.float32)
return kms, kms_noisy, kms_adv
def score_point(tup):
"""
TODO
:param tup:
:return:
"""
x, kde = tup
return kde.score_samples(np.reshape(x, (1, -1)))[0]
def score_samples(kdes, samples, preds, n_jobs=None):
"""
TODO
:param kdes:
:param samples:
:param preds:
:param n_jobs:
:return:
"""
if n_jobs is not None:
p = mp.Pool(n_jobs)
else:
p = mp.Pool()
results = np.asarray(
p.map(
score_point,
[(x, kdes[i]) for x, i in zip(samples, preds)]
)
)
p.close()
p.join()
return results
def normalize(normal, adv, noisy):
"""Z-score normalisation
TODO
:param normal:
:param adv:
:param noisy:
:return:
"""
n_samples = len(normal)
total = scale(np.concatenate((normal, adv, noisy)))
return total[:n_samples], total[n_samples:2*n_samples], total[2*n_samples:]
def train_lr(X, y):
"""
TODO
:param X: the data samples
:param y: the labels
:return:
"""
lr = LogisticRegressionCV(n_jobs=-1).fit(X, y)
return lr
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
"""
TODO
:param densities_pos:
:param densities_neg:
:param uncerts_pos:
:param uncerts_neg:
:return:
"""
values_neg = np.concatenate(
(densities_neg.reshape((1, -1)),
uncerts_neg.reshape((1, -1))),
axis=0).transpose([1, 0])
values_pos = np.concatenate(
(densities_pos.reshape((1, -1)),
uncerts_pos.reshape((1, -1))),
axis=0).transpose([1, 0])
values = np.concatenate((values_neg, values_pos))
labels = np.concatenate(
(np.zeros_like(densities_neg), np.ones_like(densities_pos)))
lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)
return values, labels, lr
def compute_roc(y_true, y_pred, plot=False):
"""
TODO
:param y_true: ground truth
:param y_pred: predictions
:param plot:
:return:
"""
fpr, tpr, _ = roc_curve(y_true, y_pred)
auc_score = roc_auc_score(y_true, y_pred)
if plot:
plt.figure(figsize=(7, 6))
plt.plot(fpr, tpr, color='blue',
label='ROC (AUC = %0.4f)' % auc_score)
plt.legend(loc='lower right')
plt.title("ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
return fpr, tpr, auc_score
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
"""
TODO
:param probs_neg:
:param probs_pos:
:param plot:
:return:
"""
probs = np.concatenate((probs_neg, probs_pos))
labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
fpr, tpr, _ = roc_curve(labels, probs)
auc_score = auc(fpr, tpr)
if plot:
plt.figure(figsize=(7, 6))
plt.plot(fpr, tpr, color='blue',
label='ROC (AUC = %0.4f)' % auc_score)
plt.legend(loc='lower right')
plt.title("ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.show()
return fpr, tpr, auc_score
def random_split(X, Y):
"""
    Randomly split the data into 80% for training and 20% for testing
:param X:
:param Y:
:return:
"""
print("random split 80%, 20% for training and testing")
num_samples = X.shape[0]
num_train = int(num_samples * 0.8)
rand_pert = np.random.permutation(num_samples)
X = X[rand_pert]
Y = Y[rand_pert]
X_train, X_test = X[:num_train], X[num_train:]
Y_train, Y_test = Y[:num_train], Y[num_train:]
return X_train, Y_train, X_test, Y_test
def block_split(X, Y):
"""
Split the data into 80% for training and 20% for testing
in a block size of 100.
:param X:
:param Y:
:return:
"""
print("Isolated split 80%, 20% for training and testing")
num_samples = X.shape[0]
partition = int(num_samples / 3)
X_adv, Y_adv = X[:partition], Y[:partition]
X_norm, Y_norm = X[partition: 2*partition], Y[partition: 2*partition]
X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]
num_train = int(partition*0.008) * 100
X_train = np.concatenate((X_norm[:num_train], X_noisy[:num_train], X_adv[:num_train]))
Y_train = np.concatenate((Y_norm[:num_train], Y_noisy[:num_train], Y_adv[:num_train]))
X_test = np.concatenate((X_norm[num_train:], X_noisy[num_train:], X_adv[num_train:]))
Y_test = np.concatenate((Y_norm[num_train:], Y_noisy[num_train:], Y_adv[num_train:]))
return X_train, Y_train, X_test, Y_test
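# Note on the split above (sketch): X is assumed to be stacked as equal thirds
# in the order [adv | normal | noisy]. With num_samples = 3000, partition is
# 1000 and num_train = int(1000 * 0.008) * 100 = 800, i.e. an 80% training
# split rounded down to whole blocks of 100 per group.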
if __name__ == "__main__":
# unit test
a = np.array([1, 2, 3, 4, 5])
b = np.array([6, 7, 8, 9, 10])
c = np.array([11, 12, 13, 14, 15])
a_z, b_z, c_z = normalize(a, b, c)
print(a_z)
print(b_z)
print(c_z)
| 33.989474
| 107
| 0.585665
|
4a13504d23ee906bfda335abb09b3ec881384777
| 5,976
|
py
|
Python
|
betahex/models.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | 2
|
2019-03-17T07:09:14.000Z
|
2020-05-04T17:40:51.000Z
|
betahex/models.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | null | null | null |
betahex/models.py
|
StarvingMarvin/betahex
|
0626cf4d003e94423f34f3d83149702a5557ddb8
|
[
"MIT"
] | null | null | null |
from io import StringIO
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
def conv_layer(x, filters, size, activation,
name=None, bias=True, reg_scale=None, board=None):
reg = None if reg_scale is None else tf.contrib.layers.l2_regularizer(scale=reg_scale)
conv = tf.layers.conv2d(
x, filters, size, activation=activation, padding='same', use_bias=bias,
name=name, kernel_initializer=tf.random_normal_initializer(),
kernel_regularizer=reg, bias_regularizer=reg
)
if board is not None:
conv = conv * board
return conv
def visualize_layer(features, tensor, channels, name='layer_img', cy=8):
ix = features.shape[0] + 2
iy = features.shape[0] + 2
cx = channels // cy
img = tf.slice(tensor, (0, 0, 0, 0), (1, -1, -1, -1))
img = tf.reshape(img, (features.shape[0], features.shape[1], channels))
img = tf.image.resize_image_with_crop_or_pad(img, iy, ix)
img = tf.reshape(img, (iy, ix, cy, cx))
img = tf.transpose(img, (2, 0, 3, 1))
img = tf.reshape(img, (1, cy * iy, cx * ix, 1))
tf.summary.image(name, img)
def common_model(features, *, filter_count=None, groups=None, reg_scale=None):
def model(input, mode):
tensors = [input[feat] for feat in features.feature_names]
mangled = tf.concat(tensors, 3)
prev = conv_layer(
mangled, filter_count, 5, tf.nn.relu, "5-filter-conv-input",
bias=True, reg_scale=None
)
board = input['ones']
viz_cnt = 0
head = groups[:-1]
tail = groups[-1]
training = mode == learn.ModeKeys.TRAIN
fc = filter_count
rs = reg_scale
for g, group in enumerate(head):
if group < 0:
prev = tf.layers.dropout(prev, rate=-group, training=training)
elif group == 0:
prev = tf.layers.batch_normalization(
prev, axis=3, training=training, name="batch_norm-g{}".format(g)
)
elif 0 < group < 1:
fc = int(filter_count * group)
prev = conv_layer(prev, fc, 3, tf.nn.relu, "3-filter-conv-{}-g{}".format(fc, g),
bias=False, reg_scale=rs, board=board)
else:
for i in range(group):
fc = filter_count
prev = conv_layer(prev, fc, 3, tf.nn.relu, "3-filter-conv-g{}-{}".format(g, i),
bias=False, reg_scale=rs, board=board)
for i in range(tail):
prev = conv_layer(prev, filter_count, 3, tf.nn.relu, "3-filter-conv-{}".format(i),
bias=True, reg_scale=rs, board=board)
visualize_layer(features, prev, filter_count, "{:0=2}-3-filter-conv-{}".format(viz_cnt, i))
viz_cnt += 1
return prev
return model
def mask_invalid(x, valid, name=None):
batch_min = tf.reduce_min(x, name="mask_min")
batch_max = tf.reduce_max(x, name="mask_max")
tf.summary.scalar("mask_min", batch_min)
tf.summary.scalar("mask_max", batch_max)
shifted = x - batch_min
masked = tf.multiply(shifted, valid, name)
return masked
def shift_invalid(x, valid, name=None):
invalid = 1 - valid
valid_min = tf.reduce_min(x * valid, name="valid_min")
invalid_max = tf.reduce_max(x * invalid, name="invalid_max")
tf.summary.scalar("valid_min", valid_min)
tf.summary.scalar("invalid_max", invalid_max)
shift = tf.maximum(invalid_max - valid_min, 0)
shifted = tf.subtract(x, shift * invalid, name)
return shifted
def skew(out, features):
board = tf.reshape(out, (-1, features.board_size, features.board_size, 1))
padded = tf.pad(board, [[0, 0], [0, features.shape[0] - features.shape[1]], [0, 0], [0, 0]])
y_first = tf.transpose(padded, [2, 1, 0, 3])
shredded = tf.unstack(y_first)
skewed_rows = []
for i, t in enumerate(shredded):
indices = [(x - i // 2) % features.shape[0] for x in range(features.shape[0])]
t_indices = tf.constant(indices, dtype=tf.int32)
skewed_rows.append(tf.gather(t, t_indices))
skewed = tf.stack(skewed_rows)
batch_first = tf.transpose(skewed, [2, 1, 0, 3])
return batch_first
def make_policy(features, filter_count=None, groups=None, reg_scale=None):
common_f = common_model(
features,
filter_count=filter_count,
groups=groups,
reg_scale=reg_scale
)
def model(input, mode):
common = common_f(input, mode)
drop = tf.layers.dropout(common, (filter_count - 1.5) / filter_count)
activation = conv_layer(drop, 1, 1, None, "1-filter-conv-output", True,
reg_scale=reg_scale)
tf.summary.image("output_activation_img", activation)
valid = tf.to_float(input['empty'])
masked = activation * valid
logits = tf.reshape(
masked,
[-1, features.shape[0] * features.shape[1]],
name="logits"
)
tf.summary.image("output_logits_img", masked)
return activation, logits
return model
def make_value(features, filter_count, groups, reg_scale=None):
common_f = common_model(features, filter_count=filter_count, groups=groups, reg_scale=reg_scale)
def model(input, mode):
common = common_f(input, mode)
return common
return model
def conf2path(filter_count, groups):
buf = StringIO()
buf.write(str(filter_count))
buf.write('f')
for g in groups:
if g > 0:
buf.write("-")
buf.write(str(g))
    return buf.getvalue()
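# Example: conf2path(72, [1, 0, -0.2, 1]) -> '72f-1-1'
# (only positive group entries contribute path segments).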
MODEL = {
'name': '72f-relu-(1-bn-drop2)x4-1-drop-but1.5',
'filters': 72,
'shape': [1, 0, -0.2] * 4 + [1],
'features': ['black', 'white', 'empty', 'recentness', 'distances', 'black_edges', 'white_edges', 'ones'],
'regularization_scale': None
}
| 31.787234
| 109
| 0.595716
|
4a1350537616c54be4702db21d591af0ef4f48bd
| 536
|
py
|
Python
|
DjangoBlog/articles/migrations/0008_auto_20210808_0801.py
|
Dimple278/Publication-Repository
|
ec274bf5822e160b90f0a5bc8559c1d199e12854
|
[
"Unlicense",
"MIT"
] | null | null | null |
DjangoBlog/articles/migrations/0008_auto_20210808_0801.py
|
Dimple278/Publication-Repository
|
ec274bf5822e160b90f0a5bc8559c1d199e12854
|
[
"Unlicense",
"MIT"
] | 1
|
2021-08-08T06:46:46.000Z
|
2021-08-08T06:46:46.000Z
|
DjangoBlog/articles/migrations/0008_auto_20210808_0801.py
|
Dimple278/Publication-Repository
|
ec274bf5822e160b90f0a5bc8559c1d199e12854
|
[
"Unlicense",
"MIT"
] | 2
|
2021-07-03T11:55:11.000Z
|
2021-08-09T08:27:52.000Z
|
# Generated by Django 3.2.4 on 2021-08-08 02:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0007_auto_20210808_0747'),
]
operations = [
migrations.RemoveField(
model_name='conferencearticle',
name='organised_date',
),
migrations.AddField(
model_name='conferencearticle',
name='publisher',
field=models.CharField(default=None, max_length=100),
),
]
| 23.304348
| 65
| 0.600746
|
4a1352003a469543e8cae212516f333438db2a2f
| 95,525
|
py
|
Python
|
salt/master.py
|
JesseRhoads/salt
|
bd5395ea85956e064970710aae03398cbd1b20f5
|
[
"Apache-2.0"
] | 1
|
2020-10-02T02:29:25.000Z
|
2020-10-02T02:29:25.000Z
|
salt/master.py
|
JesseRhoads/salt
|
bd5395ea85956e064970710aae03398cbd1b20f5
|
[
"Apache-2.0"
] | null | null | null |
salt/master.py
|
JesseRhoads/salt
|
bd5395ea85956e064970710aae03398cbd1b20f5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
'''
from __future__ import absolute_import
# Import python libs
import os
import re
import time
import errno
import shutil
import logging
import hashlib
import resource
import multiprocessing
import sys
# Import third party libs
import zmq
from M2Crypto import RSA
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.exitcodes
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.daemons.masterapi
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.process
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
from salt.utils.event import tagify
import binascii
from salt.utils.master import ConnectedCache
from salt.utils.cache import CacheCli
# Import halite libs
try:
import halite
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
log = logging.getLogger(__name__)
class SMaster(object):
'''
Create a simple salt-master, this will generate the top-level master
'''
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
self.crypticle = self.__prep_crypticle()
def __prep_crypticle(self):
'''
Return the crypticle used for AES
'''
return salt.crypt.Crypticle(self.opts, self.opts['aes'])
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(multiprocessing.Process):
'''
    A generalized maintenance process which performs maintenance
    routines.
'''
def __init__(self, opts):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__()
self.opts = opts
# Init fileserver manager
self.fileserver = salt.fileserver.Fileserver(self.opts)
# Load Runners
self.runners = salt.loader.runner(self.opts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
self.runners,
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Init any values needed by the git ext pillar
self.pillargitfs = salt.daemons.masterapi.init_git_pillar(self.opts)
# Set up search object
self.search = salt.search.Search(self.opts)
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.appendproctitle('Maintenance')
# Make Start Times
last = int(time.time())
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
self.handle_search(now, last)
self.handle_pillargit()
self.handle_schedule()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.daemons.masterapi.fileserver_update(self.fileserver)
salt.utils.verify.check_max_open_files(self.opts)
last = now
try:
time.sleep(self.loop_interval)
except KeyboardInterrupt:
break
def handle_search(self, now, last):
'''
Update the search index
'''
if self.opts.get('search'):
if now - last >= self.opts['search_index_interval']:
self.search.index()
def handle_key_rotate(self, now):
'''
Rotate the AES key on a schedule
'''
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
salt.crypt.dropfile(
self.opts['cachedir'],
self.opts['user'],
self.opts['sock_dir'])
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to AES key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_pillargit(self):
'''
Update git pillar
'''
try:
for pillargit in self.pillargitfs:
pillargit.update()
except Exception as exc:
log.error('Exception {0} occurred in file server update '
'for git_pillar module.'.format(exc))
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.opts.get('presence_events', False):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
self.event.fire_event(data, tagify('present', 'presence'))
            old_present.clear()
            old_present.update(present)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
        :param dict opts: The salt options
'''
# Warn if ZMQ < 3.2
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... OSX reports RLIM_INFINITY as
            # the hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: '
'{0}/{1}'.format(
mof_s, mof_h
)
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, {0}, is higher '
'than what the user running salt is allowed to raise to, {1}. '
'Defaulting to {1}.'.format(mof_c, mof_h)
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to {0}'.format(mof_c))
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: '
'{0}/{1}'.format(mof_s, mof_h)
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under OSX reported that our 100000 default value is
# still too high.
log.critical(
                'Failed to raise max open files setting to {0}. If this '
                'value is too low, the salt-master will most likely fail '
                'to run properly.'.format(
mof_c
)
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
if errors:
for error in errors:
log.error(error)
log.error('Master failed pre flight checks, exiting\n')
sys.exit(salt.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info(
'salt-master is starting as user {0!r}'.format(salt.utils.get_user())
)
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
log.info('Creating master process manager')
process_manager = salt.utils.process.ProcessManager()
log.info('Creating master maintenance process')
process_manager.add_process(Maintenance, args=(self.opts,))
log.info('Creating master publisher process')
process_manager.add_process(Publisher, args=(self.opts,))
log.info('Creating master event publisher process')
process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
log.info('Creating master reactor process')
process_manager.add_process(salt.utils.event.Reactor, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: {0}'.format(proc))
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error(('Error creating ext_processes '
'process: {0}').format(proc))
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
process_manager.add_process(Halite, args=(self.opts['halite'],))
if self.opts['con_cache']:
log.info('Creating master concache process')
process_manager.add_process(ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
def run_reqserver():
reqserv = ReqServer(
self.opts,
self.crypticle,
self.key,
self.master_key)
reqserv.run()
log.info('Creating master request server process')
process_manager.add_process(run_reqserver)
try:
process_manager.run()
except KeyboardInterrupt:
# Shut the master down gracefully on SIGINT
log.warn('Stopping the Salt Master')
process_manager.kill_children()
raise SystemExit('\nExiting on Ctrl-c')
class Halite(multiprocessing.Process):
'''
Manage the Halite server
'''
def __init__(self, hopts):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__()
self.hopts = hopts
def run(self):
'''
Fire up halite!
'''
salt.utils.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class Publisher(multiprocessing.Process):
'''
The publishing interface, a simple zeromq publisher that sends out the
commands.
'''
def __init__(self, opts):
'''
Create a publisher instance
:param dict opts: The salt options
'''
super(Publisher, self).__init__()
self.opts = opts
def run(self):
'''
Bind to the interface specified in the configuration file
Override of multiprocessing.Process.run()
'''
salt.utils.appendproctitle(self.__class__.__name__)
# Set up the context
context = zmq.Context(1)
# Prepare minion publish socket
pub_sock = context.socket(zmq.PUB)
        # if 2.1 <= zmq < 3.0, we only have one HWM setting
try:
pub_sock.setsockopt(zmq.HWM, self.opts.get('pub_hwm', 1000))
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
pub_sock.setsockopt(zmq.SNDHWM, self.opts.get('pub_hwm', 1000))
pub_sock.setsockopt(zmq.RCVHWM, self.opts.get('pub_hwm', 1000))
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
pub_sock.setsockopt(zmq.IPV4ONLY, 0)
pub_uri = 'tcp://{interface}:{publish_port}'.format(**self.opts)
# Prepare minion pull socket
pull_sock = context.socket(zmq.PULL)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
salt.utils.check_ipc_path_max_len(pull_uri)
# Start the minion command publisher
log.info('Starting the Salt Publisher on {0}'.format(pub_uri))
pub_sock.bind(pub_uri)
# Securely create socket
log.info('Starting the Salt Puller on {0}'.format(pull_uri))
old_umask = os.umask(0o177)
try:
pull_sock.bind(pull_uri)
finally:
os.umask(old_umask)
try:
while True:
# Catch and handle EINTR from when this process is sent
# SIGUSR1 gracefully so we don't choke and die horribly
try:
package = pull_sock.recv()
unpacked_package = salt.payload.unpackage(package)
payload = unpacked_package['payload']
if self.opts['zmq_filtering']:
# if you have a specific topic list, use that
if 'topic_lst' in unpacked_package:
for topic in unpacked_package['topic_lst']:
# zmq filters are substring match, hash the topic
# to avoid collisions
htopic = hashlib.sha1(topic).hexdigest()
pub_sock.send(htopic, flags=zmq.SNDMORE)
pub_sock.send(payload)
                        # otherwise it's a broadcast
else:
# TODO: constants file for "broadcast"
pub_sock.send('broadcast', flags=zmq.SNDMORE)
pub_sock.send(payload)
else:
pub_sock.send(payload)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
except KeyboardInterrupt:
if pub_sock.closed is False:
pub_sock.setsockopt(zmq.LINGER, 1)
pub_sock.close()
if pull_sock.closed is False:
pull_sock.setsockopt(zmq.LINGER, 1)
pull_sock.close()
if context.closed is False:
context.term()
class ReqServer(object):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, crypticle, key, mkey):
'''
Create a request server
:param dict opts: The salt options dictionary
:crypticle salt.crypt.Crypticle crypticle: Encryption crypticle
:key dict: The user starting the server and the AES key
:mkey dict: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.crypticle = crypticle
def zmq_device(self):
salt.utils.appendproctitle('MWorkerQueue')
self.context = zmq.Context(self.opts['worker_threads'])
# Prepare the zeromq sockets
self.uri = 'tcp://{interface}:{ret_port}'.format(**self.opts)
self.clients = self.context.socket(zmq.ROUTER)
if self.opts['ipv6'] is True and hasattr(zmq, 'IPV4ONLY'):
# IPv6 sockets work for both IPv6 and IPv4 addresses
self.clients.setsockopt(zmq.IPV4ONLY, 0)
try:
self.clients.setsockopt(zmq.HWM, self.opts['rep_hwm'])
# in zmq >= 3.0, there are separate send and receive HWM settings
except AttributeError:
self.clients.setsockopt(zmq.SNDHWM, self.opts['rep_hwm'])
self.clients.setsockopt(zmq.RCVHWM, self.opts['rep_hwm'])
self.workers = self.context.socket(zmq.DEALER)
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Setting up the master communication server')
self.clients.bind(self.uri)
self.workers.bind(self.w_uri)
while True:
try:
zmq.device(zmq.QUEUE, self.clients, self.workers)
except zmq.ZMQError as exc:
if exc.errno == errno.EINTR:
continue
raise exc
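# Editor's note (illustrative comment, not original Salt code): the loop
# above is the classic zmq "queue device" pattern -- the ROUTER socket
# facing the minions is bridged to a DEALER socket that fans requests out
# to the MWorker REP sockets connected over workers.ipc:
#
#     minion REQ -> ROUTER ==[zmq.device(zmq.QUEUE)]== DEALER -> worker REP
#
# EINTR is swallowed so a stray signal does not tear the proxy down.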
def __bind(self):
'''
Binds the reply server
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
os.remove(dfn)
except os.error:
pass
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
for ind in range(int(self.opts['worker_threads'])):
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
self.crypticle,
),
)
self.process_manager.add_process(self.zmq_device)
# start zmq device
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self):
if hasattr(self, 'clients') and self.clients.closed is False:
self.clients.setsockopt(zmq.LINGER, 1)
self.clients.close()
if hasattr(self, 'workers') and self.workers.closed is False:
self.workers.setsockopt(zmq.LINGER, 1)
self.workers.close()
if hasattr(self, 'context') and self.context.closed is False:
self.context.term()
# Also stop the workers
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(multiprocessing.Process):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
crypticle):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the RSA key
:param dict key: The user running the salt master and the AES key
:param salt.crypt.Crypticle crypticle: Encryption crypticle
:rtype: MWorker
:return: Master worker
'''
multiprocessing.Process.__init__(self)
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.mkey = mkey
self.key = key
self.k_mtime = 0
def __bind(self):
'''
Bind to the local port
'''
context = zmq.Context(1)
socket = context.socket(zmq.REP)
w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('Worker binding to socket {0}'.format(w_uri))
try:
socket.connect(w_uri)
while True:
try:
package = socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
socket.send(ret)
# don't catch keyboard interrupts, just re-raise them
except KeyboardInterrupt:
raise
# catch all other exceptions, so we don't go defunct
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
continue
log.critical('Unexpected Error in Mworker',
exc_info=True)
# lets just redo the socket (since we won't know what state its in).
# This protects against a single minion doing a send but not
# recv and thereby causing an MWorker process to go defunct
del socket
socket = context.socket(zmq.REP)
socket.connect(w_uri)
# Changes here can create a ZeroMQ race condition; check with thatch45
# before making any ZeroMQ changes
except KeyboardInterrupt:
socket.close()
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
:param dict payload: The payload route to the appropriate handler
'''
try:
key = payload['enc']
load = payload['load']
except KeyError:
return ''
return {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
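# Editor's note (illustrative): the dict dispatch above routes
# {'enc': 'clear', 'load': {...}} to _handle_clear and
# {'enc': 'aes', 'load': '<ciphertext>'} to _handle_aes. Any other 'enc'
# value raises KeyError, which the generic exception handler in __bind
# catches before recreating the worker socket.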
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.info('Clear payload received with command {cmd}'.format(**load))
if load['cmd'].startswith('__'):
return False
return getattr(self.clear_funcs, load['cmd'])(load)
def _handle_aes(self, load):
'''
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
try:
data = self.crypticle.loads(load)
except Exception:
return ''
if 'cmd' not in data:
log.error('Received malformed command {0}'.format(data))
return {}
log.info('AES payload received with command {0}'.format(data['cmd']))
if data['cmd'].startswith('__'):
return False
return self.aes_funcs.run_func(data['cmd'], data)
def _update_aes(self):
'''
Check to see if a fresh AES key is available and update the components
of the worker
'''
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
except os.error:
return
if stats.st_mode != 0o100400:
# Invalid dfn, return
return
if stats.st_mtime > self.k_mtime:
# new key, refresh crypticle
with salt.utils.fopen(dfn) as fp_:
aes = fp_.read()
if len(aes) != 76:
return
log.debug('New master AES key found by pid {0}'.format(os.getpid()))
self.crypticle = salt.crypt.Crypticle(self.opts, aes)
self.clear_funcs.crypticle = self.crypticle
self.clear_funcs.opts['aes'] = aes
self.aes_funcs.crypticle = self.crypticle
self.aes_funcs.opts['aes'] = aes
self.k_mtime = stats.st_mtime
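# Editor's note (illustrative): the '.dfn' file checked above is the
# key-rotation drop file. The 0o100400 test asserts "regular file,
# readable by owner only" (S_IFREG | 0400), and the length-76 check is a
# sanity test against the expected serialized AES key size in this
# vintage of Salt (an editor's reading of the code, not a documented
# contract).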
def run(self):
'''
Start a Master Worker
'''
salt.utils.appendproctitle(self.__class__.__name__)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
self.mkey,
self.crypticle)
self.aes_funcs = AESFuncs(self.opts, self.crypticle)
self.__bind()
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts, crypticle):
'''
Create a new AESFuncs
:param dict opts: The salt options
:param salt.crypt.Crypticle crypticle: Encryption crypticle
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
self.serial = salt.payload.Serial(opts)
self.crypticle = crypticle
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
with salt.utils.fopen(pub_path, 'r') as fp_:
minion_pub = fp_.read()
tmp_pub = salt.utils.mkstemp()
with salt.utils.fopen(tmp_pub, 'w+') as fp_:
fp_.write(minion_pub)
pub = None
try:
pub = RSA.load_pub_key(tmp_pub)
except RSA.RSAError as err:
log.error('Unable to load temporary public key "{0}": {1}'
.format(tmp_pub, err))
try:
os.remove(tmp_pub)
if pub is not None and pub.public_decrypt(token, 5) == 'salt':
return True
except RSA.RSAError as err:
log.error('Unable to decrypt token: {0}'.format(err))
log.error('Salt minion claiming to be {0} has attempted to '
'communicate with the master and could not be verified'
.format(id_))
return False
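# Editor's sketch (assumption, mirroring the check above): the minion
# side produces 'tok' by signing the literal string 'salt' with its
# private key, roughly:
#
#     key = RSA.load_key('/etc/salt/pki/minion/minion.pem')
#     tok = key.private_encrypt('salt', 5)  # same padding constant as above
#
# so public_decrypt(tok, 5) == 'salt' only holds for the matching key pair.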
def __verify_minion_publish(self, clear_load):
'''
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
(
'Minion id {0} is not who it says it is and is attempting '
'to issue a peer command'
).format(clear_load['id'])
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
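# Editor's note (illustrative): a master config allowing minions whose id
# matches 'web.*' to peer-publish only test.* and network.* functions
# would look like:
#
#     peer:
#       web.*:
#         - test.*
#         - network.*
#
# Each regex key is matched against clear_load['id'] above and its list of
# function globs is accumulated into 'perms' before the auth_check call.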
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a given load
:rtype: bool or dict
:return: The original load (minus the token) if the load can be verified; False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return False
if 'tok' in load:
load.pop('tok')
return load
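# Editor's note (illustrative): callers use __verify_load as a gate, e.g.
#
#     load = self.__verify_load(load, ('id', 'tok'))
#     if load is False:
#         return {}
#
# On success the (token-stripped) load flows on to the real handler; on
# failure an empty result goes back to the minion.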
def _ext_nodes(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._ext_nodes(load, skip_verify=True)
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
Allow minions to send files to the master. Files are stored in the
master file cache.
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from {0} for {1!r}, missing {2!r}'
.format(
load['id'],
inspect_stack()['co_name'],
'tok'
))
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
load.pop('tok')
# Normalize Windows paths
normpath = load['path']
if ':' in normpath:
# make sure double backslashes are normalized
normpath = normpath.replace('\\', '/')
normpath = os.path.normpath(normpath)
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
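# Editor's note (illustrative): a file pushed by minion 'web1' as
# 'logs/app.log' lands on the master at
#
#     <cachedir>/minions/web1/files/logs/app.log
#
# with 'loc' acting as a byte offset so large files can arrive in chunks
# ('ab' + seek for continuation writes, 'wb' for a fresh upload).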
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
mods = set()
for func in self.mminion.functions.values():
mods.add(func.__module__)
for mod in mods:
sys.modules[mod].__grains__ = load['grains']
pillar_dirs = {}
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions)
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
for mod in mods:
sys.modules[mod].__grains__ = self.opts['grains']
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
self.masterapi._minion_event(load)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False))
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(
load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
# if you have a job_cache, or an ext_job_cache, don't write to the regular master cache
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
# otherwise, write to the master cache
fstr = '{0}.returner'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load)
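# Editor's note (illustrative, field values hypothetical): a typical
# return load handled above looks roughly like
#
#     {'id': 'web1', 'jid': '20140625101202505203',
#      'fun': 'test.ping', 'return': True}
#
# 'jid' == 'req' is the special case for standalone jobs that need a jid
# minted on arrival.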
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if 'load' in load:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
# Format individual return loads
for key, item in load['return'].items():
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data for a specific jid; only allowed
if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load['jid']))
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion. This method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion. This method enforces minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load, skip_verify=True)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: bool
:return: True if the key was revoked, False if not (including when the
load fails verification, in which case no key operation is performed)
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
# Don't honor private functions
if func.startswith('__'):
return self.crypticle.dumps({})
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call {0} took {1} seconds'.format(
func, time.time() - start
)
)
except Exception:
ret = ''
log.error(
'Error in function {0}:\n'.format(func),
exc_info=True
)
else:
log.error(
'Received function {0} which is unavailable on the master, '
'returning False'.format(
func
)
)
return self.crypticle.dumps(False)
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return self.crypticle.dumps(ret)
# encrypt with a specific AES key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError:
return self.crypticle.dumps({})
pret = {}
pret['key'] = pub.public_encrypt(key, 4)
pret['pillar'] = pcrypt.dumps(
ret if ret is not False else {}
)
return pret
# AES Encrypt the return
return self.crypticle.dumps(ret)
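# Editor's note (illustrative): for the version-2 pillar protocol the
# reply built above is a two-layer envelope, roughly
#
#     {'key': RSA_encrypt(minion_pub, fresh_aes_key),
#      'pillar': AES_encrypt(fresh_aes_key, pillar_data)}
#
# so pillar data is readable only by the one minion that requested it,
# rather than by anything holding the shared master AES key.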
class ClearFuncs(object):
'''
Set up functions that are safe to execute when commands are sent to the
master without encryption or authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key, master_key, crypticle):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
self.master_key = master_key
self.crypticle = crypticle
# Create the event manager
self.event = salt.utils.event.MasterEvent(self.opts['sock_dir'])
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
self.auto_key = salt.daemons.masterapi.AutoKey(opts)
# only create a con_cache-client if the con_cache is active
if self.opts['con_cache']:
self.cache_cli = CacheCli(self.opts)
else:
self.cache_cli = False
def _auth(self, load):
'''
Authenticate the client, use the sent public key to encrypt the AES key
which was generated at start up.
This method fires an event over the master event manager. The event is
tagged "auth" and returns a dict with information about the auth
event
# Verify that the key we are receiving matches the stored key
# Store the key if it is not there
# Make an RSA key with the pub key
# Encrypt the AES key as an encrypted salt.payload
# Package the return and return it
'''
if not salt.utils.verify.valid_id(self.opts, load['id']):
log.info(
'Authentication request from invalid id {id}'.format(**load)
)
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication request from {id}'.format(**load))
# 0 is default which should be 'unlimited'
if self.opts['max_minions'] > 0:
# use the ConCache if enabled, else use the minion utils
if self.cache_cli:
minions = self.cache_cli.get_cached()
else:
minions = self.ckminions.connected_ids()
if len(minions) > 1000:
log.info('With large numbers of minions it is advised '
'to enable the ConCache with \'con_cache: True\' '
'in the masters configuration file.')
if len(minions) > self.opts['max_minions']:
# we reject new minions, minions that are already
# connected must be allowed for the mine, highstate, etc.
if load['id'] not in minions:
msg = ('Too many minions connected (max_minions={0}). '
'Rejecting connection from id '
'{1}'.format(self.opts['max_minions'],
load['id']))
log.info(msg)
eload = {'result': False,
'act': 'full',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': 'full'}}
# Check if key is configured to be auto-rejected/signed
auto_reject = self.auto_key.check_autoreject(load['id'])
auto_sign = self.auto_key.check_autosign(load['id'])
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
load['id'])
pubfn_pend = os.path.join(self.opts['pki_dir'],
'minions_pre',
load['id'])
pubfn_rejected = os.path.join(self.opts['pki_dir'],
'minions_rejected',
load['id'])
pubfn_denied = os.path.join(self.opts['pki_dir'],
'minions_denied',
load['id'])
if self.opts['open_mode']:
# open mode is turned on: skip the checks and overwrite whatever
# is there
pass
elif os.path.isfile(pubfn_rejected):
# The key has been rejected, don't place it in pending
log.info('Public key rejected for {0}. Key is present in '
'rejection key dir.'.format(load['id']))
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif os.path.isfile(pubfn):
# The key has been accepted, check it
if salt.utils.fopen(pubfn, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys did not match. This may be an attempt to compromise '
'the Salt cluster.'.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
elif not os.path.isfile(pubfn_pend):
# The key has not been accepted, this is a new minion
if os.path.isdir(pubfn_pend):
# The key path is a directory, error out
log.info(
'New public key {id} is a directory'.format(**load)
)
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
if auto_reject:
key_path = pubfn_rejected
log.info('New public key for {id} rejected via autoreject_file'
.format(**load))
key_act = 'reject'
key_result = False
elif not auto_sign:
key_path = pubfn_pend
log.info('New public key for {id} placed in pending'
.format(**load))
key_act = 'pend'
key_result = True
else:
# The key is being automatically accepted, don't do anything
# here and let the auto accept logic below handle it.
key_path = None
if key_path is not None:
# Write the key to the appropriate location
with salt.utils.fopen(key_path, 'w+') as fp_:
fp_.write(load['pub'])
ret = {'enc': 'clear',
'load': {'ret': key_result}}
eload = {'result': key_result,
'act': key_act,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif os.path.isfile(pubfn_pend):
# This key is in the pending dir and is awaiting acceptance
if auto_reject:
# We don't care if the keys match, this minion is being
# auto-rejected. Move the key file from the pending dir to the
# rejected dir.
try:
shutil.move(pubfn_pend, pubfn_rejected)
except (IOError, OSError):
pass
log.info('Pending public key for {id} rejected via '
'autoreject_file'.format(**load))
ret = {'enc': 'clear',
'load': {'ret': False}}
eload = {'result': False,
'act': 'reject',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
elif not auto_sign:
# This key is in the pending dir and is not being auto-signed.
# Check if the keys are the same and error out if this is the
# case. Otherwise log the fact that the minion is still
# pending.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'key in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
log.info(
'Authentication failed from host {id}, the key is in '
'pending and needs to be accepted with salt-key '
'-a {id}'.format(**load)
)
eload = {'result': True,
'act': 'pend',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': True}}
else:
# This key is in pending and has been configured to be
# auto-signed. Check to see if it is the same key, and if
# so, pass on doing anything here, and let it get automatically
# accepted below.
if salt.utils.fopen(pubfn_pend, 'r').read() != load['pub']:
log.error(
'Authentication attempt from {id} failed, the public '
'keys in pending did not match. This may be an '
'attempt to compromise the Salt cluster.'
.format(**load)
)
# put denied minion key into minions_denied
with salt.utils.fopen(pubfn_denied, 'w+') as fp_:
fp_.write(load['pub'])
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
else:
pass
else:
# Something happened that I have not accounted for, FAIL!
log.warn('Unaccounted for authentication failure')
eload = {'result': False,
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return {'enc': 'clear',
'load': {'ret': False}}
log.info('Authentication accepted from {id}'.format(**load))
# only write to disk if you are adding the file, and in open mode,
# which implies we accept any key from a minion (key needs to be
# written every time because what's on disk is used for encrypting)
if not os.path.isfile(pubfn) or self.opts['open_mode']:
with salt.utils.fopen(pubfn, 'w+') as fp_:
fp_.write(load['pub'])
pub = None
# the con_cache is enabled, send the minion id to the cache
if self.cache_cli:
self.cache_cli.put_cache([load['id']])
# The key payload may sometimes be corrupt when using auto-accept
# and an empty request comes in
try:
pub = RSA.load_pub_key(pubfn)
except RSA.RSAError as err:
log.error('Corrupt public key "{0}": {1}'.format(pubfn, err))
return {'enc': 'clear',
'load': {'ret': False}}
ret = {'enc': 'pub',
'pub_key': self.master_key.get_pub_str(),
'publish_port': self.opts['publish_port']}
# sign the master's pubkey (if enabled) before it is
# sent to the minion that was just authenticated
if self.opts['master_sign_pubkey']:
# append the pre-computed signature to the auth-reply
if self.master_key.pubkey_signature():
log.debug('Adding pubkey signature to auth-reply')
log.debug(self.master_key.pubkey_signature())
ret.update({'pub_sig': self.master_key.pubkey_signature()})
else:
# the master has its own signing-keypair, compute the master.pub's
# signature and append that to the auth-reply
log.debug("Signing master public key before sending")
pub_sign = salt.crypt.sign_message(self.master_key.get_sign_paths()[1],
ret['pub_key'])
ret.update({'pub_sig': binascii.b2a_base64(pub_sign)})
if self.opts['auth_mode'] >= 2:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(load['token'], 4)
aes = '{0}_|-{1}'.format(self.opts['aes'], mtoken)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
else:
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(aes, 4)
else:
if 'token' in load:
try:
mtoken = self.master_key.key.private_decrypt(
load['token'], 4
)
ret['token'] = pub.public_encrypt(mtoken, 4)
except Exception:
# Token failed to decrypt, send back the salty bacon to
# support older minions
pass
aes = self.opts['aes']
ret['aes'] = pub.public_encrypt(self.opts['aes'], 4)
# Be aggressive about the signature
digest = hashlib.sha256(aes).hexdigest()
ret['sig'] = self.master_key.key.private_encrypt(digest, 5)
eload = {'result': True,
'act': 'accept',
'id': load['id'],
'pub': load['pub']}
self.event.fire_event(eload, tagify(prefix='auth'))
return ret
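# Editor's note (illustrative summary of the state machine above, using
# the pki_dir subdirectories):
#
#     minions/           accepted keys (must match load['pub'] exactly)
#     minions_pre/       pending keys awaiting 'salt-key -a <id>'
#     minions_rejected/  auto- or manually rejected keys
#     minions_denied/    keys that mismatched an existing entry
#
# open_mode short-circuits every check and rewrites minions/<id> on each
# successful auth request.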
def process_token(self, tok, fun, auth_type):
'''
Process a token and determine if a command is authorized
'''
try:
token = self.loadauth.get_tok(tok)
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
check_fun = getattr(self.ckminions,
'{auth}_check'.format(auth=auth_type))
good = check_fun(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
fun)
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
return None
def process_eauth(self, clear_load, auth_type):
'''
Process a clear load to determine eauth perms
Any return other than None is an eauth failure
'''
if 'eauth' not in clear_load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(clear_load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
check_fun = getattr(self.ckminions,
'{auth}_check'.format(auth=auth_type))
good = check_fun(
self.opts['external_auth'][clear_load['eauth']][name]
if name in self.opts['external_auth'][clear_load['eauth']]
else self.opts['external_auth'][clear_load['eauth']]['*'],
clear_load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(clear_load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
return None
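# Editor's note (illustrative): an external_auth section that the token
# and eauth checks above evaluate against might look like:
#
#     external_auth:
#       pam:
#         fred:
#           - test.*
#         '*':
#           - network.ping
#
# A named-user entry wins when present; otherwise the '*' catch-all list
# is handed to the ckminions '<auth_type>_check' function.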
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
if 'token' in clear_load:
auth_error = self.process_token(clear_load['token'],
clear_load['fun'],
'runner')
if auth_error:
return auth_error
else:
token = self.loadauth.get_tok(clear_load['token'])
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
clear_load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
try:
eauth_error = self.process_eauth(clear_load, 'runner')
if eauth_error:
return eauth_error
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
clear_load.get('kwarg', {}),
clear_load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in clear_load:
auth_error = self.process_token(clear_load['token'],
clear_load['fun'],
'wheel')
if auth_error:
return auth_error
else:
token = self.loadauth.get_tok(clear_load['token'])
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
try:
eauth_error = self.process_eauth(clear_load, 'wheel')
if eauth_error:
return eauth_error
jid = salt.utils.gen_jid()
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': clear_load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **clear_load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def mk_token(self, clear_load):
'''
Create and return an authentication token. The clear load needs to
contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in clear_load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if clear_load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(clear_load)
if not ((name in self.opts['external_auth'][clear_load['eauth']]) |
('*' in self.opts['external_auth'][clear_load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(clear_load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(clear_load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
This method sends out publications to the minions; it can only be used
by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, clear_load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
# if this is a regular command, it's a single function
if isinstance(clear_load['fun'], str):
funs_to_check = [clear_load['fun']]
# if this a compound function
else:
funs_to_check = clear_load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=clear_load['user'],
function=clear_load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred.')
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred.')
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
clear_load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(clear_load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra) # The username we are attempting to auth with
groups = self.loadauth.get_groups(extra) # The groups this user belongs to
group_perm_keys = filter(lambda item: item.endswith('%'), self.opts['external_auth'][extra['eauth']]) # The configured auth groups
# First we need to know if the user is allowed to proceed via any of their group memberships.
group_auth_match = False
for group_config in group_perm_keys:
group_config = group_config.rstrip('%')
for group in groups:
if group == group_config:
group_auth_match = True
# If a group_auth_match is set it means only that we have a user which matches at least one or more
# of the groups defined in the configuration file.
# If neither a catchall, a named membership or a group membership is found, there is no need
# to continue. Simply deny the user access.
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']]) |
group_auth_match):
# Neither a named entry, a '*' catch-all, nor a matching group
# membership was found in the config: deny access.
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
# Perform the actual authentication. If we fail here, do not continue.
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
# auth_list = self.opts['external_auth'][extra['eauth']][name] if name in self.opts['external_auth'][extra['eauth']] else self.opts['external_auth'][extra['eauth']]['*']
# We now have an authenticated session and it is time to determine
# what the user has access to.
auth_list = []
if name in self.opts['external_auth'][extra['eauth']]:
auth_list = self.opts['external_auth'][extra['eauth']][name]
if group_auth_match:
auth_list.append(self.ckminions.gather_groups(self.opts['external_auth'][extra['eauth']], groups, auth_list))
good = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
clear_load['user'] = name
# Verify that the caller has root on master
elif 'user' in clear_load:
if clear_load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if clear_load.get('key', 'invalid') == self.key.get('root'):
clear_load.pop('key')
elif clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == self.opts.get('user', 'root'):
if clear_load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == 'root':
if clear_load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif clear_load['user'] == salt.utils.get_user():
if clear_load.pop('key') != self.key.get(clear_load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load['user'] in self.key:
# User is authorised, check key and check perms
if clear_load.pop('key') != self.key[clear_load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if clear_load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][clear_load['user']],
clear_load['fun'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if clear_load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if clear_load.pop('key') != self.key[salt.utils.get_user()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
minions = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
clear_load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False),
# the jid in clear_load can be None, '', or something else.
# this is an attempt to clean up the value before passing to plugins
passed_jid=clear_load['jid'] if clear_load.get('jid') else None)
except TypeError: # The returner is not present
log.error('The requested returner {0} could not be loaded. Publication not sent.'.format(fstr.split('.')[0]))
return {}
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
elif 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'user' in clear_load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**clear_load
)
)
load['user'] = clear_load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**clear_load
)
)
log.debug('Published command details {0}'.format(load))
payload['load'] = self.crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Send 0MQ to the publisher
context = zmq.Context(1)
pub_sock = context.socket(zmq.PUSH)
pull_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
)
pub_sock.connect(pull_uri)
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
pub_sock.send(self.serial.dumps(int_payload))
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions
}
}
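# Editor's sketch (illustrative, not part of the original module): the
# publish() path above is what a LocalClient call ultimately exercises,
# roughly:
#
#     import salt.client
#     local = salt.client.LocalClient()
#     ret = local.cmd('web*', 'test.ping')  # -> ClearFuncs.publish(...)
#
# The {'jid': ..., 'minions': [...]} dict returned above is what the CLI
# then uses to poll the job cache for returns.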
# =====================================================================
# tests/test_menu_response.py -- teamtaverna/taverna_integrations
# (commit 6f1c4bd6b142f55060a8218562b25c5cf3d1f406, MIT license)
# =====================================================================
from unittest import TestCase
from unittest.mock import patch
from freezegun import freeze_time
from plugins.menu_plugin import MenuHelper, menu
from faker import fake_creds, FakeClient, FakeMessage
from common.utils import render, DateHelper
def servings():
return [
{
'publicId': 'zaba4r4z',
'dateServed': '2017-07-06',
'vendor': {'name': 'vendor1'},
'menuItem': {
'cycleDay': 1,
'meal': {'name': 'breakfast'},
'course': {'name': 'appetizer', 'sequenceOrder': 2},
'dish': {'name': 'bread'},
'timetable': {'name': 'timetable1'}
}
},
{
'publicId': 'vl9l8b8w',
'dateServed': '2017-07-06',
'vendor': {'name': 'vendor1'},
'menuItem': {
'cycleDay': 1,
'meal': {'name': 'breakfast'},
'course': {'name': 'main dish', 'sequenceOrder': 1},
'dish': {'name': 'rice'},
'timetable': {'name': 'timetable1'}
}
},
{
'publicId': 'qrpr737y',
'dateServed': '2017-07-06',
'vendor': {'name': 'vendor1'},
'menuItem': {
'cycleDay': 1,
'meal': {'name': 'lunch'},
'course': {'name': 'main dish', 'sequenceOrder': 1},
'dish': {'name': 'beans'},
'timetable': {'name': 'timetable1'}
}
}
]
def sorted_servings():
return {
'breakfast': [
{
'public_id': 'vl9l8b8w',
'course': 'main dish',
'sequence_order': 1,
'dish': 'rice'
},
{
'public_id': 'zaba4r4z',
'course': 'appetizer',
'sequence_order': 2,
'dish': 'bread'
}
],
'lunch': [
{
'public_id': 'qrpr737y',
'course': 'main dish',
'sequence_order': 1,
'dish': 'beans'
}
]
}
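# Editor's note (illustrative): sorted_servings() is the expected output of
# MenuHelper.servings_to_dict(servings()): servings grouped by meal name
# and ordered by their course sequenceOrder (main dish before appetizer
# here because 1 < 2).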
class MenuHelperTest(TestCase):
"""Tests the MenuHelper class."""
servings = servings()
def setUp(self):
self.menu_helper = MenuHelper()
self.sorted_servings = sorted_servings()
def test_servings_to_dict(self):
servings_to_dict = self.menu_helper.servings_to_dict(self.servings)
self.assertEqual(servings_to_dict, self.sorted_servings)
@patch('plugins.menu_plugin.MenuHelper.make_api_request_for_servings')
def test_get_meals(self, mock_obj):
mock_obj.return_value = self.servings
meals = self.menu_helper.get_meals('timetable1', 'today')
self.assertEqual(meals, self.sorted_servings)
@patch('plugins.menu_plugin.MenuHelper.make_api_request_for_servings')
def test_get_meals_without_servings(self, mock_obj):
mock_obj.return_value = None
meals = self.menu_helper.get_meals('timetable1', 'today')
self.assertIsNone(meals)
@freeze_time('2017-07-06')
@patch('common.utils.DateHelper.date_to_str', return_value='2017-07-06')
@patch('plugins.menu_plugin.MenuHelper.make_api_request_for_events')
def test_past_events(self, mock_event, day_mock):
event_list = [
{
'name': 'event1',
'action': 'NO_MEAL',
'startDate': '2017-07-01T08:00:00+00:00',
'endDate': '2017-07-03T00:00:00+00:00'
}
]
mock_event.return_value = event_list
events = self.menu_helper.get_event('today')
self.assertEqual(events, [])
@freeze_time('2017-07-06')
@patch('plugins.menu_plugin.MenuHelper.make_api_request_for_events')
def test_present_events(self, mock_event):
event_list = [
{
'name': 'event1',
'action': 'NO_MEAL',
'startDate': '2017-07-04T08:00:00+00:00',
'endDate': '2017-07-07T00:00:00+00:00'
}
]
mock_event.return_value = event_list
events = self.menu_helper.get_event('today')
self.assertEqual(events, event_list)
@freeze_time('2017-07-06')
@patch('plugins.menu_plugin.MenuHelper.make_api_request_for_events')
def test_no_event(self, mock_event):
mock_event.return_value = []
events = self.menu_helper.get_event('today')
self.assertEqual(events, [])
@freeze_time('2017-07-06')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=['random'])
def test_meals_check_context_update_with_events(self, mock_obj):
meals = self.sorted_servings
context = {'random': 'stuff'}
self.menu_helper.meals_check_context_update(
meals, context, 'today'
)
updated_context = {
'random': 'stuff',
'no_meals': True
}
self.assertEqual(context, updated_context)
@freeze_time('2017-07-06')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
def test_meals_check_context_update_without_events(self, mock_obj):
meals = self.sorted_servings
context = {'random': 'stuff'}
self.menu_helper.meals_check_context_update(
meals, context, 'today'
)
updated_context = {
'random': 'stuff',
'meals': meals
}
self.assertEqual(context, updated_context)
def test_timetable_check_context_update_with_no_timetable(self):
context = {'random': 'stuff'}
response = self.menu_helper.timetable_check_context_update(
0, 'timetable1', context
)
updated_context = {
'random': 'stuff',
'no_timetable': True
}
expected = render('menu_response.j2', updated_context)
self.assertEqual(response, expected)
def test_timetable_check_context_update_with_available_timetable(self):
context = {}
response = self.menu_helper.timetable_check_context_update(
1, 'timetable1', context
)
error = 'timetable1 is not a valid timetable name.'
expected = render('timetable_response.j2', error=error)
self.assertEqual(response, expected)
class MenuTest(TestCase):
"""Tests the menu function."""
date_helper = DateHelper()
client = FakeClient()
menu = {
'channel': fake_creds['FAKE_CHANNEL'],
'type': 'message',
'text': 'menu'
}
menu_timetable = {
'channel': fake_creds['FAKE_CHANNEL'],
'type': 'message',
'text': 'menu timetable1'
}
menu_wrong_timetable = {
'channel': fake_creds['FAKE_CHANNEL'],
'type': 'message',
'text': 'menu wrongstuff'
}
menu_weekday = {
'channel': fake_creds['FAKE_CHANNEL'],
'type': 'message',
'text': 'menu timetable1 monday'
}
menu_wrong_weekday = {
'channel': fake_creds['FAKE_CHANNEL'],
'type': 'message',
'text': 'menu timetable1 blablabla'
}
menu_msg = FakeMessage(client, menu)
menu_timetable_msg = FakeMessage(client, menu_timetable)
menu_wrong_timetable_msg = FakeMessage(client, menu_wrong_timetable)
menu_weekday_msg = FakeMessage(client, menu_weekday)
menu_wrong_weekday_msg = FakeMessage(client, menu_wrong_weekday)
@patch('slackbot.dispatcher.Message', return_value=menu_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_one_timetable(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu
utils_mock.return_value = [{'slug': 'timetable1'}]
meals_mock.return_value = sorted_servings()
context = {
'timetable_names': ['timetable1'],
'day_of_week': 'today',
'meals': meals_mock.return_value
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_no_timetable(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu
utils_mock.return_value = []
context = {
'timetable_names': [],
'day_of_week': 'today',
'no_timetable': True
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_multiple_timetable(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu
utils_mock.return_value = [
{'slug': 'timetable1'},
{'slug': 'timetable2'}
]
meals_mock.return_value = sorted_servings()
context = {
'timetable_names': ['timetable1', 'timetable2'],
'day_of_week': 'today',
'multiple_timetables': True
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_timetable_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_timetable_command(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu_timetable
utils_mock.return_value = [{'slug': 'timetable1'}]
meals_mock.return_value = sorted_servings()
context = {
'timetable_names': ['timetable1'],
'day_of_week': 'today',
'meals': meals_mock.return_value
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_wrong_timetable_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_wrong_timetable(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu_wrong_timetable
utils_mock.return_value = [{'slug': 'timetable1'}]
error = 'wrongstuff is not a valid timetable name.'
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('timetable_response.j2', error=error)
)
# Test when there are no timetables available in the database
utils_mock.return_value = []
context = {
'random': 'stuff',
'no_timetable': True
}
menu(mock_msg)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_weekday_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_weekday(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu_weekday
utils_mock.return_value = [{'slug': 'timetable1'}]
meals_mock.return_value = sorted_servings()
context = {
'timetable_names': ['timetable1'],
'day_of_week': 'monday',
'meals': meals_mock.return_value
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
@patch('slackbot.dispatcher.Message', return_value=menu_wrong_weekday_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_wrong_weekday(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu_wrong_weekday
utils_mock.return_value = [{'slug': 'timetable1'}]
meals_mock.return_value = sorted_servings()
error = 'You did not enter a valid day.'
context = {
'random': 'stuff',
'invalid_day': True,
'days': self.date_helper.get_days()
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context, error)
)
@patch('slackbot.dispatcher.Message', return_value=menu_msg)
@patch('common.utils.TimetableAPIUtils.make_api_request_for_timetables')
@patch('plugins.menu_plugin.MenuHelper.get_event', return_value=[])
@patch('plugins.menu_plugin.MenuHelper.get_meals')
def test_menu_with_empty_db(self, meals_mock, event_mock, utils_mock, mock_msg):
mock_msg.body = self.menu
utils_mock.return_value = []
context = {
'timetable_names': [],
'day_of_week': 'today',
'no_timetable': True
}
menu(mock_msg)
self.assertTrue(mock_msg.reply.called)
mock_msg.reply.assert_called_with(
render('menu_response.j2', context)
)
| 34.948905
| 94
| 0.610206
|
4a135294042232d55bf132afa1098b5dfc142ad5
| 731
|
py
|
Python
|
tests/test_demo.py
|
bruziev/security_interface
|
0758a88f3c6ce96502ad287ab1a743cd5040c0b8
|
[
"MIT"
] | 5
|
2018-11-02T07:50:30.000Z
|
2019-03-22T19:40:17.000Z
|
tests/test_demo.py
|
theruziev/security_interface
|
cacb85f0736c20f6cbe1b4d148ebb8b56b921642
|
[
"MIT"
] | 146
|
2019-05-30T09:16:06.000Z
|
2022-02-04T17:20:51.000Z
|
tests/test_demo.py
|
theruziev/security_interface
|
cacb85f0736c20f6cbe1b4d148ebb8b56b921642
|
[
"MIT"
] | 1
|
2018-11-13T06:21:01.000Z
|
2018-11-13T06:21:01.000Z
|
import pytest
from demo.jwt import IdentityMaker, JwtIdentityPolicy, JwtAuthPolicy
from security_interface.api import Security
SECRET = "SECRET"
identity_maker = IdentityMaker(expired_after=1, secret=SECRET)
jwt_identity = JwtIdentityPolicy(secret=SECRET)
jwt_auth = JwtAuthPolicy()
security = Security(jwt_identity, jwt_auth)
@pytest.mark.asyncio
async def test_jwt_demo():
payload = {"login": "Bakhtiyor", "scope": ["read", "write"]}
token = identity_maker.make(payload)
identity = await security.check_authorized(token)
assert "Bakhtiyor" == identity["login"]
assert await security.can(token, "read")
assert await security.can(token, "write")
assert not await security.can(token, "private")
| 27.074074
| 68
| 0.74829
|
4a1352f866d86c8eb9c949038a4a4b9bd8063b8f
| 2,860
|
py
|
Python
|
ac-test.py
|
simondlevy/pytorch-drl
|
b197bb93c2cc698971f98095d4e0180811c52042
|
[
"MIT"
] | null | null | null |
ac-test.py
|
simondlevy/pytorch-drl
|
b197bb93c2cc698971f98095d4e0180811c52042
|
[
"MIT"
] | 10
|
2020-11-28T05:29:52.000Z
|
2020-12-01T22:43:38.000Z
|
ac-test.py
|
simondlevy/pytorch-drl
|
b197bb93c2cc698971f98095d4e0180811c52042
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import time
from gym import wrappers
from ac_gym import model, gym_make, is_env_bullet
from ac_gym.td3 import TD3, eval_policy
import numpy as np
import torch
def run_td3(parts, env, nhid, nodisplay, is_bullet, dump):
policy = TD3(
env.observation_space.shape[0],
env.action_space.shape[0],
float(env.action_space.high[0]),
nhid)
policy.set(parts)
return eval_policy(policy, env, render=(not nodisplay), eval_episodes=1, is_bullet=is_bullet, dump=dump)
def run_other(parts, env, nhid, nodisplay, is_bullet, dump):
net = model.ModelActor(env.observation_space.shape[0],
env.action_space.shape[0],
nhid)
net.load_state_dict(parts)
# Start rendering thread for PyBullet if needed
if is_bullet:
env.render()
obs = env.reset()
total_reward = 0.0
total_steps = 0
while True:
obs_v = torch.FloatTensor(obs)
mu_v = net(obs_v)
action = mu_v.squeeze(dim=0).data.numpy()
action = np.clip(action, -1, 1)
if np.isscalar(action):
action = [action]
if dump:
print(action)
obs, reward, done, _ = env.step(action)
if not nodisplay:
if not is_bullet:
env.render('rgb_array')
time.sleep(.02)
total_reward += reward
total_steps += 1
if done:
break
return total_reward, total_steps
def main():
fmtr = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(formatter_class=fmtr)
parser.add_argument('filename', metavar='FILENAME', help='.dat input file')
parser.add_argument('--record', default=None,
help='If specified, sets the recording dir')
parser.add_argument('--dump', dest='dump', action='store_true',
help='Print actions to stdout')
parser.add_argument('--nodisplay', dest='nodisplay', action='store_true',
help='Suppress display')
parser.add_argument('--seed', default=None, type=int,
help='Sets Gym, PyTorch and Numpy seeds')
args = parser.parse_args()
parts, env_name, nhid = torch.load(open(args.filename, 'rb'))
env = gym_make(env_name)
if args.seed is not None:
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.record:
env = wrappers.Monitor(env, args.record, force=True)
fun = run_td3 if 'td3' in args.filename else run_other
reward, steps = fun(parts, env, nhid, args.nodisplay, is_env_bullet(env_name), args.dump)
print('In %d steps we got %.3f reward.' % (steps, reward))
env.close()
if __name__ == '__main__':
main()
| 25.765766
| 108
| 0.61049
|
4a1353310b99c6ce76227924e7c890366b81f5c9
| 33,266
|
py
|
Python
|
ironic_python_agent/extensions/image.py
|
atotala/ironic-python-agent
|
f9c03a8de29fd3b7c479a2b0c4343585b9f171e8
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/extensions/image.py
|
atotala/ironic-python-agent
|
f9c03a8de29fd3b7c479a2b0c4343585b9f171e8
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/extensions/image.py
|
atotala/ironic-python-agent
|
f9c03a8de29fd3b7c479a2b0c4343585b9f171e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import shlex
import shutil
import stat
import tempfile
from ironic_lib import utils as ilib_utils
from oslo_concurrency import processutils
from oslo_log import log
from ironic_python_agent import errors
from ironic_python_agent.extensions import base
from ironic_python_agent.extensions import iscsi
from ironic_python_agent import hardware
from ironic_python_agent import raid_utils
from ironic_python_agent import utils
LOG = log.getLogger(__name__)
BIND_MOUNTS = ('/dev', '/proc', '/run')
BOOTLOADERS_EFI = ['bootx64.efi', 'grubaa64.efi', 'winload.efi']
def _rescan_device(device):
"""Force the device to be rescanned
:param device: device upon which to rescan and update
kernel partition records.
"""
try:
utils.execute('partx', '-u', device, attempts=3,
delay_on_retry=True)
utils.execute('udevadm', 'settle')
except processutils.ProcessExecutionError:
LOG.warning("Couldn't re-read the partition table "
"on device %s", device)
def _get_partition(device, uuid):
"""Find the partition of a given device."""
LOG.debug("Find the partition %(uuid)s on device %(dev)s",
{'dev': device, 'uuid': uuid})
try:
_rescan_device(device)
lsblk = utils.execute('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE', device)
report = lsblk[0]
for line in report.split('\n'):
part = {}
# Split into KEY=VAL pairs
vals = shlex.split(line)
for key, val in (v.split('=', 1) for v in vals):
part[key] = val.strip()
            # Ignore anything that is not a partition or an md device
if part.get('TYPE') not in ['md', 'part']:
# NOTE(TheJulia): This technically creates an edge failure
# case where a filesystem on a whole block device sans
# partitioning would behave differently.
continue
if part.get('UUID') == uuid:
LOG.debug("Partition %(uuid)s found on device "
"%(dev)s", {'uuid': uuid, 'dev': device})
return '/dev/' + part.get('KNAME')
if part.get('PARTUUID') == uuid:
LOG.debug("Partition %(uuid)s found on device "
"%(dev)s", {'uuid': uuid, 'dev': device})
return '/dev/' + part.get('KNAME')
else:
# NOTE(TheJulia): We may want to consider moving towards using
# findfs in the future, if we're comfortable with the execution
# and interaction. There is value in either way though.
# NOTE(rg): alternative: blkid -l -t UUID=/PARTUUID=
try:
findfs, stderr = utils.execute('findfs', 'UUID=%s' % uuid)
return findfs.strip()
except processutils.ProcessExecutionError as e:
LOG.debug('First fallback detection attempt for locating '
'partition via UUID %(uuid)s failed. '
'Error: %(err)s',
{'uuid': uuid,
'err': e})
try:
findfs, stderr = utils.execute(
'findfs', 'PARTUUID=%s' % uuid)
return findfs.strip()
except processutils.ProcessExecutionError as e:
LOG.debug('Secondary fallback detection attempt for '
'locating partition via UUID %(uuid)s failed. '
'Error: %(err)s',
{'uuid': uuid,
'err': e})
# Last fallback: In case we cannot find the partition by UUID
# and the deploy device is an md device, we check if the md
# device has a partition (which we assume to contain the root fs).
if hardware.is_md_device(device):
md_partition = device + 'p1'
if (os.path.exists(md_partition)
and stat.S_ISBLK(os.stat(md_partition).st_mode)):
LOG.debug("Found md device with partition %s",
md_partition)
return md_partition
else:
LOG.debug('Could not find partition %(part)s on md '
'device %(dev)s',
{'part': md_partition,
'dev': device})
# Partition not found, time to escalate.
error_msg = ("No partition with UUID %(uuid)s found on "
"device %(dev)s" % {'uuid': uuid, 'dev': device})
LOG.error(error_msg)
raise errors.DeviceNotFound(error_msg)
except processutils.ProcessExecutionError as e:
error_msg = ('Finding the partition with UUID %(uuid)s on '
'device %(dev)s failed with %(err)s' %
{'uuid': uuid, 'dev': device, 'err': e})
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
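# Illustration of the lsblk parsing above: `lsblk -P` emits one line of
# KEY="value" pairs per device. A hypothetical standalone helper (not
# used by the agent itself) performing the same split:
def _example_parse_lsblk_pair_line(line):
    """Turn e.g. 'KNAME="sda1" UUID="x" TYPE="part"' into a dict."""
    # shlex is already imported at module level.
    return {key: val.strip()
            for key, val in (pair.split('=', 1)
                             for pair in shlex.split(line))}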
def _has_dracut(root):
try:
utils.execute('chroot %(path)s /bin/sh -c '
'"which dracut"' %
{'path': root}, shell=True)
except processutils.ProcessExecutionError:
return False
return True
def _has_boot_sector(device):
"""Checks the device for a boot sector indicator."""
stdout, stderr = utils.execute('file', '-s', device)
if 'boot sector' not in stdout:
return False
else:
# Now lets check the signature
ddout, dderr = utils.execute(
'dd', 'if=%s' % device, 'bs=218', 'count=1', binary=True)
stdout, stderr = utils.execute('file', '-', process_input=ddout)
# The bytes recovered by dd show as a "dos executable" when
# examined with file. In other words, the bootloader is present.
if 'executable' in stdout:
return True
return False
def _find_bootable_device(partitions, dev):
"""Checks the base device and partition for bootloader contents."""
LOG.debug('Looking for a bootable device in %s', dev)
for line in partitions.splitlines():
partition = line.split(':')
try:
if 'boot' in partition[6]:
if _has_boot_sector(dev) or _has_boot_sector(partition[0]):
return True
except IndexError:
continue
return False
def _is_bootloader_loaded(dev):
"""Checks the device to see if a MBR bootloader is present.
:param str dev: Block device upon which to check if it appears
to be bootable via MBR.
:returns: True if a device appears to be bootable with a boot
loader, otherwise False.
"""
boot = hardware.dispatch_to_managers('get_boot_info')
if boot.current_boot_mode != 'bios':
# We're in UEFI mode, this logic is invalid
LOG.debug('Skipping boot sector check as the system is in UEFI '
'boot mode.')
return False
    LOG.debug('Starting check for pre-installed BIOS boot-loader.')
try:
# Looking for things marked "bootable" in the partition table
stdout, stderr = utils.execute('parted', dev, '-s', '-m',
'--', 'print')
except processutils.ProcessExecutionError:
return False
return _find_bootable_device(stdout, dev)
def _get_efi_bootloaders(location):
"""Get all valid efi bootloaders in a given location
:param location: the location where it should start looking for the
efi files.
:return: a list of valid efi bootloaders
"""
# Let's find all files with .efi or .EFI extension
LOG.debug('Looking for all efi files on %s', location)
valid_bootloaders = []
for root, dirs, files in os.walk(location):
efi_files = [f for f in files if f.lower() in BOOTLOADERS_EFI]
LOG.debug('efi files found in %(location)s : %(efi_files)s',
{'location': location, 'efi_files': str(efi_files)})
for name in efi_files:
efi_f = os.path.join(root, name)
LOG.debug('Checking if %s is executable', efi_f)
if os.access(efi_f, os.X_OK):
v_bl = efi_f.split('/boot/efi')[-1].replace('/', '\\')
LOG.debug('%s is a valid bootloader', v_bl)
valid_bootloaders.append(v_bl)
return valid_bootloaders
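# Path-to-NVRAM-loader translation performed above, illustrated
# (comment only; the concrete path is hypothetical):
#   '/tmp/xyz/boot/efi/EFI/BOOT/BOOTX64.EFI'.split('/boot/efi')[-1]
#       -> '/EFI/BOOT/BOOTX64.EFI'
#   ...then .replace('/', '\\') -> '\\EFI\\BOOT\\BOOTX64.EFI'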
def _run_efibootmgr(valid_efi_bootloaders, device, efi_partition):
"""Executes efibootmgr and removes duplicate entries.
:param valid_efi_bootloaders: the list of valid efi bootloaders
:param device: the device to be used
:param efi_partition: the efi partition on the device
"""
# Before updating let's get information about the bootorder
LOG.debug("Getting information about boot order")
utils.execute('efibootmgr')
# NOTE(iurygregory): regex used to identify the Warning in the stderr after
# we add the new entry. Example:
# "efibootmgr: ** Warning ** : Boot0004 has same label ironic"
    duplicated_label = re.compile(r'^.*:\s\*\*.*\*\*\s:\s.*'
                                  r'Boot([0-9a-fA-F]+)\s.*$')
label_id = 1
for v_efi_bl_path in valid_efi_bootloaders:
# Update the nvram using efibootmgr
# https://linux.die.net/man/8/efibootmgr
label = 'ironic' + str(label_id)
LOG.debug("Adding loader %(path)s on partition %(part)s of device "
" %(dev)s", {'path': v_efi_bl_path, 'part': efi_partition,
'dev': device})
cmd = utils.execute('efibootmgr', '-c', '-d', device,
'-p', efi_partition, '-w', '-L', label,
'-l', v_efi_bl_path)
for line in cmd[1].split('\n'):
match = duplicated_label.match(line)
if match:
boot_num = match.group(1)
LOG.debug("Found bootnum %s matching label", boot_num)
utils.execute('efibootmgr', '-b', boot_num, '-B')
label_id += 1
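def _example_duplicated_label_match():
    """Illustrative only: what the duplicate-label regex extracts.
    Not called by the agent; the sample warning line is hypothetical.
    """
    line = 'efibootmgr: ** Warning ** : Boot0004 has same label ironic1'
    pattern = re.compile(r'^.*:\s\*\*.*\*\*\s:\s.*Boot([0-9a-fA-F]+)\s.*$')
    return pattern.match(line).group(1)  # -> '0004'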
def _manage_uefi(device, efi_system_part_uuid=None):
"""Manage the device looking for valid efi bootloaders to update the nvram.
This method checks for valid efi bootloaders in the device, if they exists
it updates the nvram using the efibootmgr.
:param device: the device to be checked.
:param efi_system_part_uuid: efi partition uuid.
:return: True - if it founds any efi bootloader and the nvram was updated
using the efibootmgr.
False - if no efi bootloader is found.
"""
efi_partition = None
efi_partition_mount_point = None
efi_mounted = False
try:
# Force UEFI to rescan the device. Required if the deployment
# was over iscsi.
_rescan_device(device)
local_path = tempfile.mkdtemp()
# Trust the contents on the disk in the event of a whole disk image.
efi_partition = utils.get_efi_part_on_device(device)
if not efi_partition:
            # _get_partition returns <device>+<partition> (with a 'p'
            # separator for nvme-style names) and we only need the
            # partition number
            partition = _get_partition(device, uuid=efi_system_part_uuid)
            efi_partition = int(partition.replace(device, "").lstrip('p'))
if efi_partition:
efi_partition_mount_point = os.path.join(local_path, "boot/efi")
if not os.path.exists(efi_partition_mount_point):
os.makedirs(efi_partition_mount_point)
# The mount needs the device with the partition, in case the
# device ends with a digit we add a `p` and the partition number we
# found, otherwise we just join the device and the partition number
if device[-1].isdigit():
efi_device_part = '{}p{}'.format(device, efi_partition)
utils.execute('mount', efi_device_part,
efi_partition_mount_point)
else:
efi_device_part = '{}{}'.format(device, efi_partition)
utils.execute('mount', efi_device_part,
efi_partition_mount_point)
efi_mounted = True
else:
# If we can't find the partition we need to decide what should
# happen
return False
valid_efi_bootloaders = _get_efi_bootloaders(efi_partition_mount_point)
if valid_efi_bootloaders:
_run_efibootmgr(valid_efi_bootloaders, device, efi_partition)
return True
else:
return False
except processutils.ProcessExecutionError as e:
        error_msg = ('Could not verify uefi on device %(dev)s, '
                     'failed with %(err)s.' % {'dev': device, 'err': e})
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
finally:
umount_warn_msg = "Unable to umount %(local_path)s. Error: %(error)s"
try:
if efi_mounted:
utils.execute('umount', efi_partition_mount_point,
attempts=3, delay_on_retry=True)
except processutils.ProcessExecutionError as e:
error_msg = ('Umounting efi system partition failed. '
'Attempted 3 times. Error: %s' % e)
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
else:
# If umounting the binds succeed then we can try to delete it
try:
utils.execute('sync')
except processutils.ProcessExecutionError as e:
                LOG.warning(umount_warn_msg,
                            {'local_path': local_path, 'error': e})
else:
# After everything is umounted we can then remove the
# temporary directory
shutil.rmtree(local_path)
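# The partition-naming rule used in _manage_uefi above, as a tiny
# standalone illustration (hypothetical helper, not used by the agent):
#   /dev/sda + 1     -> /dev/sda1
#   /dev/nvme0n1 + 1 -> /dev/nvme0n1p1
def _example_partition_path(device, partition_number):
    sep = 'p' if device[-1].isdigit() else ''
    return '{}{}{}'.format(device, sep, partition_number)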
# TODO(rg): handle PreP boot parts relocation as well
def _prepare_boot_partitions_for_softraid(device, holders, efi_part,
target_boot_mode):
"""Prepare boot partitions when relevant.
Create either efi partitions or bios boot partitions for softraid,
according to both target boot mode and disk holders partition table types.
:param device: the softraid device path
:param holders: the softraid drive members
:param efi_part: when relevant the efi partition coming from the image
deployed on softraid device, can be/is often None
:param target_boot_mode: target boot mode can be bios/uefi/None
or anything else for unspecified
:returns: the efi partition paths on softraid disk holders when target
boot mode is uefi, empty list otherwise.
"""
efi_partitions = []
# Actually any fat partition could be a candidate. Let's assume the
# partition also has the esp flag
if target_boot_mode == 'uefi':
if not efi_part:
LOG.debug("No explicit EFI partition provided. Scanning for any "
"EFI partition located on software RAID device %s to "
"be relocated",
device)
# NOTE: for whole disk images, no efi part uuid will be provided.
# Let's try to scan for esp on the root softraid device. If not
# found, it's fine in most cases to just create an empty esp and
# let grub handle the magic.
efi_part = utils.get_efi_part_on_device(device)
if efi_part:
efi_part = '{}p{}'.format(device, efi_part)
LOG.info("Creating EFI partitions on software RAID holder disks")
        # We know that we kept this space when configuring raid, see
# hardware.GenericHardwareManager.create_configuration.
# We could also directly get the EFI partition size.
partsize_mib = raid_utils.ESP_SIZE_MIB
partlabel_prefix = 'uefi-holder-'
for number, holder in enumerate(holders):
# NOTE: see utils.get_partition_table_type_from_specs
# for uefi we know that we have setup a gpt partition table,
# sgdisk can be used to edit table, more user friendly
# for alignment and relative offsets
partlabel = '{}{}'.format(partlabel_prefix, number)
out, _u = utils.execute('sgdisk', '-F', holder)
start_sector = '{}s'.format(out.splitlines()[-1].strip())
out, _u = utils.execute(
'sgdisk', '-n', '0:{}:+{}MiB'.format(start_sector,
partsize_mib),
'-t', '0:ef00', '-c', '0:{}'.format(partlabel), holder)
# Refresh part table
utils.execute("partprobe")
utils.execute("blkid")
target_part, _u = utils.execute(
"blkid", "-l", "-t", "PARTLABEL={}".format(partlabel), holder)
target_part = target_part.splitlines()[-1].split(':', 1)[0]
LOG.debug("EFI partition %s created on holder disk %s",
target_part, holder)
if efi_part:
LOG.debug("Relocating EFI %s to holder part %s", efi_part,
target_part)
# Blockdev copy
utils.execute("cp", efi_part, target_part)
else:
# Creating a label is just to make life easier
if number == 0:
fslabel = 'efi-part'
else:
# bak, label is limited to 11 chars
fslabel = 'efi-part-b'
ilib_utils.mkfs(fs='vfat', path=target_part, label=fslabel)
efi_partitions.append(target_part)
# TBD: Would not hurt to destroy source efi part when defined,
# for clarity.
elif target_boot_mode == 'bios':
partlabel_prefix = 'bios-boot-part-'
for number, holder in enumerate(holders):
label = utils.scan_partition_table_type(holder)
if label == 'gpt':
LOG.debug("Creating bios boot partition on disk holder %s",
holder)
out, _u = utils.execute('sgdisk', '-F', holder)
start_sector = '{}s'.format(out.splitlines()[-1].strip())
partlabel = '{}{}'.format(partlabel_prefix, number)
out, _u = utils.execute(
'sgdisk', '-n', '0:{}:+2MiB'.format(start_sector),
'-t', '0:ef02', '-c', '0:{}'.format(partlabel), holder)
# Q: MBR case, could we dd the boot code from the softraid
# (446 first bytes) if we detect a bootloader with
# _is_bootloader_loaded?
# A: This won't work. Because it includes the address on the
# disk, as in virtual disk, where to load the data from.
# Since there is a structural difference, this means it will
# fail.
    # Just an empty list if not uefi boot mode; not used anyway
return efi_partitions
def _install_grub2(device, root_uuid, efi_system_part_uuid=None,
prep_boot_part_uuid=None, target_boot_mode='bios'):
"""Install GRUB2 bootloader on a given device."""
LOG.debug("Installing GRUB2 bootloader on device %s", device)
efi_partitions = None
efi_part = None
efi_partition_mount_point = None
efi_mounted = False
holders = None
    # NOTE(TheJulia): Seems we need to get this before we ever possibly
    # restart the device in the case of multi-device RAID, as pyudev
    # doesn't exactly like the partition disappearing.
root_partition = _get_partition(device, uuid=root_uuid)
# If the root device is an md device (or partition), restart the device
# (to help grub finding it) and identify the underlying holder disks
# to install grub.
if hardware.is_md_device(device):
# If the root device is an md device (or partition),
# restart the device to help grub find it later on.
hardware.md_restart(device)
# If an md device, we need to rescan the devices anyway to pickup
# the md device partition.
_rescan_device(device)
elif (_is_bootloader_loaded(device)
and not (efi_system_part_uuid
or prep_boot_part_uuid)):
        # We always need to put the bootloader in place with software raid,
        # so it is okay to elif into skipping the bootloader step here.
LOG.info("Skipping installation of bootloader on device %s "
"as it is already marked bootable.", device)
return
try:
# Add /bin to PATH variable as grub requires it to find efibootmgr
# when running in uefi boot mode.
# Add /usr/sbin to PATH variable to ensure it is there as we do
# not use full path to grub binary anymore.
path_variable = os.environ.get('PATH', '')
path_variable = '%s:/bin:/usr/sbin:/sbin' % path_variable
# Mount the partition and binds
path = tempfile.mkdtemp()
if efi_system_part_uuid:
efi_part = _get_partition(device, uuid=efi_system_part_uuid)
efi_partitions = [efi_part]
if hardware.is_md_device(device):
holders = hardware.get_holder_disks(device)
efi_partitions = _prepare_boot_partitions_for_softraid(
device, holders, efi_part, target_boot_mode
)
if efi_partitions:
efi_partition_mount_point = os.path.join(path, "boot/efi")
# For power we want to install grub directly onto the PreP partition
if prep_boot_part_uuid:
device = _get_partition(device, uuid=prep_boot_part_uuid)
# If the root device is an md device (or partition),
# identify the underlying holder disks to install grub.
if hardware.is_md_device(device):
disks = holders
else:
disks = [device]
utils.execute('mount', root_partition, path)
for fs in BIND_MOUNTS:
utils.execute('mount', '-o', 'bind', fs, path + fs)
utils.execute('mount', '-t', 'sysfs', 'none', path + '/sys')
binary_name = "grub"
if os.path.exists(os.path.join(path, 'usr/sbin/grub2-install')):
binary_name = "grub2"
# Mount all vfat partitions listed in the fstab of the root partition.
# This is to make sure grub2 finds all files it needs, as some of them
# may not be inside the root partition but in the ESP (like grub2env).
LOG.debug("Mounting all partitions inside the image ...")
utils.execute('chroot %(path)s /bin/sh -c "mount -a -t vfat"' %
{'path': path}, shell=True,
env_variables={'PATH': path_variable})
if efi_partitions:
if not os.path.exists(efi_partition_mount_point):
os.makedirs(efi_partition_mount_point)
LOG.info("GRUB2 will be installed for UEFI on efi partitions %s",
efi_partitions)
for efi_partition in efi_partitions:
utils.execute(
'mount', efi_partition, efi_partition_mount_point)
efi_mounted = True
# FIXME(rg): does not work in cross boot mode case (target
# boot mode differs from ramdisk one)
# Probe for the correct target (depends on the arch, example
# --target=x86_64-efi)
utils.execute('chroot %(path)s /bin/sh -c '
'"%(bin)s-install"' %
{'path': path, 'bin': binary_name},
shell=True,
env_variables={
'PATH': path_variable
})
# Also run grub-install with --removable, this installs grub to
# the EFI fallback path. Useful if the NVRAM wasn't written
# correctly, was reset or if testing with virt as libvirt
# resets the NVRAM on instance start.
# This operation is essentially a copy operation. Use of the
# --removable flag, per the grub-install source code changes
# the default file to be copied, destination file name, and
# prevents NVRAM from being updated.
# We only run grub2_install for uefi if we can't verify the
# uefi bits
utils.execute('chroot %(path)s /bin/sh -c '
'"%(bin)s-install --removable"' %
{'path': path, 'bin': binary_name},
shell=True,
env_variables={
'PATH': path_variable
})
utils.execute('umount', efi_partition_mount_point, attempts=3,
delay_on_retry=True)
efi_mounted = False
            # NOTE: probably not needed for grub-mkconfig; it does not hurt
            # in case of doubt, and is cleaned up in the finally clause anyway
utils.execute('mount', efi_partitions[0],
efi_partition_mount_point)
efi_mounted = True
else:
# FIXME(rg): does not work if ramdisk boot mode is not the same
# as the target (--target=i386-pc, arch dependent).
# See previous FIXME
# Install grub. Normally, grub goes to one disk only. In case of
# md devices, grub goes to all underlying holder (RAID-1) disks.
LOG.info("GRUB2 will be installed on disks %s", disks)
for grub_disk in disks:
LOG.debug("Installing GRUB2 on disk %s", grub_disk)
utils.execute(
'chroot %(path)s /bin/sh -c "%(bin)s-install %(dev)s"' %
{
'path': path,
'bin': binary_name,
'dev': grub_disk
},
shell=True,
env_variables={
'PATH': path_variable
}
)
LOG.debug("GRUB2 successfully installed on device %s",
grub_disk)
# If the image has dracut installed, set the rd.md.uuid kernel
# parameter for discovered md devices.
if hardware.is_md_device(device) and _has_dracut(path):
rd_md_uuids = ["rd.md.uuid=%s" % x['UUID']
for x in hardware.md_get_raid_devices().values()]
LOG.debug("Setting rd.md.uuid kernel parameters: %s", rd_md_uuids)
with open('%s/etc/default/grub' % path, 'r') as g:
contents = g.read()
with open('%s/etc/default/grub' % path, 'w') as g:
g.write(
re.sub(r'GRUB_CMDLINE_LINUX="(.*)"',
r'GRUB_CMDLINE_LINUX="\1 %s"'
% " ".join(rd_md_uuids),
contents))
# Generate the grub configuration file
utils.execute('chroot %(path)s /bin/sh -c '
'"%(bin)s-mkconfig -o '
'/boot/%(bin)s/grub.cfg"' %
{'path': path, 'bin': binary_name}, shell=True,
env_variables={'PATH': path_variable})
LOG.info("GRUB2 successfully installed on %s", device)
except processutils.ProcessExecutionError as e:
error_msg = ('Installing GRUB2 boot loader to device %(dev)s '
'failed with %(err)s.' % {'dev': device, 'err': e})
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
finally:
umount_warn_msg = "Unable to umount %(path)s. Error: %(error)s"
# Umount binds and partition
umount_binds_fail = False
# If umount fails for efi partition, then we cannot be sure that all
# the changes were written back to the filesystem.
try:
if efi_mounted:
utils.execute('umount', efi_partition_mount_point, attempts=3,
delay_on_retry=True)
except processutils.ProcessExecutionError as e:
error_msg = ('Umounting efi system partition failed. '
'Attempted 3 times. Error: %s' % e)
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
# Umount the vfat partitions we may have mounted
LOG.debug("Unmounting all partitions inside the image ...")
try:
utils.execute('chroot %(path)s /bin/sh -c "umount -a -t vfat"' %
{'path': path}, shell=True,
env_variables={'PATH': path_variable})
except processutils.ProcessExecutionError as e:
LOG.warning("Unable to umount vfat partitions. Error: %(error)s",
{'error': e})
for fs in BIND_MOUNTS:
try:
utils.execute('umount', path + fs, attempts=3,
delay_on_retry=True)
except processutils.ProcessExecutionError as e:
umount_binds_fail = True
LOG.warning(umount_warn_msg, {'path': path + fs, 'error': e})
try:
utils.execute('umount', path + '/sys', attempts=3,
delay_on_retry=True)
except processutils.ProcessExecutionError as e:
umount_binds_fail = True
LOG.warning(umount_warn_msg, {'path': path + '/sys', 'error': e})
# If umounting the binds succeed then we can try to delete it
if not umount_binds_fail:
try:
utils.execute('umount', path, attempts=3, delay_on_retry=True)
except processutils.ProcessExecutionError as e:
LOG.warning(umount_warn_msg, {'path': path, 'error': e})
else:
# After everything is umounted we can then remove the
# temporary directory
shutil.rmtree(path)
class ImageExtension(base.BaseAgentExtension):
@base.async_command('install_bootloader')
def install_bootloader(self, root_uuid, efi_system_part_uuid=None,
prep_boot_part_uuid=None,
target_boot_mode='bios'):
"""Install the GRUB2 bootloader on the image.
:param root_uuid: The UUID of the root partition.
:param efi_system_part_uuid: The UUID of the efi system partition.
To be used only for uefi boot mode. For uefi boot mode, the
boot loader will be installed here.
:param prep_boot_part_uuid: The UUID of the PReP Boot partition.
Used only for booting ppc64* partition images locally. In this
scenario the bootloader will be installed here.
:param target_boot_mode: bios, uefi. Only taken into account
            for softraid, when no efi partition is explicitly provided
(happens for whole disk images)
:raises: CommandExecutionError if the installation of the
bootloader fails.
:raises: DeviceNotFound if the root partition is not found.
"""
device = hardware.dispatch_to_managers('get_os_install_device')
iscsi.clean_up(device)
boot = hardware.dispatch_to_managers('get_boot_info')
# FIXME(arne_wiebalck): make software RAID work with efibootmgr
if (boot.current_boot_mode == 'uefi'
and not hardware.is_md_device(device)):
has_efibootmgr = True
try:
utils.execute('efibootmgr', '--version')
except FileNotFoundError:
LOG.warning("efibootmgr is not available in the ramdisk")
has_efibootmgr = False
if has_efibootmgr:
if _manage_uefi(device,
efi_system_part_uuid=efi_system_part_uuid):
return
# In case we can't use efibootmgr for uefi we will continue using grub2
LOG.debug('Using grub2-install to set up boot files')
_install_grub2(device,
root_uuid=root_uuid,
efi_system_part_uuid=efi_system_part_uuid,
prep_boot_part_uuid=prep_boot_part_uuid,
target_boot_mode=target_boot_mode)
| 43.771053
| 79
| 0.575963
|
4a135408cd0959a2bb5065e18c4c5fbb935add79
| 2,403
|
py
|
Python
|
src/extra_apps/xadmin/__init__.py
|
NewReStarter/Django_Form
|
3a9c8b536f5750ed9490533cee64ca358020a265
|
[
"MIT"
] | null | null | null |
src/extra_apps/xadmin/__init__.py
|
NewReStarter/Django_Form
|
3a9c8b536f5750ed9490533cee64ca358020a265
|
[
"MIT"
] | null | null | null |
src/extra_apps/xadmin/__init__.py
|
NewReStarter/Django_Form
|
3a9c8b536f5750ed9490533cee64ca358020a265
|
[
"MIT"
] | null | null | null |
VERSION = (0,6,0)
from xadmin.sites import AdminSite, site
class Settings(object):
pass
def autodiscover():
"""
    Auto-discover INSTALLED_APPS adminx.py modules and fail silently when
    not present. This forces an import on them to register any xadmin bits
    they may want.
"""
from importlib import import_module
from django.conf import settings
from django.utils.module_loading import module_has_submodule
from django.apps import apps
setattr(settings, 'CRISPY_TEMPLATE_PACK', 'bootstrap3')
setattr(settings, 'CRISPY_CLASS_CONVERTERS', {
"textinput": "textinput textInput form-control",
"fileinput": "fileinput fileUpload form-control",
"passwordinput": "textinput textInput form-control",
})
from xadmin.views import register_builtin_views
register_builtin_views(site)
# load xadmin settings from XADMIN_CONF module
try:
        xadmin_conf = getattr(settings, 'XADMIN_CONF', 'xadmin_conf')
conf_mod = import_module(xadmin_conf)
except Exception:
conf_mod = None
if conf_mod:
for key in dir(conf_mod):
setting = getattr(conf_mod, key)
try:
if issubclass(setting, Settings):
site.register_settings(setting.__name__, setting)
except Exception:
pass
from xadmin.plugins import register_builtin_plugins
register_builtin_plugins(site)
for app_config in apps.get_app_configs():
mod = import_module(app_config.name)
        # Attempt to import the app's adminx module.
try:
before_import_registry = site.copy_registry()
import_module('%s.adminx' % app_config.name)
        except Exception:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
site.restore_registry(before_import_registry)
            # Decide whether to bubble up this error. If the app just
            # doesn't have an adminx module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'adminx'):
raise
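# Illustrative only: a minimal conf module that autodiscover() above
# would pick up (module name per the XADMIN_CONF default; the settings
# class name and its attribute are hypothetical):
#
# # xadmin_conf.py
# from xadmin import Settings
#
# class GlobalSettings(Settings):
#     site_title = 'My Admin'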
default_app_config = 'xadmin.apps.XAdminConfig'
| 33.84507
| 80
| 0.661673
|
4a1354a955034544dd9d8f8e1edf0fc881262db7
| 3,204
|
py
|
Python
|
day2/pytorch_dvc_cnn_simple_hvd.py
|
wikfeldt/intro-to-dl
|
7fb1fb6c520941143000c5e1b46c48c95db17ed6
|
[
"MIT"
] | 59
|
2018-04-27T04:34:41.000Z
|
2022-03-16T02:43:50.000Z
|
day2/pytorch_dvc_cnn_simple_hvd.py
|
wikfeldt/intro-to-dl
|
7fb1fb6c520941143000c5e1b46c48c95db17ed6
|
[
"MIT"
] | 1
|
2020-10-10T05:04:00.000Z
|
2020-10-12T08:19:38.000Z
|
day2/pytorch_dvc_cnn_simple_hvd.py
|
wikfeldt/intro-to-dl
|
7fb1fb6c520941143000c5e1b46c48c95db17ed6
|
[
"MIT"
] | 53
|
2017-04-14T09:35:04.000Z
|
2022-02-28T19:19:36.000Z
|
# coding: utf-8
# Dogs-vs-cats classification with CNNs
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from datetime import datetime
import horovod.torch as hvd
from pytorch_dvc_cnn_hvd import get_train_loader, get_validation_loader, get_test_loader
from pytorch_dvc_cnn_hvd import device, train, evaluate, get_tensorboard
model_file = 'dvc_simple_cnn_hvd.pt'
# Option 1: Train a small CNN from scratch
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (3, 3))
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2 = nn.Conv2d(32, 32, (3, 3))
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3 = nn.Conv2d(32, 64, (3, 3))
self.pool3 = nn.MaxPool2d((2, 2))
self.fc1 = nn.Linear(17*17*64, 64)
self.fc1_drop = nn.Dropout(0.5)
self.fc2 = nn.Linear(64, 1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = self.pool3(x)
# "flatten" 2D to 1D
x = x.view(-1, 17*17*64)
x = F.relu(self.fc1(x))
x = self.fc1_drop(x)
return torch.sigmoid(self.fc2(x))
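# Shape bookkeeping for the flatten above (inferred from the layer sizes;
# the data pipeline in pytorch_dvc_cnn_hvd is assumed to feed 3x150x150
# images):
#   150 -> conv3x3 -> 148 -> pool2x2 -> 74
#    74 -> conv3x3 ->  72 -> pool2x2 -> 36
#    36 -> conv3x3 ->  34 -> pool2x2 -> 17  => 17*17*64 flattened features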
def train_main():
model = Net().to(device)
# optimizer = optim.SGD(model.parameters(), lr=0.05)
if hvd.rank() == 0:
print(model)
# Horovod: broadcast parameters.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
# Horovod: scale learning rate by the number of GPUs.
lr = 0.05
optimizer = optim.SGD(model.parameters(), lr=lr * hvd.size())
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters())
criterion = nn.BCELoss()
batch_size = 25
train_loader, train_sampler = get_train_loader(batch_size)
validation_loader, validation_sampler = get_validation_loader(batch_size)
log = get_tensorboard('simple_hvd')
epochs = 50
start_time = datetime.now()
for epoch in range(1, epochs + 1):
train(model, train_loader, train_sampler, criterion, optimizer, epoch, log)
with torch.no_grad():
if hvd.rank() == 0:
print('\nValidation:')
evaluate(model, validation_loader, validation_sampler, criterion, epoch, log)
end_time = datetime.now()
if hvd.rank() == 0:
print('Total training time: {}.'.format(end_time - start_time))
torch.save(model.state_dict(), model_file)
print('Wrote model to', model_file)
def test_main():
model = Net()
model.load_state_dict(torch.load(model_file))
model.to(device)
test_loader = get_test_loader(25)
print('=========')
print('Test set:')
with torch.no_grad():
evaluate(model, test_loader)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true')
args = parser.parse_args()
if args.test:
test_main()
else:
train_main()
| 27.86087
| 89
| 0.627029
|
4a1354f4a891846927b10177b6dcff7166acce7f
| 698
|
py
|
Python
|
files/native_extensions/ctypes_mandelbrot.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | 3
|
2021-09-12T20:54:08.000Z
|
2021-09-21T14:49:15.000Z
|
files/native_extensions/ctypes_mandelbrot.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | null | null | null |
files/native_extensions/ctypes_mandelbrot.py
|
mipt-npm-study/sciprog-python
|
09a0d99254fcd559ec8d13bedd5521e98e86b3ce
|
[
"MIT"
] | 2
|
2021-09-14T13:15:41.000Z
|
2021-09-14T15:18:01.000Z
|
import time
from ctypes import CDLL, POINTER, c_int
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
ctypes_dll = CDLL("./libmandelbrot.so")
ctypes_dll.mandelbrot.restype = POINTER(POINTER(c_int*200)*200)
tic = time.perf_counter()
image_ctypes = ctypes_dll.mandelbrot(200, 200)
toc = time.perf_counter()
    # A more robust variant would also declare the argument types, e.g.:
    # ctypes_dll.mandelbrot.argtypes = [c_int, c_int]
print(toc - tic, "s")
image = np.zeros((200, 200))
for i, item in enumerate(image_ctypes.contents):
for j, value in enumerate(item.contents):
image[i, j] = value
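    # A vectorised alternative to the copy loop above (illustrative
    # sketch, assuming the same 200x200 row-pointer layout):
    # image = np.array([np.ctypeslib.as_array(row.contents)
    #                   for row in image_ctypes.contents])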
plt.imshow(image)
plt.show()
| 33.238095
| 67
| 0.670487
|
4a135538e9298d5420377b5d8a7e424b9f7c5057
| 35,050
|
py
|
Python
|
lib/sqlalchemy/sql/traversals.py
|
eukreign/sqlalchemy
|
18ce4f9937c2d6753acbb054b4990c7da298a5d7
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/traversals.py
|
eukreign/sqlalchemy
|
18ce4f9937c2d6753acbb054b4990c7da298a5d7
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/sql/traversals.py
|
eukreign/sqlalchemy
|
18ce4f9937c2d6753acbb054b4990c7da298a5d7
|
[
"MIT"
] | 1
|
2020-05-05T21:56:23.000Z
|
2020-05-05T21:56:23.000Z
|
from collections import deque
from collections import namedtuple
import operator
from . import operators
from .visitors import ExtendedInternalTraversal
from .visitors import InternalTraversal
from .. import util
from ..inspection import inspect
from ..util import collections_abc
from ..util import HasMemoized
SKIP_TRAVERSE = util.symbol("skip_traverse")
COMPARE_FAILED = False
COMPARE_SUCCEEDED = True
NO_CACHE = util.symbol("no_cache")
CACHE_IN_PLACE = util.symbol("cache_in_place")
CALL_GEN_CACHE_KEY = util.symbol("call_gen_cache_key")
STATIC_CACHE_KEY = util.symbol("static_cache_key")
def compare(obj1, obj2, **kw):
if kw.get("use_proxies", False):
strategy = ColIdentityComparatorStrategy()
else:
strategy = TraversalComparatorStrategy()
return strategy.compare(obj1, obj2, **kw)
class HasCacheKey(HasMemoized):
_cache_key_traversal = NO_CACHE
__slots__ = ()
def _gen_cache_key(self, anon_map, bindparams):
"""return an optional cache key.
The cache key is a tuple which can contain any series of
objects that are hashable and also identifies
this object uniquely within the presence of a larger SQL expression
or statement, for the purposes of caching the resulting query.
The cache key should be based on the SQL compiled structure that would
ultimately be produced. That is, two structures that are composed in
exactly the same way should produce the same cache key; any difference
in the structures that would affect the SQL string or the type handlers
should result in a different cache key.
If a structure cannot produce a useful cache key, it should raise
NotImplementedError, which will result in the entire structure
        that it is part of not being useful as a cache key.
"""
idself = id(self)
if anon_map is not None:
if idself in anon_map:
return (anon_map[idself], self.__class__)
else:
# inline of
# id_ = anon_map[idself]
anon_map[idself] = id_ = str(anon_map.index)
anon_map.index += 1
else:
id_ = None
_cache_key_traversal = self._cache_key_traversal
if _cache_key_traversal is None:
try:
_cache_key_traversal = self._traverse_internals
except AttributeError:
_cache_key_traversal = NO_CACHE
if _cache_key_traversal is NO_CACHE:
if anon_map is not None:
anon_map[NO_CACHE] = True
return None
result = (id_, self.__class__)
# inline of _cache_key_traversal_visitor.run_generated_dispatch()
try:
dispatcher = self.__class__.__dict__[
"_generated_cache_key_traversal"
]
except KeyError:
dispatcher = _cache_key_traversal_visitor.generate_dispatch(
self, _cache_key_traversal, "_generated_cache_key_traversal"
)
for attrname, obj, meth in dispatcher(
self, _cache_key_traversal_visitor
):
if obj is not None:
if meth is CACHE_IN_PLACE:
# cache in place is always going to be a Python
# tuple, dict, list, etc. so we can do a boolean check
if obj:
result += (attrname, obj)
elif meth is STATIC_CACHE_KEY:
result += (attrname, obj._static_cache_key)
elif meth is CALL_GEN_CACHE_KEY:
result += (
attrname,
obj._gen_cache_key(anon_map, bindparams),
)
elif meth is InternalTraversal.dp_annotations_key:
# obj is here is the _annotations dict. however,
# we want to use the memoized cache key version of it.
# for Columns, this should be long lived. For select()
# statements, not so much, but they usually won't have
# annotations.
if obj:
result += self._annotations_cache_key
elif meth is InternalTraversal.dp_clauseelement_list:
if obj:
result += (
attrname,
tuple(
[
elem._gen_cache_key(anon_map, bindparams)
for elem in obj
]
),
)
else:
# note that all the "ClauseElement" standalone cases
# here have been handled by inlines above; so we can
# safely assume the object is a standard list/tuple/dict
# which we can skip if it evaluates to false.
# improvement would be to have this as a flag delivered
# up front in the dispatcher list
if obj:
result += meth(
attrname, obj, self, anon_map, bindparams
)
return result
@HasMemoized.memoized_instancemethod
def _generate_cache_key(self):
"""return a cache key.
The cache key is a tuple which can contain any series of
objects that are hashable and also identifies
this object uniquely within the presence of a larger SQL expression
or statement, for the purposes of caching the resulting query.
The cache key should be based on the SQL compiled structure that would
ultimately be produced. That is, two structures that are composed in
exactly the same way should produce the same cache key; any difference
in the structures that would affect the SQL string or the type handlers
should result in a different cache key.
The cache key returned by this method is an instance of
:class:`.CacheKey`, which consists of a tuple representing the
cache key, as well as a list of :class:`.BindParameter` objects
which are extracted from the expression. While two expressions
that produce identical cache key tuples will themselves generate
identical SQL strings, the list of :class:`.BindParameter` objects
indicates the bound values which may have different values in
each one; these bound parameters must be consulted in order to
execute the statement with the correct parameters.
        A :class:`_expression.ClauseElement` structure that does not implement
a :meth:`._gen_cache_key` method and does not implement a
:attr:`.traverse_internals` attribute will not be cacheable; when
such an element is embedded into a larger structure, this method
will return None, indicating no cache key is available.
"""
bindparams = []
_anon_map = anon_map()
key = self._gen_cache_key(_anon_map, bindparams)
if NO_CACHE in _anon_map:
return None
else:
return CacheKey(key, bindparams)
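# Illustrative usage (comment only): two structurally identical
# expressions yield equal cache keys, while the differing bound values
# are reported via `bindparams`. Assuming the public `select()` /
# `column()` constructs:
#
#   from sqlalchemy import column, select
#   k1 = select([column('a')]).where(column('a') == 1)._generate_cache_key()
#   k2 = select([column('a')]).where(column('a') == 2)._generate_cache_key()
#   k1 == k2                          # same structure -> same key
#   [p.value for p in k1.bindparams]  # -> [1]; k2's would be [2]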
class CacheKey(namedtuple("CacheKey", ["key", "bindparams"])):
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
return self.key == other.key
def __str__(self):
stack = [self.key]
output = []
sentinel = object()
indent = -1
while stack:
elem = stack.pop(0)
if elem is sentinel:
output.append((" " * (indent * 2)) + "),")
indent -= 1
elif isinstance(elem, tuple):
if not elem:
output.append((" " * ((indent + 1) * 2)) + "()")
else:
indent += 1
stack = list(elem) + [sentinel] + stack
output.append((" " * (indent * 2)) + "(")
else:
if isinstance(elem, HasCacheKey):
repr_ = "<%s object at %s>" % (
type(elem).__name__,
hex(id(elem)),
)
else:
repr_ = repr(elem)
output.append((" " * (indent * 2)) + " " + repr_ + ", ")
return "CacheKey(key=%s)" % ("\n".join(output),)
def _clone(element, **kw):
return element._clone()
class _CacheKey(ExtendedInternalTraversal):
    # very common elements are inlined into the main _gen_cache_key() method
    # to produce dramatic savings in Python function call overhead
visit_has_cache_key = visit_clauseelement = CALL_GEN_CACHE_KEY
visit_clauseelement_list = InternalTraversal.dp_clauseelement_list
visit_annotations_key = InternalTraversal.dp_annotations_key
visit_string = (
visit_boolean
) = visit_operator = visit_plain_obj = CACHE_IN_PLACE
visit_statement_hint_list = CACHE_IN_PLACE
visit_type = STATIC_CACHE_KEY
def visit_inspectable(self, attrname, obj, parent, anon_map, bindparams):
return self.visit_has_cache_key(
attrname, inspect(obj), parent, anon_map, bindparams
)
def visit_string_list(self, attrname, obj, parent, anon_map, bindparams):
return tuple(obj)
def visit_multi(self, attrname, obj, parent, anon_map, bindparams):
return (
attrname,
obj._gen_cache_key(anon_map, bindparams)
if isinstance(obj, HasCacheKey)
else obj,
)
def visit_multi_list(self, attrname, obj, parent, anon_map, bindparams):
return (
attrname,
tuple(
elem._gen_cache_key(anon_map, bindparams)
if isinstance(elem, HasCacheKey)
else elem
for elem in obj
),
)
def visit_has_cache_key_tuples(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
return (
attrname,
tuple(
tuple(
elem._gen_cache_key(anon_map, bindparams)
for elem in tup_elem
)
for tup_elem in obj
),
)
def visit_has_cache_key_list(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
return (
attrname,
tuple(elem._gen_cache_key(anon_map, bindparams) for elem in obj),
)
def visit_inspectable_list(
self, attrname, obj, parent, anon_map, bindparams
):
return self.visit_has_cache_key_list(
attrname, [inspect(o) for o in obj], parent, anon_map, bindparams
)
def visit_clauseelement_tuples(
self, attrname, obj, parent, anon_map, bindparams
):
return self.visit_has_cache_key_tuples(
attrname, obj, parent, anon_map, bindparams
)
def visit_anon_name(self, attrname, obj, parent, anon_map, bindparams):
from . import elements
name = obj
if isinstance(name, elements._anonymous_label):
name = name.apply_map(anon_map)
return (attrname, name)
def visit_fromclause_ordered_set(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
return (
attrname,
tuple([elem._gen_cache_key(anon_map, bindparams) for elem in obj]),
)
def visit_clauseelement_unordered_set(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
cache_keys = [
elem._gen_cache_key(anon_map, bindparams) for elem in obj
]
return (
attrname,
tuple(
sorted(cache_keys)
), # cache keys all start with (id_, class)
)
def visit_named_ddl_element(
self, attrname, obj, parent, anon_map, bindparams
):
return (attrname, obj.name)
def visit_prefix_sequence(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
return (
attrname,
tuple(
[
(clause._gen_cache_key(anon_map, bindparams), strval)
for clause, strval in obj
]
),
)
def visit_table_hint_list(
self, attrname, obj, parent, anon_map, bindparams
):
if not obj:
return ()
return (
attrname,
tuple(
[
(
clause._gen_cache_key(anon_map, bindparams),
dialect_name,
text,
)
for (clause, dialect_name), text in obj.items()
]
),
)
def visit_plain_dict(self, attrname, obj, parent, anon_map, bindparams):
return (attrname, tuple([(key, obj[key]) for key in sorted(obj)]))
def visit_dialect_options(
self, attrname, obj, parent, anon_map, bindparams
):
return (
attrname,
tuple(
(
dialect_name,
tuple(
[
(key, obj[dialect_name][key])
for key in sorted(obj[dialect_name])
]
),
)
for dialect_name in sorted(obj)
),
)
def visit_string_clauseelement_dict(
self, attrname, obj, parent, anon_map, bindparams
):
return (
attrname,
tuple(
(key, obj[key]._gen_cache_key(anon_map, bindparams))
for key in sorted(obj)
),
)
def visit_string_multi_dict(
self, attrname, obj, parent, anon_map, bindparams
):
return (
attrname,
tuple(
(
key,
value._gen_cache_key(anon_map, bindparams)
if isinstance(value, HasCacheKey)
else value,
)
for key, value in [(key, obj[key]) for key in sorted(obj)]
),
)
def visit_fromclause_canonical_column_collection(
self, attrname, obj, parent, anon_map, bindparams
):
# inlining into the internals of ColumnCollection
return (
attrname,
tuple(
col._gen_cache_key(anon_map, bindparams)
for k, col in obj._collection
),
)
def visit_unknown_structure(
self, attrname, obj, parent, anon_map, bindparams
):
anon_map[NO_CACHE] = True
return ()
def visit_dml_ordered_values(
self, attrname, obj, parent, anon_map, bindparams
):
return (
attrname,
tuple(
(
key._gen_cache_key(anon_map, bindparams)
if hasattr(key, "__clause_element__")
else key,
value._gen_cache_key(anon_map, bindparams),
)
for key, value in obj
),
)
def visit_dml_values(self, attrname, obj, parent, anon_map, bindparams):
expr_values = {k for k in obj if hasattr(k, "__clause_element__")}
if expr_values:
# expr values can't be sorted deterministically right now,
# so no cache
anon_map[NO_CACHE] = True
return ()
str_values = expr_values.symmetric_difference(obj)
return (
attrname,
tuple(
(k, obj[k]._gen_cache_key(anon_map, bindparams))
for k in sorted(str_values)
),
)
def visit_dml_multi_values(
self, attrname, obj, parent, anon_map, bindparams
):
# multivalues are simply not cacheable right now
anon_map[NO_CACHE] = True
return ()
_cache_key_traversal_visitor = _CacheKey()
class _CopyInternals(InternalTraversal):
"""Generate a _copy_internals internal traversal dispatch for classes
with a _traverse_internals collection."""
def visit_clauseelement(self, parent, element, clone=_clone, **kw):
return clone(element, **kw)
def visit_clauseelement_list(self, parent, element, clone=_clone, **kw):
return [clone(clause, **kw) for clause in element]
def visit_clauseelement_unordered_set(
self, parent, element, clone=_clone, **kw
):
return {clone(clause, **kw) for clause in element}
def visit_clauseelement_tuples(self, parent, element, clone=_clone, **kw):
return [
tuple(clone(tup_elem, **kw) for tup_elem in elem)
for elem in element
]
def visit_string_clauseelement_dict(
self, parent, element, clone=_clone, **kw
):
return dict(
(key, clone(value, **kw)) for key, value in element.items()
)
def visit_dml_ordered_values(self, parent, element, clone=_clone, **kw):
# sequence of 2-tuples
return [
(
clone(key, **kw)
if hasattr(key, "__clause_element__")
else key,
clone(value, **kw),
)
for key, value in element
]
def visit_dml_values(self, parent, element, clone=_clone, **kw):
return {
(
clone(key, **kw) if hasattr(key, "__clause_element__") else key
): clone(value, **kw)
for key, value in element.items()
}
def visit_dml_multi_values(self, parent, element, clone=_clone, **kw):
# sequence of sequences, each sequence contains a list/dict/tuple
def copy(elem):
if isinstance(elem, (list, tuple)):
return [
clone(value, **kw)
if hasattr(value, "__clause_element__")
else value
for value in elem
]
elif isinstance(elem, dict):
return {
(
clone(key, **kw)
if hasattr(key, "__clause_element__")
else key
): (
clone(value, **kw)
if hasattr(value, "__clause_element__")
else value
)
for key, value in elem.items()
}
else:
# TODO: use abc classes
assert False
return [
[copy(sub_element) for sub_element in sequence]
for sequence in element
]
_copy_internals = _CopyInternals()
class _GetChildren(InternalTraversal):
"""Generate a _children_traversal internal traversal dispatch for classes
with a _traverse_internals collection."""
def visit_has_cache_key(self, element, **kw):
return (element,)
def visit_clauseelement(self, element, **kw):
return (element,)
def visit_clauseelement_list(self, element, **kw):
return tuple(element)
def visit_clauseelement_tuples(self, element, **kw):
tup = ()
for elem in element:
tup += elem
return tup
def visit_fromclause_canonical_column_collection(self, element, **kw):
if kw.get("column_collections", False):
return tuple(element)
else:
return ()
def visit_string_clauseelement_dict(self, element, **kw):
return tuple(element.values())
def visit_fromclause_ordered_set(self, element, **kw):
return tuple(element)
def visit_clauseelement_unordered_set(self, element, **kw):
return tuple(element)
def visit_dml_ordered_values(self, element, **kw):
for k, v in element:
if hasattr(k, "__clause_element__"):
yield k
yield v
def visit_dml_values(self, element, **kw):
expr_values = {k for k in element if hasattr(k, "__clause_element__")}
str_values = expr_values.symmetric_difference(element)
for k in sorted(str_values):
yield element[k]
for k in expr_values:
yield k
yield element[k]
def visit_dml_multi_values(self, element, **kw):
return ()
_get_children = _GetChildren()
@util.preload_module("sqlalchemy.sql.elements")
def _resolve_name_for_compare(element, name, anon_map, **kw):
if isinstance(name, util.preloaded.sql_elements._anonymous_label):
name = name.apply_map(anon_map)
return name
class anon_map(dict):
"""A map that creates new keys for missing key access.
Produces an incrementing sequence given a series of unique keys.
This is similar to the compiler prefix_anon_map class although simpler.
Inlines the approach taken by :class:`sqlalchemy.util.PopulateDict` which
is otherwise usually used for this type of operation.
"""
def __init__(self):
self.index = 0
def __missing__(self, key):
self[key] = val = str(self.index)
self.index += 1
return val
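# Illustrative usage (ours, not part of the module): missing-key access mints
# an incrementing string index and memoizes it, so repeated lookups of the
# same key are stable:
#
#     amap = anon_map()
#     amap["foo"]   # -> "0"
#     amap["bar"]   # -> "1"
#     amap["foo"]   # -> "0" (already assigned)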
class TraversalComparatorStrategy(InternalTraversal, util.MemoizedSlots):
__slots__ = "stack", "cache", "anon_map"
def __init__(self):
self.stack = deque()
self.cache = set()
def _memoized_attr_anon_map(self):
return (anon_map(), anon_map())
def compare(self, obj1, obj2, **kw):
stack = self.stack
cache = self.cache
compare_annotations = kw.get("compare_annotations", False)
stack.append((obj1, obj2))
while stack:
left, right = stack.popleft()
if left is right:
continue
elif left is None or right is None:
# we know they are different so no match
return False
elif (left, right) in cache:
continue
cache.add((left, right))
visit_name = left.__visit_name__
if visit_name != right.__visit_name__:
return False
meth = getattr(self, "compare_%s" % visit_name, None)
if meth:
attributes_compared = meth(left, right, **kw)
if attributes_compared is COMPARE_FAILED:
return False
elif attributes_compared is SKIP_TRAVERSE:
continue
# attributes_compared is returned as a list of attribute
# names that were "handled" by the comparison method above.
# remaining attribute names in the _traverse_internals
# will be compared.
else:
attributes_compared = ()
for (
(left_attrname, left_visit_sym),
(right_attrname, right_visit_sym),
) in util.zip_longest(
left._traverse_internals,
right._traverse_internals,
fillvalue=(None, None),
):
if not compare_annotations and (
(left_attrname == "_annotations")
or (right_attrname == "_annotations")
):
continue
if (
left_attrname != right_attrname
or left_visit_sym is not right_visit_sym
):
return False
elif left_attrname in attributes_compared:
continue
dispatch = self.dispatch(left_visit_sym)
left_child = operator.attrgetter(left_attrname)(left)
right_child = operator.attrgetter(right_attrname)(right)
if left_child is None:
if right_child is not None:
return False
else:
continue
comparison = dispatch(
left, left_child, right, right_child, **kw
)
if comparison is COMPARE_FAILED:
return False
return True
def compare_inner(self, obj1, obj2, **kw):
comparator = self.__class__()
return comparator.compare(obj1, obj2, **kw)
def visit_has_cache_key(
self, left_parent, left, right_parent, right, **kw
):
if left._gen_cache_key(self.anon_map[0], []) != right._gen_cache_key(
self.anon_map[1], []
):
return COMPARE_FAILED
def visit_clauseelement(
self, left_parent, left, right_parent, right, **kw
):
self.stack.append((left, right))
def visit_fromclause_canonical_column_collection(
self, left_parent, left, right_parent, right, **kw
):
for lcol, rcol in util.zip_longest(left, right, fillvalue=None):
self.stack.append((lcol, rcol))
def visit_fromclause_derived_column_collection(
self, left_parent, left, right_parent, right, **kw
):
pass
def visit_string_clauseelement_dict(
self, left_parent, left, right_parent, right, **kw
):
for lstr, rstr in util.zip_longest(
sorted(left), sorted(right), fillvalue=None
):
if lstr != rstr:
return COMPARE_FAILED
self.stack.append((left[lstr], right[rstr]))
def visit_clauseelement_tuples(
self, left_parent, left, right_parent, right, **kw
):
for ltup, rtup in util.zip_longest(left, right, fillvalue=None):
if ltup is None or rtup is None:
return COMPARE_FAILED
for l, r in util.zip_longest(ltup, rtup, fillvalue=None):
self.stack.append((l, r))
def visit_clauseelement_list(
self, left_parent, left, right_parent, right, **kw
):
for l, r in util.zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def _compare_unordered_sequences(self, seq1, seq2, **kw):
if seq1 is None:
return seq2 is None
completed = set()
for clause in seq1:
for other_clause in set(seq2).difference(completed):
if self.compare_inner(clause, other_clause, **kw):
completed.add(other_clause)
break
return len(completed) == len(seq1) == len(seq2)
def visit_clauseelement_unordered_set(
self, left_parent, left, right_parent, right, **kw
):
return self._compare_unordered_sequences(left, right, **kw)
def visit_fromclause_ordered_set(
self, left_parent, left, right_parent, right, **kw
):
for l, r in util.zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def visit_string(self, left_parent, left, right_parent, right, **kw):
return left == right
def visit_string_list(self, left_parent, left, right_parent, right, **kw):
return left == right
def visit_anon_name(self, left_parent, left, right_parent, right, **kw):
return _resolve_name_for_compare(
left_parent, left, self.anon_map[0], **kw
) == _resolve_name_for_compare(
right_parent, right, self.anon_map[1], **kw
)
def visit_boolean(self, left_parent, left, right_parent, right, **kw):
return left == right
def visit_operator(self, left_parent, left, right_parent, right, **kw):
return left is right
def visit_type(self, left_parent, left, right_parent, right, **kw):
return left._compare_type_affinity(right)
def visit_plain_dict(self, left_parent, left, right_parent, right, **kw):
return left == right
def visit_dialect_options(
self, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_annotations_key(
self, left_parent, left, right_parent, right, **kw
):
if left and right:
return (
left_parent._annotations_cache_key
== right_parent._annotations_cache_key
)
else:
return left == right
def visit_plain_obj(self, left_parent, left, right_parent, right, **kw):
return left == right
def visit_named_ddl_element(
self, left_parent, left, right_parent, right, **kw
):
if left is None:
if right is not None:
return COMPARE_FAILED
return left.name == right.name
def visit_prefix_sequence(
self, left_parent, left, right_parent, right, **kw
):
for (l_clause, l_str), (r_clause, r_str) in util.zip_longest(
left, right, fillvalue=(None, None)
):
if l_str != r_str:
return COMPARE_FAILED
else:
self.stack.append((l_clause, r_clause))
def visit_table_hint_list(
self, left_parent, left, right_parent, right, **kw
):
left_keys = sorted(left, key=lambda elem: (elem[0].fullname, elem[1]))
right_keys = sorted(
right, key=lambda elem: (elem[0].fullname, elem[1])
)
for (ltable, ldialect), (rtable, rdialect) in util.zip_longest(
left_keys, right_keys, fillvalue=(None, None)
):
if ldialect != rdialect:
return COMPARE_FAILED
elif left[(ltable, ldialect)] != right[(rtable, rdialect)]:
return COMPARE_FAILED
else:
self.stack.append((ltable, rtable))
def visit_statement_hint_list(
self, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_unknown_structure(
self, left_parent, left, right_parent, right, **kw
):
raise NotImplementedError()
def visit_dml_ordered_values(
self, left_parent, left, right_parent, right, **kw
):
# sequence of tuple pairs
for (lk, lv), (rk, rv) in util.zip_longest(
left, right, fillvalue=(None, None)
):
if not self._compare_dml_values_or_ce(lk, rk, **kw):
return COMPARE_FAILED
def _compare_dml_values_or_ce(self, lv, rv, **kw):
lvce = hasattr(lv, "__clause_element__")
rvce = hasattr(rv, "__clause_element__")
if lvce != rvce:
return False
elif lvce and not self.compare_inner(lv, rv, **kw):
return False
elif not lvce and lv != rv:
return False
elif not self.compare_inner(lv, rv, **kw):
return False
return True
def visit_dml_values(self, left_parent, left, right_parent, right, **kw):
if left is None or right is None or len(left) != len(right):
return COMPARE_FAILED
if isinstance(left, collections_abc.Sequence):
for lv, rv in zip(left, right):
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
else:
for lk in left:
lv = left[lk]
if lk not in right:
return COMPARE_FAILED
rv = right[lk]
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
def visit_dml_multi_values(
self, left_parent, left, right_parent, right, **kw
):
for lseq, rseq in util.zip_longest(left, right, fillvalue=None):
if lseq is None or rseq is None:
return COMPARE_FAILED
for ld, rd in util.zip_longest(lseq, rseq, fillvalue=None):
if (
self.visit_dml_values(
left_parent, ld, right_parent, rd, **kw
)
is COMPARE_FAILED
):
return COMPARE_FAILED
def compare_clauselist(self, left, right, **kw):
if left.operator is right.operator:
if operators.is_associative(left.operator):
if self._compare_unordered_sequences(
left.clauses, right.clauses, **kw
):
return ["operator", "clauses"]
else:
return COMPARE_FAILED
else:
return ["operator"]
else:
return COMPARE_FAILED
def compare_binary(self, left, right, **kw):
if left.operator == right.operator:
if operators.is_commutative(left.operator):
if (
self.compare_inner(left.left, right.left, **kw)
and self.compare_inner(left.right, right.right, **kw)
) or (
self.compare_inner(left.left, right.right, **kw)
and self.compare_inner(left.right, right.left, **kw)
):
return ["operator", "negate", "left", "right"]
else:
return COMPARE_FAILED
else:
return ["operator", "negate"]
else:
return COMPARE_FAILED
def compare_bindparam(self, left, right, **kw):
compare_values = kw.pop("compare_values", True)
if compare_values:
return []
else:
# this means, "skip these, we already compared"
return ["callable", "value"]
class ColIdentityComparatorStrategy(TraversalComparatorStrategy):
def compare_column_element(
self, left, right, use_proxies=True, equivalents=(), **kw
):
"""Compare ColumnElements using proxies and equivalent collections.
This is a comparison strategy specific to the ORM.
"""
to_compare = (right,)
if equivalents and right in equivalents:
to_compare = equivalents[right].union(to_compare)
for oth in to_compare:
if use_proxies and left.shares_lineage(oth):
return SKIP_TRAVERSE
elif hash(left) == hash(right):
return SKIP_TRAVERSE
else:
return COMPARE_FAILED
def compare_column(self, left, right, **kw):
return self.compare_column_element(left, right, **kw)
def compare_label(self, left, right, **kw):
return self.compare_column_element(left, right, **kw)
def compare_table(self, left, right, **kw):
# tables compare on identity, since it's not really feasible to
# compare them column by column with the above rules
return SKIP_TRAVERSE if left is right else COMPARE_FAILED
| 33.191288 | 79 | 0.561227 |
4a13557a115409a096ed41e7cf7dcfe04a4118ed | 134 | py | Python | app/eventFrameAttributeTemplates/__init__.py | DeschutesBrewery/brewerypi | 5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f | ["MIT"] | 27 | 2017-11-27T05:01:05.000Z | 2020-11-14T19:52:26.000Z | app/eventFrameAttributeTemplates/__init__.py | DeschutesBrewery/brewerypi | 5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f | ["MIT"] | 259 | 2017-11-23T00:43:26.000Z | 2020-11-03T01:07:30.000Z | app/eventFrameAttributeTemplates/__init__.py | DeschutesBrewery/brewerypi | 5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f | ["MIT"] | 8 | 2018-10-29T04:39:29.000Z | 2020-10-01T22:18:12.000Z |
from flask import Blueprint
eventFrameAttributeTemplates = Blueprint("eventFrameAttributeTemplates", __name__)
from . import routes
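# Illustrative registration (hypothetical app factory, not part of this file;
# the url_prefix is our assumption):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(eventFrameAttributeTemplates,
#                            url_prefix="/eventFrameAttributeTemplates")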
| 22.333333 | 82 | 0.843284 |
4a135618a7d842f96b8bba06bf2ab3dd80a9311a | 1,631 | py | Python | BondsPractitioners/spiders/ajzq.py | hahadaxia/BondsPractitioners | a8d5ac82f3385ae5eabeb4e710b6807bf31ccdda | ["MIT"] | null | null | null | BondsPractitioners/spiders/ajzq.py | hahadaxia/BondsPractitioners | a8d5ac82f3385ae5eabeb4e710b6807bf31ccdda | ["MIT"] | 5 | 2021-03-29T19:44:28.000Z | 2022-03-02T15:14:55.000Z | BondsPractitioners/spiders/ajzq.py | hahadaxia/BondsPractitioners | a8d5ac82f3385ae5eabeb4e710b6807bf31ccdda | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
import json
from scrapy import Selector
from BondsPractitioners.spiders import set_item
class AjzqSpider(scrapy.Spider):
name = 'ajzq'
allowed_domains = ['ajzq.com']
start_urls = ['http://www.ajzq.com/service/mainArticle/15453']
    com_name = '爱建证券有限责任公司'  # Aijian Securities Co., Ltd.
author = 'huangkai'
def parse(self, response):
res = json.loads(response.text)['data']['content']
tables = Selector(text=res).xpath('//table')
job = 'job'
        # Process active front-office (business) staff
for tr in tables[0].xpath('.//tr')[1:]:
td = tr.xpath('.//td/p/span/text()').getall()
if len(td) == 4:
job = td[0]
if 3 <= len(td) <= 4:
yield set_item(['com', 'kind', 'state', 'job', 'name', 'dpt', 'duty'],
[self.com_name, '前台', '在职', job] + td[-3:])
        # Process active middle- and back-office staff
for tr in tables[1].xpath('.//tr')[1:]:
td = tr.xpath('.//td/p/span/text()').getall()
if len(td) == 5:
job = td[0]
if 4 <= len(td) <= 5:
yield set_item(['com', 'kind', 'state', 'job', 'name', 'dpt', 'duty', 'phone'],
[self.com_name, '中后台', '在职', job] + td[-4:])
        # Process departed (former) staff
for tr in tables[2].xpath('.//tr')[1:]:
td = tr.xpath('.//td/p/span/text()').getall()
if len(td) == 4:
job = td[0]
if 3 <= len(td) <= 4:
yield set_item(['com', 'state', 'name', 'ldate', 'dpt', 'duty'],
[self.com_name, '离职', job] + td[-3:])
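# Illustrative note (assumed from the field lists above): each yielded item
# pairs the listed keys with the listed values, e.g. a front-office row becomes
# roughly {'com': '爱建证券有限责任公司', 'kind': '前台', 'state': '在职',
# 'job': <job>, 'name': ..., 'dpt': ..., 'duty': ...}. set_item is imported
# from BondsPractitioners.spiders; its exact return type is not shown here.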
| 35.456522 | 95 | 0.458614 |
4a13578af592cd7c80f526c50291ba29100e5062 | 933 | py | Python | maluforce/validators.py | rmcferrao/maluforce | 12c776dc129c8d778086e22fd8ad9de996816081 | ["MIT"] | null | null | null | maluforce/validators.py | rmcferrao/maluforce | 12c776dc129c8d778086e22fd8ad9de996816081 | ["MIT"] | null | null | null | maluforce/validators.py | rmcferrao/maluforce | 12c776dc129c8d778086e22fd8ad9de996816081 | ["MIT"] | null | null | null |
import re
import os
def validId(id):
"""
[input]
* id - str, list with the affiliation_id to be evaluated
[output]
* list - ['Pagar.me','Mundi','Stone',None]
"""
strFlag = False
if type(id) is str:
id = [id]
strFlag = True
out = [None] * len(id)
re_map = {"Pagar.me":"[a-f\\d]{24}", "Mundi":"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}","Stone":"[\\d]+"}
for index, value in enumerate(id):
for re_test in re_map:
if bool(re.fullmatch(re_map[re_test],value)):
out[index] = re_test
return out[0] if strFlag else out
def fixCNPJ(cnpj, n):
b = str(cnpj)
while len(b) < n:
b = "0" + b
return b
def path_formatter(path):
if path is not None:
if path[-1] != "/":
path+='/'
else:
path = os.getcwd() + "/"
return path
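# Illustrative usage (ours, derived from the regex map and padding logic above):
#
#     validId("0123456789abcdef01234567")   # -> 'Pagar.me' (24 hex chars)
#     validId(["12345", "not-an-id"])       # -> ['Stone', None]
#     fixCNPJ(123, 5)                       # -> '00123' (str(123).zfill(5) is equivalent)
#     path_formatter("/tmp/out")            # -> '/tmp/out/'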
| 25.916667 | 144 | 0.493033 |
4a13589b6ef7b4a438327a1de524a4d91ef80211 | 5,908 | py | Python | tensorflow/python/util/tf_decorator.py | TTrapper/tensorflow | 64f0ebd33a7c868da3c8f1ea15adf358c578f227 | ["Apache-2.0"] | null | null | null | tensorflow/python/util/tf_decorator.py | TTrapper/tensorflow | 64f0ebd33a7c868da3c8f1ea15adf358c578f227 | ["Apache-2.0"] | null | null | null | tensorflow/python/util/tf_decorator.py | TTrapper/tensorflow | 64f0ebd33a7c868da3c8f1ea15adf358c578f227 | ["Apache-2.0"] | 1 | 2019-01-08T07:16:49.000Z | 2019-01-08T07:16:49.000Z |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base TFDecorator class and utility functions for working with decorators.
There are two ways to create decorators that TensorFlow can introspect into.
This is important for documentation generation purposes, so that function
signatures aren't obscured by the (*args, **kwds) signature that decorators
often provide.
1. Call `tf_decorator.make_decorator` on your wrapper function. If your
decorator is stateless, or can capture all of the variables it needs to work
with through lexical closure, this is the simplest option. Create your wrapper
function as usual, but instead of returning it, return
`tf_decorator.make_decorator(your_wrapper)`. This will attach some decorator
introspection metadata onto your wrapper and return it.
Example:
def print_hello_before_calling(target):
def wrapper(*args, **kwargs):
print('hello')
return target(*args, **kwargs)
return tf_decorator.make_decorator(wrapper)
2. Derive from TFDecorator. If your decorator needs to be stateful, you can
implement it in terms of a TFDecorator. Store whatever state you need in your
derived class, and implement the `__call__` method to do your work before
calling into your target. You can retrieve the target via
`super(MyDecoratorClass, self).decorated_target`, and call it with whatever
parameters it needs.
Example:
class CallCounter(tf_decorator.TFDecorator):
def __init__(self, target):
super(CallCounter, self).__init__('count_calls', target)
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
return super(CallCounter, self).decorated_target(*args, **kwargs)
def count_calls(target):
return CallCounter(target)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools as _functools
import traceback as _traceback
def make_decorator(target,
decorator_func,
decorator_name=None,
decorator_doc='',
decorator_argspec=None):
"""Make a decorator from a wrapper and a target.
Args:
target: The final callable to be wrapped.
decorator_func: The wrapper function.
decorator_name: The name of the decorator. If `None`, the name of the
function calling make_decorator.
decorator_doc: Documentation specific to this application of
`decorator_func` to `target`.
decorator_argspec: The new callable signature of this decorator.
Returns:
The `decorator_func` argument with new metadata attached.
"""
if decorator_name is None:
frame = _traceback.extract_stack(limit=2)[0]
# frame name is tuple[2] in python2, and object.name in python3
decorator_name = getattr(frame, 'name', frame[2]) # Caller's name
decorator = TFDecorator(decorator_name, target, decorator_doc,
decorator_argspec)
setattr(decorator_func, '_tf_decorator', decorator)
decorator_func.__name__ = target.__name__
decorator_func.__module__ = target.__module__
decorator_func.__doc__ = decorator.__doc__
decorator_func.__wrapped__ = target
return decorator_func
def unwrap(maybe_tf_decorator):
"""Unwraps an object into a list of TFDecorators and a final target.
Args:
maybe_tf_decorator: Any callable object.
Returns:
    A tuple whose first element is a list of TFDecorator-derived objects that
were applied to the final callable target, and whose second element is the
final undecorated callable target. If the `maybe_tf_decorator` parameter is
not decorated by any TFDecorators, the first tuple element will be an empty
list. The `TFDecorator` list is ordered from outermost to innermost
decorators.
"""
decorators = []
cur = maybe_tf_decorator
while True:
if isinstance(cur, TFDecorator):
decorators.append(cur)
elif hasattr(cur, '_tf_decorator'):
decorators.append(getattr(cur, '_tf_decorator'))
else:
break
cur = decorators[-1].decorated_target
return decorators, cur
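# Illustrative usage (ours, not part of the module), building on the
# CallCounter / count_calls example from the module docstring:
#
#     @count_calls
#     def foo():
#       pass
#
#     decorators, target = unwrap(foo)
#     # decorators -> [<the CallCounter TFDecorator>], target -> the original foo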
class TFDecorator(object):
"""Base class for all TensorFlow decorators.
TFDecorator captures and exposes the wrapped target, and provides details
about the current decorator.
"""
def __init__(self,
decorator_name,
target,
decorator_doc='',
decorator_argspec=None):
self._decorated_target = target
self._decorator_name = decorator_name
self._decorator_doc = decorator_doc
self._decorator_argspec = decorator_argspec
self.__name__ = target.__name__
if self._decorator_doc:
self.__doc__ = self._decorator_doc
elif target.__doc__:
self.__doc__ = target.__doc__
else:
self.__doc__ = ''
def __get__(self, obj, objtype):
return _functools.partial(self.__call__, obj)
def __call__(self, *args, **kwargs):
return self._decorated_target(*args, **kwargs)
@property
def decorated_target(self):
return self._decorated_target
@property
def decorator_name(self):
return self._decorator_name
@property
def decorator_doc(self):
return self._decorator_doc
@property
def decorator_argspec(self):
return self._decorator_argspec
| 34.549708 | 80 | 0.724949 |
4a1359255286102e4d5111ee3bd93ea75b79b482 | 87,054 | py | Python | pygem/glacierdynamics.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | ["MIT"] | 25 | 2019-06-12T21:08:24.000Z | 2022-03-01T08:05:14.000Z | pygem/glacierdynamics.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | ["MIT"] | 2 | 2020-04-23T14:08:00.000Z | 2020-06-04T13:52:44.000Z | pygem/glacierdynamics.py | Wang518hongyu/PyGEM | 1c9fa133133b3d463b1383d4792c535fa61c5b8d | ["MIT"] | 24 | 2019-06-12T19:48:40.000Z | 2022-02-16T03:42:53.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 14:00:14 2020
@author: davidrounce
"""
from collections import OrderedDict
from time import gmtime, strftime
import numpy as np
#import pandas as pd
#import netCDF4
import xarray as xr
from oggm import cfg, utils
from oggm.core.flowline import FlowlineModel
from oggm.exceptions import InvalidParamsError
from oggm import __version__
import pygem.pygem_input as pygem_prms
cfg.initialize()
#%%
class MassRedistributionCurveModel(FlowlineModel):
"""Glacier geometry updated using mass redistribution curves; also known as the "delta-h method"
This uses mass redistribution curves from Huss et al. (2010) to update the glacier geometry
"""
def __init__(self, flowlines, mb_model=None, y0=0.,
inplace=False,
debug=True,
option_areaconstant=False, spinupyears=pygem_prms.ref_spinupyears,
constantarea_years=pygem_prms.constantarea_years,
**kwargs):
""" Instanciate the model.
Parameters
----------
flowlines : list
the glacier flowlines
mb_model : MassBalanceModel
the mass-balance model
y0 : int
initial year of the simulation
inplace : bool
whether or not to make a copy of the flowline objects for the run
setting to True implies that your objects will be modified at run
time by the model (can help to spare memory)
is_tidewater: bool, default: False
use the very basic parameterization for tidewater glaciers
mb_elev_feedback : str, default: 'annual'
'never', 'always', 'annual', or 'monthly': how often the
mass-balance should be recomputed from the mass balance model.
'Never' is equivalent to 'annual' but without elevation feedback
at all (the heights are taken from the first call).
check_for_boundaries: bool, default: True
raise an error when the glacier grows bigger than the domain
boundaries
"""
super(MassRedistributionCurveModel, self).__init__(flowlines, mb_model=mb_model, y0=y0, inplace=inplace,
mb_elev_feedback='annual', **kwargs)
self.option_areaconstant = option_areaconstant
self.constantarea_years = constantarea_years
self.spinupyears = spinupyears
self.glac_idx_initial = [fl.thick.nonzero()[0] for fl in flowlines]
self.y0 = 0
# widths_t0 = flowlines[0].widths_m
# area_v1 = widths_t0 * flowlines[0].dx_meter
# print('area v1:', area_v1.sum())
# area_v2 = np.copy(area_v1)
# area_v2[flowlines[0].thick == 0] = 0
# print('area v2:', area_v2.sum())
# HERE IS THE STUFF TO RECORD FOR EACH FLOWLINE!
self.calving_m3_since_y0 = 0. # total calving since time y0
assert len(flowlines) == 1, 'MassRedistributionCurveModel is not set up for multiple flowlines'
def run_until(self, y1, run_single_year=False):
"""Runs the model from the current year up to a given year date y1.
This function runs the model for the time difference y1-self.y0
If self.y0 has not been specified at some point, it is 0 and y1 will
be the time span in years to run the model for.
Parameters
----------
y1 : float
Upper time span for how long the model should run
"""
# We force timesteps to yearly timesteps
if run_single_year:
self.updategeometry(y1)
else:
years = np.arange(self.yr, y1)
for year in years:
self.updategeometry(year)
# Check for domain bounds
if self.check_for_boundaries:
if self.fls[-1].thick[-1] > 10:
raise RuntimeError('Glacier exceeds domain boundaries, '
'at year: {}'.format(self.yr))
# Check for NaNs
for fl in self.fls:
if np.any(~np.isfinite(fl.thick)):
raise FloatingPointError('NaN in numerical solution.')
def run_until_and_store(self, y1, run_path=None, diag_path=None,
store_monthly_step=None):
"""Runs the model and returns intermediate steps in xarray datasets.
This function repeatedly calls FlowlineModel.run_until for either
monthly or yearly time steps up till the upper time boundary y1.
Parameters
----------
y1 : int
Upper time span for how long the model should run (needs to be
a full year)
run_path : str
Path and filename where to store the model run dataset
diag_path : str
Path and filename where to store the model diagnostics dataset
store_monthly_step : Bool
If True (False) model diagnostics will be stored monthly (yearly).
If unspecified, we follow the update of the MB model, which
defaults to yearly (see __init__).
Returns
-------
run_ds : xarray.Dataset
stores the entire glacier geometry. It is useful to visualize the
glacier geometry or to restart a new run from a modelled geometry.
            The glacier state is stored at the beginning of each hydrological
year (not in between in order to spare disk space).
diag_ds : xarray.Dataset
stores a few diagnostic variables such as the volume, area, length
and ELA of the glacier.
"""
if int(y1) != y1:
raise InvalidParamsError('run_until_and_store only accepts '
'integer year dates.')
if not self.mb_model.hemisphere:
raise InvalidParamsError('run_until_and_store needs a '
'mass-balance model with an unambiguous '
'hemisphere.')
# time
yearly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
if store_monthly_step is None:
store_monthly_step = self.mb_step == 'monthly'
if store_monthly_step:
monthly_time = utils.monthly_timeseries(self.yr, y1)
else:
monthly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
yrs, months = utils.floatyear_to_date(monthly_time)
cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
start_month=sm)
# init output
if run_path is not None:
self.to_netcdf(run_path)
ny = len(yearly_time)
if ny == 1:
yrs = [yrs]
cyrs = [cyrs]
months = [months]
cmonths = [cmonths]
nm = len(monthly_time)
sects = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
widths = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
bucket = [(np.zeros(ny) * np.NaN) for _ in self.fls]
diag_ds = xr.Dataset()
# Global attributes
diag_ds.attrs['description'] = 'OGGM model output'
diag_ds.attrs['oggm_version'] = __version__
diag_ds.attrs['calendar'] = '365-day no leap'
diag_ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
diag_ds.attrs['hemisphere'] = self.mb_model.hemisphere
diag_ds.attrs['water_level'] = self.water_level
# Coordinates
diag_ds.coords['time'] = ('time', monthly_time)
diag_ds.coords['hydro_year'] = ('time', yrs)
diag_ds.coords['hydro_month'] = ('time', months)
diag_ds.coords['calendar_year'] = ('time', cyrs)
diag_ds.coords['calendar_month'] = ('time', cmonths)
diag_ds['time'].attrs['description'] = 'Floating hydrological year'
diag_ds['hydro_year'].attrs['description'] = 'Hydrological year'
diag_ds['hydro_month'].attrs['description'] = 'Hydrological month'
diag_ds['calendar_year'].attrs['description'] = 'Calendar year'
diag_ds['calendar_month'].attrs['description'] = 'Calendar month'
# Variables and attributes
diag_ds['volume_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_m3'].attrs['description'] = 'Total glacier volume'
diag_ds['volume_m3'].attrs['unit'] = 'm 3'
if self.is_marine_terminating:
diag_ds['volume_bsl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bsl_m3'].attrs['description'] = ('Glacier volume '
'below '
'sea-level')
diag_ds['volume_bsl_m3'].attrs['unit'] = 'm 3'
diag_ds['volume_bwl_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['volume_bwl_m3'].attrs['description'] = ('Glacier volume '
'below '
'water-level')
diag_ds['volume_bwl_m3'].attrs['unit'] = 'm 3'
diag_ds['area_m2'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['area_m2'].attrs['description'] = 'Total glacier area'
diag_ds['area_m2'].attrs['unit'] = 'm 2'
diag_ds['length_m'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['length_m'].attrs['description'] = 'Glacier length'
diag_ds['length_m'].attrs['unit'] = 'm 3'
diag_ds['ela_m'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['ela_m'].attrs['description'] = ('Annual Equilibrium Line '
'Altitude (ELA)')
diag_ds['ela_m'].attrs['unit'] = 'm a.s.l'
if self.is_tidewater:
diag_ds['calving_m3'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_m3'].attrs['description'] = ('Total accumulated '
'calving flux')
diag_ds['calving_m3'].attrs['unit'] = 'm 3'
diag_ds['calving_rate_myr'] = ('time', np.zeros(nm) * np.NaN)
diag_ds['calving_rate_myr'].attrs['description'] = 'Calving rate'
diag_ds['calving_rate_myr'].attrs['unit'] = 'm yr-1'
# Run
j = 0
for i, (yr, mo) in enumerate(zip(yearly_time[:-1], months[:-1])):
# Record initial parameters
if i == 0:
diag_ds['volume_m3'].data[i] = self.volume_m3
diag_ds['area_m2'].data[i] = self.area_m2
diag_ds['length_m'].data[i] = self.length_m
self.run_until(yr, run_single_year=True)
# Model run
if mo == 1:
for s, w, b, fl in zip(sects, widths, bucket, self.fls):
s[j, :] = fl.section
w[j, :] = fl.widths_m
if self.is_tidewater:
try:
b[j] = fl.calving_bucket_m3
except AttributeError:
pass
j += 1
# Diagnostics
diag_ds['volume_m3'].data[i+1] = self.volume_m3
diag_ds['area_m2'].data[i+1] = self.area_m2
diag_ds['length_m'].data[i+1] = self.length_m
if self.is_tidewater:
diag_ds['calving_m3'].data[i] = self.calving_m3_since_y0
diag_ds['calving_rate_myr'].data[i] = self.calving_rate_myr
if self.is_marine_terminating:
diag_ds['volume_bsl_m3'].data[i] = self.volume_bsl_m3
diag_ds['volume_bwl_m3'].data[i] = self.volume_bwl_m3
# to datasets
run_ds = []
for (s, w, b) in zip(sects, widths, bucket):
ds = xr.Dataset()
ds.attrs['description'] = 'OGGM model output'
ds.attrs['oggm_version'] = __version__
ds.attrs['calendar'] = '365-day no leap'
ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
gmtime())
ds.coords['time'] = yearly_time
ds['time'].attrs['description'] = 'Floating hydrological year'
varcoords = OrderedDict(time=('time', yearly_time),
year=('time', yearly_time))
ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'),
coords=varcoords)
ds['ts_width_m'] = xr.DataArray(w, dims=('time', 'x'),
coords=varcoords)
if self.is_tidewater:
ds['ts_calving_bucket_m3'] = xr.DataArray(b, dims=('time', ),
coords=varcoords)
run_ds.append(ds)
# write output?
if run_path is not None:
encode = {'ts_section': {'zlib': True, 'complevel': 5},
'ts_width_m': {'zlib': True, 'complevel': 5},
}
for i, ds in enumerate(run_ds):
ds.to_netcdf(run_path, 'a', group='fl_{}'.format(i),
encoding=encode)
# Add other diagnostics
diag_ds.to_netcdf(run_path, 'a')
if diag_path is not None:
diag_ds.to_netcdf(diag_path)
return run_ds, diag_ds
def updategeometry(self, year):
"""Update geometry for a given year"""
# print('year:', year)
# Loop over flowlines
for fl_id, fl in enumerate(self.fls):
# Flowline state
heights = fl.surface_h.copy()
section_t0 = fl.section.copy()
thick_t0 = fl.thick.copy()
width_t0 = fl.widths_m.copy()
# CONSTANT AREAS
# Mass redistribution ignored for calibration and spinup years (glacier properties constant)
if (self.option_areaconstant) or (year < self.spinupyears) or (year < self.constantarea_years):
# run mass balance
glac_bin_massbalclim_annual = self.mb_model.get_annual_mb(heights, fls=self.fls, fl_id=fl_id,
year=year, debug=False)
# MASS REDISTRIBUTION
else:
# ----- FRONTAL ABLATION!!! -----
# if year == 0:
# print('\nHERE WE NEED THE GET FRONTAL ABLATION!\n')
# # First, remove volume lost to frontal ablation
# # changes to _t0 not _t1, since t1 will be done in the mass redistribution
# if glac_bin_frontalablation[:,step].max() > 0:
# # Frontal ablation loss [mwe]
# # fa_change tracks whether entire bin is lost or not
# fa_change = abs(glac_bin_frontalablation[:, step] * pygem_prms.density_water / pygem_prms.density_ice
# - icethickness_t0)
# fa_change[fa_change <= pygem_prms.tolerance] = 0
#
# if debug:
# bins_wfa = np.where(glac_bin_frontalablation[:,step] > 0)[0]
# print('glacier area t0:', glacier_area_t0[bins_wfa].round(3))
# print('ice thickness t0:', icethickness_t0[bins_wfa].round(1))
# print('frontalablation [m ice]:', (glac_bin_frontalablation[bins_wfa, step] *
# pygem_prms.density_water / pygem_prms.density_ice).round(1))
# print('frontal ablation [mice] vs icethickness:', fa_change[bins_wfa].round(1))
#
# # Check if entire bin is removed
# glacier_area_t0[np.where(fa_change == 0)[0]] = 0
# icethickness_t0[np.where(fa_change == 0)[0]] = 0
# width_t0[np.where(fa_change == 0)[0]] = 0
# # Otherwise, reduce glacier area such that glacier retreats and ice thickness remains the same
# # A_1 = (V_0 - V_loss) / h_1, units: A_1 = (m ice * km2) / (m ice) = km2
# glacier_area_t0[np.where(fa_change != 0)[0]] = (
# (glacier_area_t0[np.where(fa_change != 0)[0]] *
# icethickness_t0[np.where(fa_change != 0)[0]] -
# glacier_area_t0[np.where(fa_change != 0)[0]] *
# glac_bin_frontalablation[np.where(fa_change != 0)[0], step] * pygem_prms.density_water
# / pygem_prms.density_ice) / icethickness_t0[np.where(fa_change != 0)[0]])
#
# if debug:
# print('glacier area t1:', glacier_area_t0[bins_wfa].round(3))
# print('ice thickness t1:', icethickness_t0[bins_wfa].round(1))
# Redistribute mass if glacier was not fully removed by frontal ablation
if len(section_t0.nonzero()[0]) > 0:
# Mass redistribution according to Huss empirical curves
# Annual glacier mass balance [m ice s-1]
glac_bin_massbalclim_annual = self.mb_model.get_annual_mb(heights, fls=self.fls, fl_id=fl_id,
year=year, debug=False)
sec_in_year = (self.mb_model.dates_table.loc[12*year:12*(year+1)-1,'daysinmonth'].values.sum()
* 24 * 3600)
# print(' volume change [m3]:', (glac_bin_massbalclim_annual * sec_in_year *
# (width_t0 * fl.dx_meter)).sum())
# print(glac_bin_masssbalclim_annual)
# print(sec_in_year)
# print(width_t0.sum())
# print(fl.dx_meter)
# print(width_t0 * fl.dx_meter)
# # Debugging block
# debug_years = [71]
# if year in debug_years:
# print(year, glac_bin_massbalclim_annual)
# print('section t0:', section_t0)
# print('thick_t0:', thick_t0)
# print('width_t0:', width_t0)
# print(self.glac_idx_initial[fl_id])
# print('heights:', heights)
self._massredistributionHuss(section_t0, thick_t0, width_t0, glac_bin_massbalclim_annual,
self.glac_idx_initial[fl_id], heights, sec_in_year=sec_in_year)
# Record glacier properties (volume [m3], area [m2], thickness [m], width [km])
# record the next year's properties as well
# 'year + 1' used so the glacier properties are consistent with mass balance computations
year = int(year) # required to ensure proper indexing with run_until_and_store (10/21/2020)
glacier_area = fl.widths_m * fl.dx_meter
glacier_area[fl.thick == 0] = 0
self.mb_model.glac_bin_area_annual[:,year+1] = glacier_area
self.mb_model.glac_bin_icethickness_annual[:,year+1] = fl.thick
self.mb_model.glac_bin_width_annual[:,year+1] = fl.widths_m
self.mb_model.glac_wide_area_annual[year+1] = glacier_area.sum()
self.mb_model.glac_wide_volume_annual[year+1] = (fl.section * fl.dx_meter).sum()
#%%%% ====== START OF MASS REDISTRIBUTION CURVE
def _massredistributionHuss(self, section_t0, thick_t0, width_t0, glac_bin_massbalclim_annual,
glac_idx_initial, heights, debug=False, hindcast=0, sec_in_year=365*24*3600):
"""
Mass redistribution according to empirical equations from Huss and Hock (2015) accounting for retreat/advance.
glac_idx_initial is required to ensure that the glacier does not advance to area where glacier did not exist
before (e.g., retreat and advance over a vertical cliff)
Note: since OGGM uses the DEM, heights along the flowline do not necessarily decrease, i.e., there can be
overdeepenings along the flowlines that occur as the glacier retreats. This is problematic for 'adding' a bin
downstream in cases of glacier advance because you'd be moving new ice to a higher elevation. To avoid this
unrealistic case, in the event that this would occur, the overdeepening will simply fill up with ice first until
it reaches an elevation where it would put new ice into a downstream bin.
Parameters
----------
section_t0 : np.ndarray
Glacier cross-sectional area (m2) from previous year for each elevation bin
thick_t0 : np.ndarray
Glacier ice thickness [m] from previous year for each elevation bin
width_t0 : np.ndarray
Glacier width [m] from previous year for each elevation bin
glac_bin_massbalclim_annual : np.ndarray
Climatic mass balance [m ice s-1] for each elevation bin and year
glac_idx_initial : np.ndarray
Initial glacier indices
debug : Boolean
option to turn on print statements for development or debugging of code (default False)
Returns
-------
Updates the flowlines automatically, so does not return anything
"""
# Glacier area [m2]
glacier_area_t0 = width_t0 * self.fls[0].dx_meter
glacier_area_t0[thick_t0 == 0] = 0
# Annual glacier-wide volume change [m3]
# units: [m ice / s] * [s] * [m2] = m3 ice
glacier_volumechange = (glac_bin_massbalclim_annual * sec_in_year * glacier_area_t0).sum()
# For hindcast simulations, volume change is the opposite
if hindcast == 1:
glacier_volumechange = -1 * glacier_volumechange
if debug:
print('\nDebugging Mass Redistribution Huss function\n')
print('glacier volume change:', glacier_volumechange)
# If volume loss is less than the glacier volume, then redistribute mass loss/gains across the glacier;
# otherwise, the glacier disappears (area and thickness were already set to zero above)
glacier_volume_total = (self.fls[0].section * self.fls[0].dx_meter).sum()
if -1 * glacier_volumechange < glacier_volume_total:
# Determine where glacier exists
glac_idx_t0 = self.fls[0].thick.nonzero()[0]
# Compute ice thickness [m ice], glacier area [m2], ice thickness change [m ice] after redistribution
if pygem_prms.option_massredistribution == 1:
icethickness_change, glacier_volumechange_remaining = (
self._massredistributioncurveHuss(section_t0, thick_t0, width_t0, glac_idx_t0,
glacier_volumechange, glac_bin_massbalclim_annual,
heights, debug=False))
if debug:
# print('ice thickness change:', icethickness_change)
print('\nmax icethickness change:', np.round(icethickness_change.max(),3),
'\nmin icethickness change:', np.round(icethickness_change.min(),3),
'\nvolume remaining:', glacier_volumechange_remaining)
nloop = 0
# Glacier retreat
# if glacier retreats (ice thickness == 0), volume change needs to be redistributed over glacier again
while glacier_volumechange_remaining < 0:
if debug:
print('\n\nGlacier retreating (loop ' + str(nloop) + '):')
section_t0_retreated = self.fls[0].section.copy()
thick_t0_retreated = self.fls[0].thick.copy()
width_t0_retreated = self.fls[0].widths_m.copy()
glacier_volumechange_remaining_retreated = glacier_volumechange_remaining.copy()
glac_idx_t0_retreated = thick_t0_retreated.nonzero()[0]
glacier_area_t0_retreated = width_t0_retreated * self.fls[0].dx_meter
glacier_area_t0_retreated[thick_t0 == 0] = 0
# Set climatic mass balance for the case when there are less than 3 bins
# distribute the remaining glacier volume change over the entire glacier (remaining bins)
massbalclim_retreat = np.zeros(thick_t0_retreated.shape)
massbalclim_retreat[glac_idx_t0_retreated] = (glacier_volumechange_remaining /
glacier_area_t0_retreated.sum() / sec_in_year)
# Mass redistribution
if pygem_prms.option_massredistribution == 1:
# Option 1: apply mass redistribution using Huss' empirical geometry change equations
icethickness_change, glacier_volumechange_remaining = (
self._massredistributioncurveHuss(
section_t0_retreated, thick_t0_retreated, width_t0_retreated, glac_idx_t0_retreated,
glacier_volumechange_remaining_retreated, massbalclim_retreat, heights, debug=False))
# Avoid rounding errors that get loop stuck
if abs(glacier_volumechange_remaining) < 1:
glacier_volumechange_remaining = 0
if debug:
print('ice thickness change:', icethickness_change)
print('\nmax icethickness change:', np.round(icethickness_change.max(),3),
'\nmin icethickness change:', np.round(icethickness_change.min(),3),
'\nvolume remaining:', glacier_volumechange_remaining)
nloop += 1
# Glacier advances
# based on ice thickness change exceeding threshold
# Overview:
# 1. Add new bin and fill it up to a maximum of terminus average ice thickness
# 2. If additional volume after adding new bin, then redistribute mass gain across all bins again,
# i.e., increase the ice thickness and width
            # 3. Repeat adding a new bin and redistributing the mass until no additional volume is left
while (icethickness_change > pygem_prms.icethickness_advancethreshold).any() == True:
if debug:
print('advancing glacier')
# Record glacier area and ice thickness before advance corrections applied
section_t0_raw = self.fls[0].section.copy()
thick_t0_raw = self.fls[0].thick.copy()
width_t0_raw = self.fls[0].widths_m.copy()
glacier_area_t0_raw = width_t0_raw * self.fls[0].dx_meter
if debug:
print('\n\nthickness t0:', thick_t0_raw)
print('glacier area t0:', glacier_area_t0_raw)
print('width_t0_raw:', width_t0_raw,'\n\n')
# Index bins that are advancing
icethickness_change[icethickness_change <= pygem_prms.icethickness_advancethreshold] = 0
glac_idx_advance = icethickness_change.nonzero()[0]
# Update ice thickness based on maximum advance threshold [m ice]
self.fls[0].thick[glac_idx_advance] = (self.fls[0].thick[glac_idx_advance] -
(icethickness_change[glac_idx_advance] - pygem_prms.icethickness_advancethreshold))
glacier_area_t1 = self.fls[0].widths_m.copy() * self.fls[0].dx_meter
# Advance volume [m3]
advance_volume = ((glacier_area_t0_raw[glac_idx_advance] * thick_t0_raw[glac_idx_advance]).sum()
- (glacier_area_t1[glac_idx_advance] * self.fls[0].thick[glac_idx_advance]).sum())
# Set the cross sectional area of the next bin
advance_section = advance_volume / self.fls[0].dx_meter
# Index of bin to add
glac_idx_t0 = self.fls[0].thick.nonzero()[0]
min_elev = self.fls[0].surface_h[glac_idx_t0].min()
glac_idx_bin2add = (
np.where(self.fls[0].surface_h ==
self.fls[0].surface_h[np.where(self.fls[0].surface_h < min_elev)[0]].max())[0][0])
section_2add = self.fls[0].section.copy()
section_2add[glac_idx_bin2add] = advance_section
self.fls[0].section = section_2add
# Advance characteristics
# Indices that define the glacier terminus
glac_idx_terminus = (
glac_idx_t0[(heights[glac_idx_t0] - heights[glac_idx_t0].min()) /
(heights[glac_idx_t0].max() - heights[glac_idx_t0].min()) * 100
< pygem_prms.terminus_percentage])
                # For glaciers with so few bands that the terminus is not identified (e.g., <= 4 bands for a 20% threshold),
# then use the information from all the bands
if glac_idx_terminus.shape[0] <= 1:
glac_idx_terminus = glac_idx_t0.copy()
if debug:
print('glacier index terminus:',glac_idx_terminus)
# Average area of glacier terminus [m2]
# exclude the bin at the terminus, since this bin may need to be filled first
try:
minelev_idx = np.where(heights == heights[glac_idx_terminus].min())[0][0]
glac_idx_terminus_removemin = list(glac_idx_terminus)
glac_idx_terminus_removemin.remove(minelev_idx)
terminus_thickness_avg = np.mean(self.fls[0].thick[glac_idx_terminus_removemin])
except:
glac_idx_terminus_initial = (
glac_idx_initial[(heights[glac_idx_initial] - heights[glac_idx_initial].min()) /
(heights[glac_idx_initial].max() - heights[glac_idx_initial].min()) * 100
< pygem_prms.terminus_percentage])
if glac_idx_terminus_initial.shape[0] <= 1:
glac_idx_terminus_initial = glac_idx_initial.copy()
minelev_idx = np.where(heights == heights[glac_idx_terminus_initial].min())[0][0]
glac_idx_terminus_removemin = list(glac_idx_terminus_initial)
glac_idx_terminus_removemin.remove(minelev_idx)
terminus_thickness_avg = np.mean(self.fls[0].thick[glac_idx_terminus_removemin])
# If last bin exceeds terminus thickness average then fill up the bin to average and redistribute mass
if self.fls[0].thick[glac_idx_bin2add] > terminus_thickness_avg:
self.fls[0].thick[glac_idx_bin2add] = terminus_thickness_avg
# Redistribute remaining mass
volume_added2bin = self.fls[0].section[glac_idx_bin2add] * self.fls[0].dx_meter
advance_volume -= volume_added2bin
# With remaining advance volume, add a bin or redistribute over existing bins if no bins left
if advance_volume > 0:
# Indices for additional bins below the terminus
glac_idx_t1 = np.where(glacier_area_t1 > 0)[0]
below_glac_idx = np.where(heights < heights[glac_idx_t1].min())[0]
# if no more bins below, then distribute volume over the glacier without further adjustments
# this occurs with OGGM flowlines when the terminus is in an overdeepening, so we just fill up
# the overdeepening
if len(below_glac_idx) == 0:
# Revert to the initial section, which also updates the thickness and width automatically
self.fls[0].section = section_t0_raw
# set icethickness change and advance_volume to 0 to break the loop
icethickness_change[icethickness_change > 0] = 0
advance_volume = 0
# otherwise, redistribute mass
else:
glac_idx_t0 = self.fls[0].thick.nonzero()[0]
glacier_area_t0 = self.fls[0].widths_m.copy() * self.fls[0].dx_meter
glac_bin_massbalclim_annual = np.zeros(self.fls[0].thick.shape)
glac_bin_massbalclim_annual[glac_idx_t0] = (glacier_volumechange_remaining /
glacier_area_t0.sum() / sec_in_year)
icethickness_change, glacier_volumechange_remaining = (
self._massredistributioncurveHuss(
self.fls[0].section.copy(), self.fls[0].thick.copy(), self.fls[0].widths_m.copy(),
glac_idx_t0, advance_volume, glac_bin_massbalclim_annual, heights, debug=False))
def _massredistributioncurveHuss(self, section_t0, thick_t0, width_t0, glac_idx_t0, glacier_volumechange,
massbalclim_annual, heights, debug=False):
"""
Apply the mass redistribution curves from Huss and Hock (2015).
This is paired with massredistributionHuss, which takes into consideration retreat and advance.
Parameters
----------
section_t0 : np.ndarray
Glacier cross-sectional area [m2] from previous year for each elevation bin
thick_t0 : np.ndarray
Glacier ice thickness [m] from previous year for each elevation bin
width_t0 : np.ndarray
Glacier width [m] from previous year for each elevation bin
glac_idx_t0 : np.ndarray
glacier indices for present timestep
glacier_volumechange : float
glacier-wide volume change [m3 ice] based on the annual climatic mass balance
massbalclim_annual : np.ndarray
Annual climatic mass balance [m ice s-1] for each elevation bin for a single year
Returns
-------
icethickness_change : np.ndarray
Ice thickness change [m] for each elevation bin
glacier_volumechange_remaining : float
Glacier volume change remaining [m3 ice]; occurs if there is less ice than melt in a bin, i.e., retreat
"""
if debug:
print('\nDebugging mass redistribution curve Huss\n')
# Apply Huss redistribution if there are at least 3 elevation bands; otherwise, use the mass balance
# Glacier area used to select parameters
glacier_area_t0 = width_t0 * self.fls[0].dx_meter
glacier_area_t0[thick_t0 == 0] = 0
# Apply mass redistribution curve
if glac_idx_t0.shape[0] > 3:
# Select the factors for the normalized ice thickness change curve based on glacier area
if glacier_area_t0.sum() > 20:
[gamma, a, b, c] = [6, -0.02, 0.12, 0]
elif glacier_area_t0.sum() > 5:
[gamma, a, b, c] = [4, -0.05, 0.19, 0.01]
else:
[gamma, a, b, c] = [2, -0.30, 0.60, 0.09]
# reset variables
elevrange_norm = np.zeros(glacier_area_t0.shape)
icethicknesschange_norm = np.zeros(glacier_area_t0.shape)
# Normalized elevation range [-]
# (max elevation - bin elevation) / (max_elevation - min_elevation)
elevrange_norm[glacier_area_t0 > 0] = ((heights[glac_idx_t0].max() - heights[glac_idx_t0]) /
(heights[glac_idx_t0].max() - heights[glac_idx_t0].min()))
# using indices as opposed to elevations automatically skips bins on the glacier that have no area
# such that the normalization is done only on bins where the glacier lies
# Normalized ice thickness change [-]
icethicknesschange_norm[glacier_area_t0 > 0] = ((elevrange_norm[glacier_area_t0 > 0] + a)**gamma +
b*(elevrange_norm[glacier_area_t0 > 0] + a) + c)
# delta_h = (h_n + a)**gamma + b*(h_n + a) + c
# indexing is faster here
# limit the icethicknesschange_norm to between 0 - 1 (ends of fxns not exactly 0 and 1)
icethicknesschange_norm[icethicknesschange_norm > 1] = 1
icethicknesschange_norm[icethicknesschange_norm < 0] = 0
# Huss' ice thickness scaling factor, fs_huss [m ice]
# units: m3 / (m2 * [-]) * (1000 m / 1 km) = m ice
fs_huss = glacier_volumechange / (glacier_area_t0 * icethicknesschange_norm).sum()
if debug:
print('fs_huss:', fs_huss)
# Volume change [m3 ice]
bin_volumechange = icethicknesschange_norm * fs_huss * glacier_area_t0
# Otherwise, compute volume change in each bin based on the climatic mass balance
else:
bin_volumechange = massbalclim_annual * glacier_area_t0
if debug:
print('-----\n')
vol_before = section_t0 * self.fls[0].dx_meter
# Update cross sectional area (updating thickness does not conserve mass in OGGM!)
# volume change divided by length (dx); units m2
section_change = bin_volumechange / self.fls[0].dx_meter
self.fls[0].section = utils.clip_min(self.fls[0].section + section_change, 0)
# Ice thickness change [m ice]
icethickness_change = self.fls[0].thick - thick_t0
# Glacier volume
vol_after = self.fls[0].section * self.fls[0].dx_meter
if debug:
print('vol_chg_wanted:', bin_volumechange.sum())
print('vol_chg:', (vol_after.sum() - vol_before.sum()))
print('\n-----')
# Compute the remaining volume change
bin_volumechange_remaining = (bin_volumechange - (self.fls[0].section * self.fls[0].dx_meter -
section_t0 * self.fls[0].dx_meter))
# remove values below tolerance to avoid rounding errors
bin_volumechange_remaining[abs(bin_volumechange_remaining) < pygem_prms.tolerance] = 0
# Glacier volume change remaining - if less than zero, then needed for retreat
glacier_volumechange_remaining = bin_volumechange_remaining.sum()
if debug:
print(glacier_volumechange_remaining)
return icethickness_change, glacier_volumechange_remaining
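# --- Illustrative sketch (ours, not part of PyGEM): the normalized "delta-h"
# thickness-change curve of Huss and Hock (2015) used above, evaluated on its
# own. The (gamma, a, b, c) triplets follow _massredistributioncurveHuss; the
# area thresholds are assumed to be in km2 as in the original publication, and
# the helper name delta_h_curve is ours.
import numpy as np

def delta_h_curve(elev_norm, glacier_area_km2):
    """Normalized thickness change for normalized elevation (0 = top, 1 = terminus)."""
    if glacier_area_km2 > 20:          # large glaciers
        gamma, a, b, c = 6, -0.02, 0.12, 0
    elif glacier_area_km2 > 5:         # medium glaciers
        gamma, a, b, c = 4, -0.05, 0.19, 0.01
    else:                              # small glaciers
        gamma, a, b, c = 2, -0.30, 0.60, 0.09
    dh = (elev_norm + a) ** gamma + b * (elev_norm + a) + c
    # the curve ends are not exactly 0 and 1, so clip as the model code does
    return np.clip(dh, 0, 1)

# e.g. delta_h_curve(np.linspace(0, 1, 5), 30.0) ramps from ~0 at the glacier
# top to 1 at the terminus, concentrating thinning in the lowest bins.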
#%%
## ------ FLOWLINEMODEL FOR MODEL DIAGNOSTICS WITH OGGM (10/30/2020) -----
#import copy
#from functools import partial
#from oggm.core.inversion import find_sia_flux_from_thickness
#
#class FlowlineModel(object):
# """Interface to the actual model"""
#
# def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
# fs=None, inplace=False, smooth_trib_influx=True,
# is_tidewater=False, is_lake_terminating=False,
# mb_elev_feedback='annual', check_for_boundaries=None,
# water_level=None):
# """Create a new flowline model from the flowlines and a MB model.
# Parameters
# ----------
# flowlines : list
# a list of :py:class:`oggm.Flowline` instances, sorted by order
# mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`
# the MB model to use
# y0 : int
# the starting year of the simulation
# glen_a : float
# glen's parameter A
# fs: float
# sliding parameter
# inplace : bool
# whether or not to make a copy of the flowline objects for the run
# setting to True implies that your objects will be modified at run
# time by the model (can help to spare memory)
# smooth_trib_influx : bool
# whether to smooth the mass influx from the incoming tributary.
#             The default is to use a Gaussian kernel on a 9-grid-point
#             window.
# is_tidewater: bool, default: False
# is this a tidewater glacier?
# is_lake_terminating: bool, default: False
# is this a lake terminating glacier?
# mb_elev_feedback : str, default: 'annual'
# 'never', 'always', 'annual', or 'monthly': how often the
# mass-balance should be recomputed from the mass balance model.
# 'Never' is equivalent to 'annual' but without elevation feedback
# at all (the heights are taken from the first call).
# check_for_boundaries : bool
# whether the model should raise an error when the glacier exceeds
# the domain boundaries. The default is to follow
# PARAMS['error_when_glacier_reaches_boundaries']
# """
#
# widths_t0 = flowlines[0].widths_m
# area_v1 = widths_t0 * flowlines[0].dx_meter
# print('area v1:', area_v1.sum())
# area_v2 = np.copy(area_v1)
# area_v2[flowlines[0].thick == 0] = 0
# print('area v2:', area_v2.sum())
# print('thickness:', flowlines[0].thick)
#
# self.is_tidewater = is_tidewater
# self.is_lake_terminating = is_lake_terminating
# self.is_marine_terminating = is_tidewater and not is_lake_terminating
#
# if water_level is None:
# self.water_level = 0
# if self.is_lake_terminating:
# if not flowlines[-1].has_ice():
# raise InvalidParamsError('Set `water_level` for lake '
# 'terminating glaciers in '
# 'idealized runs')
# # Arbitrary water level 1m below last grid points elevation
# min_h = flowlines[-1].surface_h[flowlines[-1].thick > 0][-1]
# self.water_level = (min_h -
# cfg.PARAMS['free_board_lake_terminating'])
# else:
# self.water_level = water_level
#
# # Mass balance
# self.mb_elev_feedback = mb_elev_feedback.lower()
# if self.mb_elev_feedback in ['never', 'annual']:
# self.mb_step = 'annual'
# elif self.mb_elev_feedback in ['always', 'monthly']:
# self.mb_step = 'monthly'
# self.mb_model = mb_model
#
# # Defaults
# if glen_a is None:
# glen_a = cfg.PARAMS['glen_a']
# if fs is None:
# fs = cfg.PARAMS['fs']
# self.glen_a = glen_a
# self.fs = fs
# self.glen_n = cfg.PARAMS['glen_n']
# self.rho = cfg.PARAMS['ice_density']
# if check_for_boundaries is None:
# check_for_boundaries = cfg.PARAMS[('error_when_glacier_reaches_'
# 'boundaries')]
# self.check_for_boundaries = check_for_boundaries
#
# # we keep glen_a as input, but for optimisation we stick to "fd"
# self._fd = 2. / (cfg.PARAMS['glen_n']+2) * self.glen_a
#
# # Calving shenanigans
# self.calving_m3_since_y0 = 0. # total calving since time y0
# self.calving_rate_myr = 0.
#
# self.y0 = None
# self.t = None
# self.reset_y0(y0)
#
# self.fls = None
# self._tributary_indices = None
# self.reset_flowlines(flowlines, inplace=inplace,
# smooth_trib_influx=smooth_trib_influx)
#
# @property
# def mb_model(self):
# return self._mb_model
#
# @mb_model.setter
# def mb_model(self, value):
# # We need a setter because the MB func is stored as an attr too
# _mb_call = None
# if value:
# if self.mb_elev_feedback in ['always', 'monthly']:
# _mb_call = value.get_monthly_mb
# elif self.mb_elev_feedback in ['annual', 'never']:
# _mb_call = value.get_annual_mb
# else:
# raise ValueError('mb_elev_feedback not understood')
# self._mb_model = value
# self._mb_call = _mb_call
# self._mb_current_date = None
# self._mb_current_out = dict()
# self._mb_current_heights = dict()
#
# def reset_y0(self, y0):
# """Reset the initial model time"""
# self.y0 = y0
# self.t = 0
#
# def reset_flowlines(self, flowlines, inplace=False,
# smooth_trib_influx=True):
# """Reset the initial model flowlines"""
#
# if not inplace:
# flowlines = copy.deepcopy(flowlines)
#
# try:
# len(flowlines)
# except TypeError:
# flowlines = [flowlines]
#
# self.fls = flowlines
#
# # list of tributary coordinates and stuff
# trib_ind = []
# for fl in self.fls:
# # Important also
# fl.water_level = self.water_level
# if fl.flows_to is None:
# trib_ind.append((None, None, None, None))
# continue
# idl = self.fls.index(fl.flows_to)
# ide = fl.flows_to_indice
# if not smooth_trib_influx:
# gk = 1
# id0 = ide
# id1 = ide+1
# elif fl.flows_to.nx >= 9:
# gk = cfg.GAUSSIAN_KERNEL[9]
# id0 = ide-4
# id1 = ide+5
# elif fl.flows_to.nx >= 7:
# gk = cfg.GAUSSIAN_KERNEL[7]
# id0 = ide-3
# id1 = ide+4
# elif fl.flows_to.nx >= 5:
# gk = cfg.GAUSSIAN_KERNEL[5]
# id0 = ide-2
# id1 = ide+3
# trib_ind.append((idl, id0, id1, gk))
#
#        self._tributary_indices = trib_ind
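#
#        # Illustration (indices hypothetical): with the 9-point kernel, a
#        # tributary joining fl.flows_to at index ide spreads its influx over
#        # grid points [ide-4, ide+5), weighted by cfg.GAUSSIAN_KERNEL[9]
#        # (weights summing to 1); narrower windows are used for short lines.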
#
# @property
# def yr(self):
# return self.y0 + self.t / cfg.SEC_IN_YEAR
#
# @property
# def area_m2(self):
# return np.sum([f.area_m2 for f in self.fls])
#
# @property
# def volume_m3(self):
# return np.sum([f.volume_m3 for f in self.fls])
#
# @property
# def volume_km3(self):
# return self.volume_m3 * 1e-9
#
# @property
# def volume_bsl_m3(self):
# return np.sum([f.volume_bsl_m3 for f in self.fls])
#
# @property
# def volume_bsl_km3(self):
# return self.volume_bsl_m3 * 1e-9
#
# @property
# def volume_bwl_m3(self):
# return np.sum([f.volume_bwl_m3 for f in self.fls])
#
# @property
# def volume_bwl_km3(self):
# return self.volume_bwl_m3 * 1e-9
#
# @property
# def area_km2(self):
# return self.area_m2 * 1e-6
#
# @property
# def length_m(self):
# return self.fls[-1].length_m
#
# def get_mb(self, heights, year=None, fl_id=None, fls=None):
# """Get the mass balance at the requested height and time.
# Optimized so that no mb model call is necessary at each step.
# """
#
# # Do we even have to optimise?
# if self.mb_elev_feedback == 'always':
# return self._mb_call(heights, year=year, fl_id=fl_id, fls=fls)
#
# # Ok, user asked for it
# if fl_id is None:
#            raise ValueError('Need fl_id')
#
# if self.mb_elev_feedback == 'never':
# # The very first call we take the heights
# if fl_id not in self._mb_current_heights:
# # We need to reset just this tributary
# self._mb_current_heights[fl_id] = heights
# # All calls we replace
# heights = self._mb_current_heights[fl_id]
#
# date = utils.floatyear_to_date(year)
# if self.mb_elev_feedback in ['annual', 'never']:
# # ignore month changes
# date = (date[0], date[0])
#
# if self._mb_current_date == date:
# if fl_id not in self._mb_current_out:
# # We need to reset just this tributary
# self._mb_current_out[fl_id] = self._mb_call(heights,
# year=year,
# fl_id=fl_id,
# fls=fls)
# else:
# # We need to reset all
# self._mb_current_date = date
# self._mb_current_out = dict()
# self._mb_current_out[fl_id] = self._mb_call(heights,
# year=year,
# fl_id=fl_id,
# fls=fls)
#
# return self._mb_current_out[fl_id]
#
# def to_netcdf(self, path):
# """Creates a netcdf group file storing the state of the model."""
#
# flows_to_id = []
# for trib in self._tributary_indices:
# flows_to_id.append(trib[0] if trib[0] is not None else -1)
#
# ds = xr.Dataset()
# try:
# ds.attrs['description'] = 'OGGM model output'
# ds.attrs['oggm_version'] = __version__
# ds.attrs['calendar'] = '365-day no leap'
# ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
# ds['flowlines'] = ('flowlines', np.arange(len(flows_to_id)))
# ds['flows_to_id'] = ('flowlines', flows_to_id)
# ds.to_netcdf(path)
# for i, fl in enumerate(self.fls):
# ds = fl.to_dataset()
# ds.to_netcdf(path, 'a', group='fl_{}'.format(i))
# finally:
# ds.close()
#
# def check_domain_end(self):
# """Returns False if the glacier reaches the domains bound."""
# return np.isclose(self.fls[-1].thick[-1], 0)
#
# def step(self, dt):
# """Advance the numerical simulation of one single step.
# Important: the step dt is a maximum boundary that is *not* guaranteed
# to be met if dt is too large for the underlying numerical
# implementation. However, ``step(dt)`` should never cross the desired
# time step, i.e. if dt is small enough to ensure stability, step
# should match it.
#        The caller will know how much has actually been advanced by looking
#        at the output of ``step()`` or by monitoring ``self.t`` or ``self.yr``.
# Parameters
# ----------
# dt : float
# the step length in seconds
# Returns
# -------
# the actual dt chosen by the numerical implementation. Guaranteed to
# be dt or lower.
# """
# raise NotImplementedError
#
# def run_until(self, y1):
# """Runs the model from the current year up to a given year date y1.
# This function runs the model for the time difference y1-self.y0
# If self.y0 has not been specified at some point, it is 0 and y1 will
# be the time span in years to run the model for.
# Parameters
# ----------
# y1 : float
# Upper time span for how long the model should run
# """
#
# # We force timesteps to monthly frequencies for consistent results
# # among use cases (monthly or yearly output) and also to prevent
# # "too large" steps in the adaptive scheme.
# ts = utils.monthly_timeseries(self.yr, y1)
#
# # Add the last date to be sure we end on it
# ts = np.append(ts, y1)
#
# # Loop over the steps we want to meet
# for y in ts:
# t = (y - self.y0) * cfg.SEC_IN_YEAR
# # because of CFL, step() doesn't ensure that the end date is met
# # lets run the steps until we reach our desired date
# while self.t < t:
# self.step(t-self.t)
#
# # Check for domain bounds
# if self.check_for_boundaries:
# if self.fls[-1].thick[-1] > 10:
# raise RuntimeError('Glacier exceeds domain boundaries, '
# 'at year: {}'.format(self.yr))
#
# # Check for NaNs
# for fl in self.fls:
# if np.any(~np.isfinite(fl.thick)):
# raise FloatingPointError('NaN in numerical solution, '
# 'at year: {}'.format(self.yr))
#
# def run_until_and_store(self, y1, run_path=None, diag_path=None,
# store_monthly_step=None):
# """Runs the model and returns intermediate steps in xarray datasets.
# This function repeatedly calls FlowlineModel.run_until for either
# monthly or yearly time steps up till the upper time boundary y1.
# Parameters
# ----------
# y1 : int
# Upper time span for how long the model should run (needs to be
# a full year)
# run_path : str
# Path and filename where to store the model run dataset
# diag_path : str
# Path and filename where to store the model diagnostics dataset
#        store_monthly_step : bool
# If True (False) model diagnostics will be stored monthly (yearly).
# If unspecified, we follow the update of the MB model, which
# defaults to yearly (see __init__).
# Returns
# -------
# run_ds : xarray.Dataset
# stores the entire glacier geometry. It is useful to visualize the
# glacier geometry or to restart a new run from a modelled geometry.
#            The glacier state is stored at the beginning of each hydrological
# year (not in between in order to spare disk space).
# diag_ds : xarray.Dataset
# stores a few diagnostic variables such as the volume, area, length
# and ELA of the glacier.
# """
#
# if int(y1) != y1:
# raise InvalidParamsError('run_until_and_store only accepts '
# 'integer year dates.')
#
# if not self.mb_model.hemisphere:
# raise InvalidParamsError('run_until_and_store needs a '
# 'mass-balance model with an unambiguous '
# 'hemisphere.')
# # time
# yearly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
#
# if store_monthly_step is None:
# store_monthly_step = self.mb_step == 'monthly'
#
# if store_monthly_step:
# monthly_time = utils.monthly_timeseries(self.yr, y1)
# else:
# monthly_time = np.arange(np.floor(self.yr), np.floor(y1)+1)
#
# sm = cfg.PARAMS['hydro_month_' + self.mb_model.hemisphere]
#
# yrs, months = utils.floatyear_to_date(monthly_time)
# cyrs, cmonths = utils.hydrodate_to_calendardate(yrs, months,
# start_month=sm)
#
# # init output
# if run_path is not None:
# self.to_netcdf(run_path)
# ny = len(yearly_time)
# if ny == 1:
# yrs = [yrs]
# cyrs = [cyrs]
# months = [months]
# cmonths = [cmonths]
# nm = len(monthly_time)
# sects = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
# widths = [(np.zeros((ny, fl.nx)) * np.NaN) for fl in self.fls]
# bucket = [(np.zeros(ny) * np.NaN) for _ in self.fls]
# diag_ds = xr.Dataset()
#
# # Global attributes
# diag_ds.attrs['description'] = 'OGGM model output'
# diag_ds.attrs['oggm_version'] = __version__
# diag_ds.attrs['calendar'] = '365-day no leap'
# diag_ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
# gmtime())
# diag_ds.attrs['hemisphere'] = self.mb_model.hemisphere
# diag_ds.attrs['water_level'] = self.water_level
#
# # Coordinates
# diag_ds.coords['time'] = ('time', monthly_time)
# diag_ds.coords['hydro_year'] = ('time', yrs)
# diag_ds.coords['hydro_month'] = ('time', months)
# diag_ds.coords['calendar_year'] = ('time', cyrs)
# diag_ds.coords['calendar_month'] = ('time', cmonths)
#
# diag_ds['time'].attrs['description'] = 'Floating hydrological year'
# diag_ds['hydro_year'].attrs['description'] = 'Hydrological year'
# diag_ds['hydro_month'].attrs['description'] = 'Hydrological month'
# diag_ds['calendar_year'].attrs['description'] = 'Calendar year'
# diag_ds['calendar_month'].attrs['description'] = 'Calendar month'
#
# # Variables and attributes
# diag_ds['volume_m3'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['volume_m3'].attrs['description'] = 'Total glacier volume'
# diag_ds['volume_m3'].attrs['unit'] = 'm 3'
# if self.is_marine_terminating:
# diag_ds['volume_bsl_m3'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['volume_bsl_m3'].attrs['description'] = ('Glacier volume '
# 'below '
# 'sea-level')
# diag_ds['volume_bsl_m3'].attrs['unit'] = 'm 3'
# diag_ds['volume_bwl_m3'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['volume_bwl_m3'].attrs['description'] = ('Glacier volume '
# 'below '
# 'water-level')
# diag_ds['volume_bwl_m3'].attrs['unit'] = 'm 3'
#
# diag_ds['area_m2'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['area_m2'].attrs['description'] = 'Total glacier area'
# diag_ds['area_m2'].attrs['unit'] = 'm 2'
# diag_ds['length_m'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['length_m'].attrs['description'] = 'Glacier length'
#        diag_ds['length_m'].attrs['unit'] = 'm'
# diag_ds['ela_m'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['ela_m'].attrs['description'] = ('Annual Equilibrium Line '
# 'Altitude (ELA)')
# diag_ds['ela_m'].attrs['unit'] = 'm a.s.l'
# if self.is_tidewater:
# diag_ds['calving_m3'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['calving_m3'].attrs['description'] = ('Total accumulated '
# 'calving flux')
# diag_ds['calving_m3'].attrs['unit'] = 'm 3'
# diag_ds['calving_rate_myr'] = ('time', np.zeros(nm) * np.NaN)
# diag_ds['calving_rate_myr'].attrs['description'] = 'Calving rate'
# diag_ds['calving_rate_myr'].attrs['unit'] = 'm yr-1'
#
# # Run
# j = 0
# for i, (yr, mo) in enumerate(zip(monthly_time, months)):
# self.run_until(yr)
# # Model run
# if mo == 1:
# for s, w, b, fl in zip(sects, widths, bucket, self.fls):
# s[j, :] = fl.section
# w[j, :] = fl.widths_m
# if self.is_tidewater:
# try:
# b[j] = fl.calving_bucket_m3
# except AttributeError:
# pass
# j += 1
# # Diagnostics
# diag_ds['volume_m3'].data[i] = self.volume_m3
# diag_ds['area_m2'].data[i] = self.area_m2
# diag_ds['length_m'].data[i] = self.length_m
# try:
# ela_m = self.mb_model.get_ela(year=yr, fls=self.fls,
# fl_id=len(self.fls)-1)
# diag_ds['ela_m'].data[i] = ela_m
# except BaseException:
# # We really don't want to stop the model for some ELA issues
# diag_ds['ela_m'].data[i] = np.NaN
#
# if self.is_tidewater:
# diag_ds['calving_m3'].data[i] = self.calving_m3_since_y0
# diag_ds['calving_rate_myr'].data[i] = self.calving_rate_myr
# if self.is_marine_terminating:
# diag_ds['volume_bsl_m3'].data[i] = self.volume_bsl_m3
# diag_ds['volume_bwl_m3'].data[i] = self.volume_bwl_m3
#
# # to datasets
# run_ds = []
# for (s, w, b) in zip(sects, widths, bucket):
# ds = xr.Dataset()
# ds.attrs['description'] = 'OGGM model output'
# ds.attrs['oggm_version'] = __version__
# ds.attrs['calendar'] = '365-day no leap'
# ds.attrs['creation_date'] = strftime("%Y-%m-%d %H:%M:%S",
# gmtime())
# ds.coords['time'] = yearly_time
# ds['time'].attrs['description'] = 'Floating hydrological year'
# varcoords = OrderedDict(time=('time', yearly_time),
# year=('time', yearly_time))
# ds['ts_section'] = xr.DataArray(s, dims=('time', 'x'),
# coords=varcoords)
# ds['ts_width_m'] = xr.DataArray(w, dims=('time', 'x'),
# coords=varcoords)
# if self.is_tidewater:
# ds['ts_calving_bucket_m3'] = xr.DataArray(b, dims=('time', ),
# coords=varcoords)
# run_ds.append(ds)
#
# # write output?
# if run_path is not None:
# encode = {'ts_section': {'zlib': True, 'complevel': 5},
# 'ts_width_m': {'zlib': True, 'complevel': 5},
# }
# for i, ds in enumerate(run_ds):
# ds.to_netcdf(run_path, 'a', group='fl_{}'.format(i),
# encoding=encode)
# # Add other diagnostics
# diag_ds.to_netcdf(run_path, 'a')
#
# if diag_path is not None:
# diag_ds.to_netcdf(diag_path)
#
# return run_ds, diag_ds
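#
#    # Usage sketch (year and path hypothetical):
#    #   run_ds, diag_ds = model.run_until_and_store(2100, diag_path='diag.nc')
#    #   diag_ds['volume_m3'].plot()  # assuming xarray's plotting accessor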
#
# def run_until_equilibrium(self, rate=0.001, ystep=5, max_ite=200):
# """ Runs the model until an equilibrium state is reached.
#        Be careful: This only works for CONSTANT (not time-dependent)
# mass-balance models.
# Otherwise the returned state will not be in equilibrium! Don't try to
# calculate an equilibrium state with a RandomMassBalance model!
# """
#
# ite = 0
# was_close_zero = 0
# t_rate = 1
# while (t_rate > rate) and (ite <= max_ite) and (was_close_zero < 5):
# ite += 1
# v_bef = self.volume_m3
# self.run_until(self.yr + ystep)
# v_af = self.volume_m3
# if np.isclose(v_bef, 0., atol=1):
# t_rate = 1
# was_close_zero += 1
# else:
# t_rate = np.abs(v_af - v_bef) / v_bef
# if ite > max_ite:
# raise RuntimeError('Did not find equilibrium.')
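#
#    # Usage sketch (arguments hypothetical): with a constant mass-balance
#    # model,
#    #   model.run_until_equilibrium(rate=0.001, ystep=5)
#    # repeats 5-year runs until the relative volume change per step drops
#    # below 0.1 % (or until max_ite iterations have been reached).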
#
#def flux_gate_with_build_up(year, flux_value=None, flux_gate_yr=None):
# """Default scalar flux gate with build up period"""
# fac = 1 - (flux_gate_yr - year) / flux_gate_yr
# return flux_value * utils.clip_scalar(fac, 0, 1)
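#
## Worked example (numbers hypothetical): fac = year / flux_gate_yr, clipped
## to [0, 1], so the gate ramps linearly from 0 at year 0 to the full value at
## flux_gate_yr:
##   flux_gate_with_build_up(50, flux_value=10., flux_gate_yr=100.)   # -> 5.0
##   flux_gate_with_build_up(150, flux_value=10., flux_gate_yr=100.)  # -> 10.0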
#
#class FluxBasedModel(FlowlineModel):
# """The flowline model used by OGGM in production.
# It solves for the SIA along the flowline(s) using a staggered grid. It
# computes the *ice flux* between grid points and transports the mass
# accordingly (also between flowlines).
# This model is numerically less stable than fancier schemes, but it
# is fast and works with multiple flowlines of any bed shape (rectangular,
# parabolic, trapeze, and any combination of them).
# We test that it conserves mass in most cases, but not on very stiff cliffs.
# """
#
# def __init__(self, flowlines, mb_model=None, y0=0., glen_a=None,
# fs=0., inplace=False, fixed_dt=None, cfl_number=None,
# min_dt=None, flux_gate_thickness=None,
# flux_gate=None, flux_gate_build_up=100,
# do_kcalving=None, calving_k=None, calving_use_limiter=None,
# calving_limiter_frac=None, water_level=None,
# **kwargs):
# """Instanciate the model.
# Parameters
# ----------
# flowlines : list
# the glacier flowlines
#        mb_model : MassBalanceModel
# the mass-balance model
# y0 : int
# initial year of the simulation
# glen_a : float
# Glen's creep parameter
# fs : float
# Oerlemans sliding parameter
# inplace : bool
# whether or not to make a copy of the flowline objects for the run
# setting to True implies that your objects will be modified at run
# time by the model (can help to spare memory)
# fixed_dt : float
# set to a value (in seconds) to prevent adaptive time-stepping.
# cfl_number : float
# Defaults to cfg.PARAMS['cfl_number'].
# For adaptive time stepping (the default), dt is chosen from the
# CFL criterion (dt = cfl_number * dx / max_u).
# To choose the "best" CFL number we would need a stability
# analysis - we used an empirical analysis (see blog post) and
# settled on 0.02 for the default cfg.PARAMS['cfl_number'].
# min_dt : float
# Defaults to cfg.PARAMS['cfl_min_dt'].
# At high velocities, time steps can become very small and your
# model might run very slowly. In production, it might be useful to
# set a limit below which the model will just error.
# is_tidewater: bool, default: False
# is this a tidewater glacier?
# is_lake_terminating: bool, default: False
# is this a lake terminating glacier?
# mb_elev_feedback : str, default: 'annual'
# 'never', 'always', 'annual', or 'monthly': how often the
# mass-balance should be recomputed from the mass balance model.
# 'Never' is equivalent to 'annual' but without elevation feedback
# at all (the heights are taken from the first call).
# check_for_boundaries: bool, default: True
# raise an error when the glacier grows bigger than the domain
# boundaries
# flux_gate_thickness : float or array
# flux of ice from the left domain boundary (and tributaries).
# Units of m of ice thickness. Note that unrealistic values won't be
# met by the model, so this is really just a rough guidance.
# It's better to use `flux_gate` instead.
# flux_gate : float or function or array of floats or array of functions
# flux of ice from the left domain boundary (and tributaries)
# (unit: m3 of ice per second). If set to a high value, consider
# changing the flux_gate_buildup time. You can also provide
# a function (or an array of functions) returning the flux
# (unit: m3 of ice per second) as a function of time.
#            This is overridden by `flux_gate_thickness` if provided.
#        flux_gate_build_up : int
# number of years used to build up the flux gate to full value
# do_kcalving : bool
# switch on the k-calving parameterisation. Ignored if not a
# tidewater glacier. Use the option from PARAMS per default
# calving_k : float
# the calving proportionality constant (units: yr-1). Use the
# one from PARAMS per default
# calving_use_limiter : bool
# whether to switch on the calving limiter on the parameterisation
# makes the calving fronts thicker but the model is more stable
# calving_limiter_frac : float
# limit the front slope to a fraction of the calving front.
# "3" means 1/3. Setting it to 0 limits the slope to sea-level.
# water_level : float
# the water level. It should be zero m a.s.l, but:
# - sometimes the frontal elevation is unrealistically high (or low).
# - lake terminating glaciers
# - other uncertainties
# The default is 0. For lake terminating glaciers,
# it is inferred from PARAMS['free_board_lake_terminating'].
# The best way to set the water level for real glaciers is to use
# the same as used for the inversion (this is what
# `robust_model_run` does for you)
# """
# super(FluxBasedModel, self).__init__(flowlines, mb_model=mb_model,
# y0=y0, glen_a=glen_a, fs=fs,
# inplace=inplace,
# water_level=water_level,
# **kwargs)
#
# self.fixed_dt = fixed_dt
# if min_dt is None:
# min_dt = cfg.PARAMS['cfl_min_dt']
# if cfl_number is None:
# cfl_number = cfg.PARAMS['cfl_number']
# self.min_dt = min_dt
# self.cfl_number = cfl_number
#
# # Do we want to use shape factors?
# self.sf_func = None
# use_sf = cfg.PARAMS.get('use_shape_factor_for_fluxbasedmodel')
# if use_sf == 'Adhikari' or use_sf == 'Nye':
# self.sf_func = utils.shape_factor_adhikari
# elif use_sf == 'Huss':
# self.sf_func = utils.shape_factor_huss
#
# # Calving params
# if do_kcalving is None:
# do_kcalving = cfg.PARAMS['use_kcalving_for_run']
# self.do_calving = do_kcalving and self.is_tidewater
# if calving_k is None:
# calving_k = cfg.PARAMS['calving_k']
# self.calving_k = calving_k / cfg.SEC_IN_YEAR
# if calving_use_limiter is None:
# calving_use_limiter = cfg.PARAMS['calving_use_limiter']
# self.calving_use_limiter = calving_use_limiter
# if calving_limiter_frac is None:
# calving_limiter_frac = cfg.PARAMS['calving_limiter_frac']
# if calving_limiter_frac > 0:
# raise NotImplementedError('calving limiter other than 0 not '
# 'implemented yet')
# self.calving_limiter_frac = calving_limiter_frac
#
# # Flux gate
# self.flux_gate = utils.tolist(flux_gate, length=len(self.fls))
# self.flux_gate_m3_since_y0 = 0.
# if flux_gate_thickness is not None:
# # Compute the theoretical ice flux from the slope at the top
# flux_gate_thickness = utils.tolist(flux_gate_thickness,
# length=len(self.fls))
# self.flux_gate = []
# for fl, fgt in zip(self.fls, flux_gate_thickness):
# # We set the thickness to the desired value so that
# # the widths work ok
# fl = copy.deepcopy(fl)
# fl.thick = fl.thick * 0 + fgt
# slope = (fl.surface_h[0] - fl.surface_h[1]) / fl.dx_meter
# if slope == 0:
# raise ValueError('I need a slope to compute the flux')
# flux = find_sia_flux_from_thickness(slope,
# fl.widths_m[0],
# fgt,
# shape=fl.shape_str[0],
# glen_a=self.glen_a,
# fs=self.fs)
# self.flux_gate.append(flux)
#
# # convert the floats to function calls
# for i, fg in enumerate(self.flux_gate):
# if fg is None:
# continue
# try:
# # Do we have a function? If yes all good
# fg(self.yr)
# except TypeError:
# # If not, make one
# self.flux_gate[i] = partial(flux_gate_with_build_up,
# flux_value=fg,
# flux_gate_yr=(flux_gate_build_up +
# self.y0))
#
# # Optim
# self.slope_stag = []
# self.thick_stag = []
# self.section_stag = []
# self.u_stag = []
# self.shapefac_stag = []
# self.flux_stag = []
# self.trib_flux = []
# for fl, trib in zip(self.fls, self._tributary_indices):
# nx = fl.nx
# # This is not staggered
# self.trib_flux.append(np.zeros(nx))
# # We add an additional fake grid point at the end of tributaries
# if trib[0] is not None:
# nx = fl.nx + 1
# # +1 is for the staggered grid
# self.slope_stag.append(np.zeros(nx+1))
# self.thick_stag.append(np.zeros(nx+1))
# self.section_stag.append(np.zeros(nx+1))
# self.u_stag.append(np.zeros(nx+1))
# self.shapefac_stag.append(np.ones(nx+1)) # beware the ones!
# self.flux_stag.append(np.zeros(nx+1))
#
# def step(self, dt):
# """Advance one step."""
#
# # Just a check to avoid useless computations
# if dt <= 0:
# raise InvalidParamsError('dt needs to be strictly positive')
#
# # Simple container
# mbs = []
#
# # Loop over tributaries to determine the flux rate
# for fl_id, fl in enumerate(self.fls):
#
# # This is possibly less efficient than zip() but much clearer
# trib = self._tributary_indices[fl_id]
# slope_stag = self.slope_stag[fl_id]
# thick_stag = self.thick_stag[fl_id]
# section_stag = self.section_stag[fl_id]
# sf_stag = self.shapefac_stag[fl_id]
# flux_stag = self.flux_stag[fl_id]
# trib_flux = self.trib_flux[fl_id]
# u_stag = self.u_stag[fl_id]
# flux_gate = self.flux_gate[fl_id]
#
# # Flowline state
# surface_h = fl.surface_h
# thick = fl.thick
# section = fl.section
# dx = fl.dx_meter
#
# # If it is a tributary, we use the branch it flows into to compute
# # the slope of the last grid point
# is_trib = trib[0] is not None
# if is_trib:
# fl_to = self.fls[trib[0]]
# ide = fl.flows_to_indice
# surface_h = np.append(surface_h, fl_to.surface_h[ide])
# thick = np.append(thick, thick[-1])
# section = np.append(section, section[-1])
# elif self.do_calving and self.calving_use_limiter:
# # We lower the max possible ice deformation
# # by clipping the surface slope here. It is completely
# # arbitrary but reduces ice deformation at the calving front.
# # I think that in essence, it is also partly
# # a "calving process", because this ice deformation must
# # be less at the calving front. The result is that calving
# # front "free boards" are quite high.
# # Note that 0 is arbitrary, it could be any value below SL
# surface_h = utils.clip_min(surface_h, self.water_level)
#
# # Staggered gradient
# slope_stag[0] = 0
# slope_stag[1:-1] = (surface_h[0:-1] - surface_h[1:]) / dx
# slope_stag[-1] = slope_stag[-2]
#
# # Staggered thick
# thick_stag[1:-1] = (thick[0:-1] + thick[1:]) / 2.
# thick_stag[[0, -1]] = thick[[0, -1]]
#
# if self.sf_func is not None:
# # TODO: maybe compute new shape factors only every year?
# sf = self.sf_func(fl.widths_m, fl.thick, fl.is_rectangular)
# if is_trib:
# # for inflowing tributary, the sf makes no sense
# sf = np.append(sf, 1.)
# sf_stag[1:-1] = (sf[0:-1] + sf[1:]) / 2.
# sf_stag[[0, -1]] = sf[[0, -1]]
#
# # Staggered velocity (Deformation + Sliding)
# # _fd = 2/(N+2) * self.glen_a
# N = self.glen_n
# rhogh = (self.rho*cfg.G*slope_stag)**N
# u_stag[:] = (thick_stag**(N+1)) * self._fd * rhogh * sf_stag**N + \
# (thick_stag**(N-1)) * self.fs * rhogh
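#            # Spelled out (same quantities as above, N = Glen's n):
#            #   u = fd * h**(N+1) * (rho*g*slope)**N * sf**N    (deformation)
#            #     + fs * h**(N-1) * (rho*g*slope)**N            (sliding)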
#
# # Staggered section
# section_stag[1:-1] = (section[0:-1] + section[1:]) / 2.
# section_stag[[0, -1]] = section[[0, -1]]
#
# # Staggered flux rate
# flux_stag[:] = u_stag * section_stag
#
# # Add boundary condition
# if flux_gate is not None:
# flux_stag[0] = flux_gate(self.yr)
#
# # CFL condition
# if not self.fixed_dt:
# maxu = np.max(np.abs(u_stag))
# if maxu > cfg.FLOAT_EPS:
# cfl_dt = self.cfl_number * dx / maxu
# else:
# cfl_dt = dt
#
# # Update dt only if necessary
# if cfl_dt < dt:
# dt = cfl_dt
# if cfl_dt < self.min_dt:
# raise RuntimeError(
# 'CFL error: required time step smaller '
# 'than the minimum allowed: '
# '{:.1f}s vs {:.1f}s.'.format(cfl_dt, self.min_dt))
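#
#            # Order-of-magnitude check (numbers hypothetical): with dx = 100 m,
#            # max|u| = 1e-4 m s-1 and cfl_number = 0.02, the criterion gives
#            # cfl_dt = 0.02 * 100 / 1e-4 = 2e4 s (about 5.6 hours).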
#
# # Since we are in this loop, reset the tributary flux
# trib_flux[:] = 0
#
# # We compute MB in this loop, before mass-redistribution occurs,
# # so that MB models which rely on glacier geometry to decide things
#            # (like PyGEM) can do so with a clean glacier state
# mbs.append(self.get_mb(fl.surface_h, self.yr,
# fl_id=fl_id, fls=self.fls))
#
# # Time step
# if self.fixed_dt:
# # change only if step dt is larger than the chosen dt
# if self.fixed_dt < dt:
# dt = self.fixed_dt
#
# # A second loop for the mass exchange
# for fl_id, fl in enumerate(self.fls):
#
# flx_stag = self.flux_stag[fl_id]
# trib_flux = self.trib_flux[fl_id]
# tr = self._tributary_indices[fl_id]
#
# dx = fl.dx_meter
#
# is_trib = tr[0] is not None
#
# # For these we had an additional grid point
# if is_trib:
# flx_stag = flx_stag[:-1]
#
# # Mass-balance
# widths = fl.widths_m
# mb = mbs[fl_id]
#
# # Allow parabolic beds to grow
# mb = dt * mb * np.where((mb > 0.) & (widths == 0), 10., widths)
#
## print('mass balance (m ice for time step):', mb)
#
# ice_thickness = np.where(widths > 0, fl.section / widths, 0)
#
# ice_thickness_plus_mb = ice_thickness + mb
#
# ice_thickness_missing = np.where(ice_thickness_plus_mb > 0, 0, ice_thickness_plus_mb)
#
## print('missing ice thickness w/o flux (m3):', (ice_thickness_missing * widths * dx).sum())
#
#
# # Update section with ice flow and mass balance
# new_section = (fl.section + (flx_stag[0:-1] - flx_stag[1:])*dt/dx +
# trib_flux*dt/dx + mb)
#
# volume_change_unaccounted = np.where(new_section > 0, 0, new_section * dx)
## print(volume_change_unaccounted)
## print('surface h:', surface_h)
## print('slope:', ((surface_h[1:] - surface_h[:-1]) / dx))
## print('dx:', dx)
## print('volume change unaccounted:', volume_change_unaccounted.sum())
#
# # Keep positive values only and store
#
# old_section = np.copy(fl.section)
#
# fl.section = utils.clip_min(new_section, 0)
#
## old_volume = (old_section * dx).sum()
## updated_volume = (fl.section * dx).sum()
## print('\nvolume prior (section * dx):', old_volume)
## print(' volume updated :', updated_volume)
## volume_change = updated_volume - old_volume
## print(' volume change before/after:', updated_volume - old_volume)
##
## vol_change_from_mb = (mb * widths * dx).sum()
## print(' volume change from mb :', vol_change_from_mb)
##
## mb_4timestep = volume_change / (widths * dx).sum() / dt * 365 * 24 * 3600 * 0.9
## print(' mb (mwea) for time step :', mb_4timestep)
#
##        vol_change = (mb * widths * dx).sum()
##        print('volume change from mb (m3):', vol_change)
##        print('difference volume change:', vol_change - (updated_volume - old_volume))
#
# # If we use a flux-gate, store the total volume that came in
# self.flux_gate_m3_since_y0 += flx_stag[0] * dt
#
# # Add the last flux to the tributary
# # this works because the lines are sorted in order
# if is_trib:
# # tr tuple: line_index, start, stop, gaussian_kernel
# self.trib_flux[tr[0]][tr[1]:tr[2]] += \
# utils.clip_min(flx_stag[-1], 0) * tr[3]
#
# # --- The rest is for calving only ---
# self.calving_rate_myr = 0.
#
# # If tributary, do calving only if we are not transferring mass
# if is_trib and flx_stag[-1] > 0:
# continue
#
# # No need to do calving in these cases either
# if not self.do_calving or not fl.has_ice():
# continue
#
# # We do calving only if the last glacier bed pixel is below water
# # (this is to avoid calving elsewhere than at the front)
# if fl.bed_h[fl.thick > 0][-1] > self.water_level:
# continue
#
# # We do calving only if there is some ice above wl
# last_above_wl = np.nonzero((fl.surface_h > self.water_level) &
# (fl.thick > 0))[0][-1]
# if fl.bed_h[last_above_wl] > self.water_level:
# continue
#
# # OK, we're really calving
# section = fl.section
#
# # Calving law
# h = fl.thick[last_above_wl]
# d = h - (fl.surface_h[last_above_wl] - self.water_level)
# k = self.calving_k
# q_calving = k * d * h * fl.widths_m[last_above_wl]
# # Add to the bucket and the diagnostics
# fl.calving_bucket_m3 += q_calving * dt
# self.calving_m3_since_y0 += q_calving * dt
# self.calving_rate_myr = (q_calving / section[last_above_wl] *
# cfg.SEC_IN_YEAR)
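#
#            # Magnitudes (numbers hypothetical): h = 200 m of ice, water depth
#            # d = 150 m, calving_k = 0.6 yr-1 and a 500 m wide front give
#            # q_calving = (0.6 / SEC_IN_YEAR) * 150 * 200 * 500 m3 of ice s-1.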
#
# # See if we have ice below sea-water to clean out first
# below_sl = (fl.surface_h < self.water_level) & (fl.thick > 0)
# to_remove = np.sum(section[below_sl]) * fl.dx_meter
# if 0 < to_remove < fl.calving_bucket_m3:
# # This is easy, we remove everything
# section[below_sl] = 0
# fl.calving_bucket_m3 -= to_remove
# elif to_remove > 0:
#                # We can only remove part of it
# section[below_sl] = 0
# section[last_above_wl+1] = ((to_remove - fl.calving_bucket_m3)
# / fl.dx_meter)
# fl.calving_bucket_m3 = 0
#
# # The rest of the bucket might calve an entire grid point
# vol_last = section[last_above_wl] * fl.dx_meter
# if fl.calving_bucket_m3 > vol_last:
# fl.calving_bucket_m3 -= vol_last
# section[last_above_wl] = 0
#
# # We update the glacier with our changes
# fl.section = section
#
# # Next step
# self.t += dt
# return dt
#
# def get_diagnostics(self, fl_id=-1):
# """Obtain model diagnostics in a pandas DataFrame.
# Parameters
# ----------
# fl_id : int
# the index of the flowline of interest, from 0 to n_flowline-1.
# Default is to take the last (main) one
# Returns
# -------
#        a pandas DataFrame, whose index is distance along flowline (m). Units:
#        - surface_h, bed_h, ice_thick, section_width: m
# - section_area: m2
# - slope: -
# - ice_flux, tributary_flux: m3 of *ice* per second
# - ice_velocity: m per second (depth-section integrated)
# """
# import pandas as pd
#
# fl = self.fls[fl_id]
# nx = fl.nx
#
# df = pd.DataFrame(index=fl.dx_meter * np.arange(nx))
# df.index.name = 'distance_along_flowline'
# df['surface_h'] = fl.surface_h
# df['bed_h'] = fl.bed_h
# df['ice_thick'] = fl.thick
# df['section_width'] = fl.widths_m
# df['section_area'] = fl.section
#
# # Staggered
# var = self.slope_stag[fl_id]
# df['slope'] = (var[1:nx+1] + var[:nx])/2
# var = self.flux_stag[fl_id]
# df['ice_flux'] = (var[1:nx+1] + var[:nx])/2
# var = self.u_stag[fl_id]
# df['ice_velocity'] = (var[1:nx+1] + var[:nx])/2
# var = self.shapefac_stag[fl_id]
# df['shape_fac'] = (var[1:nx+1] + var[:nx])/2
#
# # Not Staggered
# df['tributary_flux'] = self.trib_flux[fl_id]
#
# return df
| 47.389222
| 133
| 0.54341
|
4a13597501a4db3bfab9383aa4a3faf1a30ce78d
| 15,717
|
py
|
Python
|
twilio/rest/ip_messaging/v2/service/user/user_binding.py
|
NCPlayz/twilio-python
|
08898a4a1a43b636a64c9e98fbb0b6ee1792c687
|
[
"MIT"
] | null | null | null |
twilio/rest/ip_messaging/v2/service/user/user_binding.py
|
NCPlayz/twilio-python
|
08898a4a1a43b636a64c9e98fbb0b6ee1792c687
|
[
"MIT"
] | 1
|
2021-06-02T00:27:34.000Z
|
2021-06-02T00:27:34.000Z
|
exercise/venv/lib/python3.7/site-packages/twilio/rest/ip_messaging/v2/service/user/user_binding.py
|
assuzzanne/notifications-dispatcher-api
|
81ae0eab417a1dbc0ae6b1778ebfdd71591c3c5b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class UserBindingList(ListResource):
""" """
def __init__(self, version, service_sid, user_sid):
"""
Initialize the UserBindingList
:param Version version: Version that contains the resource
:param service_sid: The SID of the Service that the resource is associated with
:param user_sid: The SID of the User with the binding
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingList
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingList
"""
super(UserBindingList, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'user_sid': user_sid, }
self._uri = '/Services/{service_sid}/Users/{user_sid}/Bindings'.format(**self._solution)
def stream(self, binding_type=values.unset, limit=None, page_size=None):
"""
Streams UserBindingInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param UserBindingInstance.BindingType binding_type: The push technology used by the User Binding resources to read
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(binding_type=binding_type, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, binding_type=values.unset, limit=None, page_size=None):
"""
Lists UserBindingInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param UserBindingInstance.BindingType binding_type: The push technology used by the User Binding resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: list that will contain up to limit results
:rtype: list[twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance]
"""
return list(self.stream(binding_type=binding_type, limit=limit, page_size=page_size, ))
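    # Usage sketch (SIDs are placeholders; assumes an authenticated
    # twilio.rest.Client and that the ip_messaging domain exposes this path):
    #   bindings = client.ip_messaging.v2 \
    #       .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    #       .users('USXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    #       .user_bindings.list(limit=20)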
def page(self, binding_type=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of UserBindingInstance records from the API.
Request is executed immediately
:param UserBindingInstance.BindingType binding_type: The push technology used by the User Binding resources to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingPage
"""
params = values.of({
'BindingType': serialize.map(binding_type, lambda e: e),
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return UserBindingPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of UserBindingInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return UserBindingPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a UserBindingContext
:param sid: The SID of the User Binding resource to fetch
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
"""
return UserBindingContext(
self._version,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a UserBindingContext
:param sid: The SID of the User Binding resource to fetch
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
"""
return UserBindingContext(
self._version,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.UserBindingList>'
class UserBindingPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the UserBindingPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param service_sid: The SID of the Service that the resource is associated with
:param user_sid: The SID of the User with the binding
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingPage
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingPage
"""
super(UserBindingPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of UserBindingInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
return UserBindingInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.IpMessaging.V2.UserBindingPage>'
class UserBindingContext(InstanceContext):
""" """
def __init__(self, version, service_sid, user_sid, sid):
"""
Initialize the UserBindingContext
:param Version version: Version that contains the resource
:param service_sid: The SID of the Service to fetch the resource from
:param user_sid: The SID of the User with the binding
:param sid: The SID of the User Binding resource to fetch
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
"""
super(UserBindingContext, self).__init__(version)
# Path Solution
self._solution = {'service_sid': service_sid, 'user_sid': user_sid, 'sid': sid, }
self._uri = '/Services/{service_sid}/Users/{user_sid}/Bindings/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a UserBindingInstance
:returns: Fetched UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return UserBindingInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the UserBindingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.UserBindingContext {}>'.format(context)
class UserBindingInstance(InstanceResource):
""" """
class BindingType(object):
GCM = "gcm"
APN = "apn"
FCM = "fcm"
def __init__(self, version, payload, service_sid, user_sid, sid=None):
"""
Initialize the UserBindingInstance
:returns: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
super(UserBindingInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'service_sid': payload['service_sid'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'endpoint': payload['endpoint'],
'identity': payload['identity'],
'user_sid': payload['user_sid'],
'credential_sid': payload['credential_sid'],
'binding_type': payload['binding_type'],
'message_types': payload['message_types'],
'url': payload['url'],
}
# Context
self._context = None
self._solution = {
'service_sid': service_sid,
'user_sid': user_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: UserBindingContext for this UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingContext
"""
if self._context is None:
self._context = UserBindingContext(
self._version,
service_sid=self._solution['service_sid'],
user_sid=self._solution['user_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: The SID of the Service that the resource is associated with
:rtype: unicode
"""
return self._properties['service_sid']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def endpoint(self):
"""
:returns: The unique endpoint identifier for the User Binding
:rtype: unicode
"""
return self._properties['endpoint']
@property
def identity(self):
"""
:returns: The string that identifies the resource's User
:rtype: unicode
"""
return self._properties['identity']
@property
def user_sid(self):
"""
:returns: The SID of the User with the binding
:rtype: unicode
"""
return self._properties['user_sid']
@property
def credential_sid(self):
"""
:returns: The SID of the Credential for the binding
:rtype: unicode
"""
return self._properties['credential_sid']
@property
def binding_type(self):
"""
:returns: The push technology to use for the binding
:rtype: UserBindingInstance.BindingType
"""
return self._properties['binding_type']
@property
def message_types(self):
"""
:returns: The Programmable Chat message types the binding is subscribed to
:rtype: unicode
"""
return self._properties['message_types']
@property
def url(self):
"""
:returns: The absolute URL of the User Binding resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a UserBindingInstance
:returns: Fetched UserBindingInstance
:rtype: twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the UserBindingInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.IpMessaging.V2.UserBindingInstance {}>'.format(context)
| 34.093275
| 123
| 0.623592
|
4a1359aee43e9473b2bad07a38a5381b7651301c
| 476
|
py
|
Python
|
utils/rq_queryjob.py
|
scopedsecurity/crackq
|
6c2d79b21e3a9dafb6182867f3cac8cbae3c081f
|
[
"MIT"
] | null | null | null |
utils/rq_queryjob.py
|
scopedsecurity/crackq
|
6c2d79b21e3a9dafb6182867f3cac8cbae3c081f
|
[
"MIT"
] | 1
|
2020-06-29T20:07:11.000Z
|
2020-06-29T20:07:11.000Z
|
utils/rq_queryjob.py
|
scopedsecurity/crackq
|
6c2d79b21e3a9dafb6182867f3cac8cbae3c081f
|
[
"MIT"
] | null | null | null |
import sys
from redis import Redis
from rq import Queue
if len(sys.argv) < 3:
    print('Usage: {} <queue-name> <job_id>'.format(sys.argv[0]))
    exit(1)
redis_con = Redis('redis', 6379)
redis_q = Queue(sys.argv[1], connection=redis_con)
job = redis_q.fetch_job(sys.argv[2])
if job is None:
    exit('Job {} not found in queue {}'.format(sys.argv[2], sys.argv[1]))
print('Description: {}'.format(job.description))
print('Result: {}'.format(job.result))
print('Execution info: {}'.format(job.exc_info))
print('Meta {}'.format(job.meta))
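# Example invocation (queue name and job id are placeholders):
#   python rq_queryjob.py default 5a607474-cf11-4fc4-b0d3-6737b8d866b8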
| 22.666667
| 50
| 0.69958
|
4a135a187726caf9103ad1b307d11940a023d05e
| 12,162
|
py
|
Python
|
fossology/__init__.py
|
alpianon/fossology-python
|
c38abcaea507f8ef93aa2901aa6078977bfa2df1
|
[
"MIT"
] | null | null | null |
fossology/__init__.py
|
alpianon/fossology-python
|
c38abcaea507f8ef93aa2901aa6078977bfa2df1
|
[
"MIT"
] | null | null | null |
fossology/__init__.py
|
alpianon/fossology-python
|
c38abcaea507f8ef93aa2901aa6078977bfa2df1
|
[
"MIT"
] | null | null | null |
# Copyright 2019-2021 Siemens AG
# SPDX-License-Identifier: MIT
import re
import logging
import requests
from datetime import date, timedelta
from typing import Dict, List
from fossology.obj import (
Agents,
Upload,
User,
File,
TokenScope,
SearchTypes,
get_options,
)
from fossology.folders import Folders
from fossology.uploads import Uploads
from fossology.jobs import Jobs
from fossology.report import Report
from fossology.exceptions import (
AuthenticationError,
AuthorizationError,
FossologyApiError,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def search_headers(
searchType: SearchTypes = SearchTypes.ALLFILES,
upload: Upload = None,
filename: str = None,
tag: str = None,
filesizemin: int = None,
filesizemax: int = None,
license: str = None,
copyright: str = None,
group: str = None,
) -> Dict:
headers = {"searchType": searchType.value}
if upload:
headers["uploadId"] = str(upload.id)
if filename:
headers["filename"] = filename
if tag:
headers["tag"] = tag
if filesizemin:
headers["filesizemin"] = filesizemin
if filesizemax:
headers["filesizemax"] = filesizemax
if license:
headers["license"] = license
if copyright:
headers["copyright"] = copyright
if group:
headers["groupName"] = group
return headers
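# Example (values hypothetical, assuming SearchTypes.ALLFILES.value == 'allfiles'):
#   search_headers(filename="%.tar.gz", license="MIT")
#   # -> {'searchType': 'allfiles', 'filename': '%.tar.gz', 'license': 'MIT'}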
def fossology_token(
url, username, password, token_name, token_scope=TokenScope.READ, token_expire=None
):
"""Generate an API token using username/password
API endpoint: POST /tokens
:Example:
>>> from fossology import fossology_token
>>> from fossology.obj import TokenScope
>>> token = fossology_token("https://fossology.example.com", "Me", "MyPassword", "MyToken")
:param url: the URL of the Fossology server
:param username: name of the user the token will be generated for
:param password: the password of the user
    :param token_name: the name of the token
    :param token_scope: the scope of the token (default: READ)
    :param token_expire: the expire date of the token (default: max. 30 days)
:type url: string
:type username: string
:type password: string
    :type token_name: string
    :type token_scope: TokenScope (default: TokenScope.READ)
    :type token_expire: string, e.g. 2019-12-25
:return: the new token
:rtype: string
:raises AuthenticationError: if the username or password is incorrect
:raises FossologyApiError: if another error occurs
"""
data = {
"username": username,
"password": password,
"token_name": token_name,
"token_scope": token_scope.value,
}
if token_expire:
data["token_expire"] = token_expire
else:
now = date.today()
data["token_expire"] = str(now + timedelta(days=30))
try:
response = requests.post(url + "/api/v1/tokens", data=data)
if response.status_code == 201:
token = response.json()["Authorization"]
return re.sub("Bearer ", "", token)
elif response.status_code == 404:
description = "Authentication error"
raise AuthenticationError(description, response)
else:
description = "Error while generating new token"
raise FossologyApiError(description, response)
except requests.exceptions.ConnectionError as error:
exit(f"Server {url} does not seem to be running or is unreachable: {error}")
class Fossology(Folders, Uploads, Jobs, Report):
"""Main Fossology API class
Authentication against a running Fossology instance is performed using an API token.
:Example:
>>> from fossology import Fossology
>>> foss = Fossology(FOSS_URL, FOSS_TOKEN, username)
.. note::
The class instantiation exits if the session with the Fossology server
can't be established
:param url: URL of the Fossology instance
:param token: The API token generated using the Fossology UI
:param name: The name of the token owner
:type url: str
:type token: str
:type name: str
:raises AuthenticationError: if the user couldn't be found
"""
def __init__(self, url, token, name):
self.host = url
self.token = token
self.name = name
self.users = list()
self.folders = list()
self.api = f"{self.host}/api/v1"
self.session = requests.Session()
self.session.headers.update({"Authorization": f"Bearer {self.token}"})
self.user = self._auth()
self.version = self.get_version()
self.rootFolder = self.detail_folder(self.user.rootFolderId)
self.folders = self.list_folders()
logger.info(
f"Authenticated as {self.user.name} against {self.host} using API version {self.version}"
)
def _auth(self):
"""Perform the first API request and populate user variables
:return: the authenticated user's details
:rtype: User
:raises AuthenticationError: if the user couldn't be found
"""
self.users = self.list_users()
for user in self.users:
if user.name == self.name:
self.user = user
return self.user
description = f"User {self.name} was not found on {self.host}"
raise AuthenticationError(description)
def close(self):
self.session.close()
def get_version(self):
"""Get API version from the server
API endpoint: GET /version
:return: the API version string
:rtype: string
:raises FossologyApiError: if the REST call failed
"""
response = self.session.get(f"{self.api}/version")
if response.status_code == 200:
return response.json()["version"]
else:
description = "Error while getting API version"
raise FossologyApiError(description, response)
def detail_user(self, user_id):
"""Get details of Fossology user.
API Endpoint: GET /users/{id}
:param id: the ID of the user
:type id: int
:return: the requested user's details
:rtype: User
:raises FossologyApiError: if the REST call failed
"""
response = self.session.get(f"{self.api}/users/{user_id}")
if response.status_code == 200:
user_agents = None
user_details = response.json()
if user_details.get("agents"):
user_agents = Agents.from_json(user_details["agents"])
user = User.from_json(user_details)
user.agents = user_agents
return user
else:
description = f"Error while getting details for user {user_id}"
raise FossologyApiError(description, response)
def list_users(self):
""" List all users from the Fossology instance
API Endpoint: GET /users
:return: the list of users
:rtype: list of User
:raises FossologyApiError: if the REST call failed
"""
response = self.session.get(f"{self.api}/users")
if response.status_code == 200:
users_list = list()
for user in response.json():
if user.get("name") == "Default User":
continue
if user.get("email"):
foss_user = User.from_json(user)
agents = user.get("agents")
if agents:
foss_user.agents = Agents.from_json(agents)
users_list.append(foss_user)
return users_list
else:
description = f"Unable to get a list of users from {self.host}"
raise FossologyApiError(description, response)
def delete_user(self, user):
"""Delete a Fossology user.
API Endpoint: DELETE /users/{id}
:param user: the user to be deleted
:type user: User
:raises FossologyApiError: if the REST call failed
"""
response = self.session.delete(f"{self.api}/users/{user.id}")
if response.status_code == 202:
return
else:
description = f"Error while deleting user {user.name} ({user.id})"
raise FossologyApiError(description, response)
def search(
self,
searchType: SearchTypes = SearchTypes.ALLFILES,
upload: Upload = None,
filename: str = None,
tag: str = None,
filesizemin: int = None,
filesizemax: int = None,
license: str = None,
copyright: str = None,
group: str = None,
):
"""Search for a specific file
API Endpoint: GET /search
:param searchType: Limit search to: directory, allfiles (default), containers
:param upload: Limit search to a specific upload
:param filename: Filename to find, can contain % as wild-card
:param tag: tag to find
:param filesizemin: Min filesize in bytes
:param filesizemax: Max filesize in bytes
:param license: License search filter
:param copyright: Copyright search filter
:param group: the group name to choose while performing search (default: None)
:type searchType: one of SearchTypes Enum
:type upload: Upload
:type filename: string
:type tag: string
:type filesizemin: int
:type filesizemax: int
:type license: string
:type copyright: string
:type group: string
:return: list of items corresponding to the search criteria
:rtype: JSON
:raises FossologyApiError: if the REST call failed
:raises AuthorizationError: if the user can't access the group
"""
headers = search_headers(
searchType,
upload,
filename,
tag,
filesizemin,
filesizemax,
license,
copyright,
group,
)
response = self.session.get(f"{self.api}/search", headers=headers)
if response.status_code == 200:
return response.json()
elif response.status_code == 403:
description = f"Searching {get_options(group)}not authorized"
raise AuthorizationError(description, response)
else:
description = "Unable to get a result with the given search criteria"
raise FossologyApiError(description, response)
def filesearch(
self, filelist: List = [], group: str = None,
):
"""Search for files from hash sum
API Endpoint: POST /filesearch
The response does not generate Python objects yet, the plain JSON data is simply returned.
:param filelist: the list of files (or containers) hashes to search for (default: [])
:param group: the group name to choose while performing search (default: None)
:type filelist: list
:return: list of items corresponding to the search criteria
:type group: string
:rtype: JSON
:raises FossologyApiError: if the REST call failed
:raises AuthorizationError: if the user can't access the group
"""
headers = {}
if group:
headers["groupName"] = group
response = self.session.post(
f"{self.api}/filesearch", headers=headers, json=filelist
)
if response.status_code == 200:
all_files = []
for hash_file in response.json():
if hash_file.get("findings"):
all_files.append(File.from_json(hash_file))
else:
return "Unable to get a result with the given filesearch criteria"
return all_files
elif response.status_code == 403:
description = f"Searching {get_options(group)}not authorized"
raise AuthorizationError(description, response)
else:
description = "Unable to get a result with the given filesearch criteria"
raise FossologyApiError(description, response)
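# Usage sketch (hash value hypothetical; the endpoint matches files by their
# checksums, e.g. sha1):
#   foss.filesearch([{"sha1": "04621571bcbabce75c4dd1c6445b87dec0c4e996"}])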
| 32.693548
| 101
| 0.61429
|
4a135ae9bd0248b7d2693db0274eccc1aa18bdc3
| 3,900
|
py
|
Python
|
models/objects/objects.py
|
jdj2261/lets-do-mujoco
|
3786526b89a45b6a9c300ab168ae1bf4c7a32e39
|
[
"MIT"
] | 2
|
2022-01-08T15:58:22.000Z
|
2022-02-21T03:52:26.000Z
|
models/objects/objects.py
|
jdj2261/lets-do-mujoco
|
3786526b89a45b6a9c300ab168ae1bf4c7a32e39
|
[
"MIT"
] | null | null | null |
models/objects/objects.py
|
jdj2261/lets-do-mujoco
|
3786526b89a45b6a9c300ab168ae1bf4c7a32e39
|
[
"MIT"
] | null | null | null |
import copy
import xml.etree.ElementTree as ET
import numpy as np
from models.base import MujocoXML
from utils.mjcf_utils import string_to_array, array_to_string, new_joint, new_geom
from utils.transform_utils import euler2mat, mat2quat
class MujocoObject:
def __init__(self):
pass
@property
def base_xpos(self):
raise NotImplementedError
@property
def base_ori(self):
raise NotImplementedError
def get_collision(self):
raise NotImplementedError
class MujocoXMLObject(MujocoXML, MujocoObject):
"""
MujocoObjects that are loaded from xml files
"""
def __init__(self, fname, name, pos, rot, joints):
MujocoXML.__init__(self, fname)
self.name = name
assert np.array(rot).shape == (3,), "Orientation type is Euler!!"
rot = mat2quat(euler2mat(rot))[[3,0,1,2]]
self._joints = joints
self._base_object = self.worldbody.find("./body")
self._bottom_site = self.worldbody.find("./body/site[@name='bottom_site']")
self._top_site = self.worldbody.find("./body/site[@name='top_site']")
self._horizontal_radius_site = self.worldbody.find("./body/site[@name='horizontal_radius_site']")
self._base_object.set("pos", array_to_string(pos))
self._base_object.set("quat", array_to_string(rot))
self._base_size = None
@property
def bottom_offset(self):
return string_to_array(self._bottom_site.get("pos"))
@property
def top_offset(self):
return string_to_array(self._top_site.get("pos"))
@property
def horizontal_radius(self):
return string_to_array(self._horizontal_radius_site.get("pos"))[0]
@property
def base_object(self):
return self._base_object
@base_object.setter
def base_object(self, base_name):
assert type(base_name) == str
self._base_object = self.worldbody.find("{}".format(base_name))
@property
def base_xpos(self):
return string_to_array(self._base_object.get("pos"))
@base_xpos.setter
def base_xpos(self, pos):
self._base_object.set("pos", array_to_string(pos))
@property
def base_ori(self):
return string_to_array(self._base_object.get("quat"))
@base_ori.setter
def base_ori(self, rot):
        assert np.array(rot).shape == (3,), "Orientation type is Euler!!"
rot = mat2quat(euler2mat(rot))[[3,0,1,2]]
self._base_object.set("quat", array_to_string(rot))
@property
def base_size(self):
return string_to_array(self._base_object.get("size"))
@base_size.setter
def base_size(self, size):
        self._base_object.set("size", array_to_string(size))
        self._base_size = size
def get_collision(self):
collision = copy.deepcopy(self.worldbody.find("./body/body[@name='collision']"))
collision.attrib.pop("name")
col_name = self.name+"_col"
geoms = collision.findall("geom")
duplicate_geoms = copy.deepcopy(geoms)
if self.name is not None:
collision.attrib["name"] = col_name
if len(geoms) == 1:
geoms[0].set("name", col_name+"-0")
else:
for i in range(len(geoms)):
geoms[i].set("name", "{}-{}".format(col_name, i))
geom_group = duplicate_geoms[0].get("group")
duplicate_geoms[0].set("group", "1")
if int(geom_group) == 1:
duplicate_geoms[0].set("group", "0")
collision.append(ET.Element("geom", attrib=duplicate_geoms[0].attrib))
collision.set("pos", array_to_string(self.base_xpos))
collision.set("quat", array_to_string(self.base_ori))
if self._joints is not None:
collision.append(new_joint(name=col_name+"_joint", **self._joints[0]))
return collision
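# --- Usage sketch (illustrative) ---
# A minimal sketch, assuming a valid MJCF file whose body defines the
# bottom/top/horizontal_radius sites this class expects; the file name and
# joint spec below are hypothetical.
#
# box = MujocoXMLObject(
#     fname="objects/box.xml", name="box",
#     pos=[0.0, 0.0, 0.1], rot=[0.0, 0.0, 1.57],
#     joints=[{"type": "free"}],
# )
# collision_body = box.get_collision()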
| 31.2
| 105
| 0.633333
|
4a135bf18f72570dbe9b822af09b674664c4b1da
| 643
|
py
|
Python
|
cgi/teste_utilidade.py
|
redienhcs/estrelabot
|
69f7d3c287e3b532018f0ea4f377356e00d5522d
|
[
"MIT"
] | null | null | null |
cgi/teste_utilidade.py
|
redienhcs/estrelabot
|
69f7d3c287e3b532018f0ea4f377356e00d5522d
|
[
"MIT"
] | null | null | null |
cgi/teste_utilidade.py
|
redienhcs/estrelabot
|
69f7d3c287e3b532018f0ea4f377356e00d5522d
|
[
"MIT"
] | null | null | null |
import win_unicode_console
win_unicode_console.enable()
# Read the contents of the files and find out whether the chatbot's reply was useful
lista_de_arquivos = {
"eventos.yml",
"historia.yml",
"meio_ambiente.yml",
"perguntas_frequentes.yml"
}
conteudo_arquivos = ""
for arquivo in lista_de_arquivos:
    with open(arquivo, 'r', encoding="utf_8_sig") as file:
        conteudo_arquivos += file.read()
frase_gerada = "A prefeitura de Estrela se localiza na Rua Júlio de Castilhos, 380"
resultado_busca = conteudo_arquivos.find(frase_gerada)
if resultado_busca > -1:
    print("Suggest filling in the form")
print(resultado_busca)
| 23.814815
| 83
| 0.74339
|
4a135def9c26028dfe16f22fc4233131b099d0a9
| 633
|
py
|
Python
|
backend/hqlib/__init__.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 25
|
2016-11-25T10:41:24.000Z
|
2021-07-03T14:02:49.000Z
|
backend/hqlib/__init__.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 783
|
2016-09-19T12:10:21.000Z
|
2021-01-04T20:39:15.000Z
|
backend/hqlib/__init__.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 15
|
2015-03-25T13:52:49.000Z
|
2021-03-08T17:17:56.000Z
|
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
NAME = "HQ"
VERSION = "2.93.9"
| 33.315789
| 72
| 0.777251
|
4a135e09a36dad04e773cb99a5f02c06a4ec498e
| 19,646
|
py
|
Python
|
pycti/entities/opencti_stix_observable.py
|
Cix-16/client-python
|
91fd5a874f9bf3de9d6ae6dc12fecd4791fb3caf
|
[
"Apache-2.0"
] | null | null | null |
pycti/entities/opencti_stix_observable.py
|
Cix-16/client-python
|
91fd5a874f9bf3de9d6ae6dc12fecd4791fb3caf
|
[
"Apache-2.0"
] | null | null | null |
pycti/entities/opencti_stix_observable.py
|
Cix-16/client-python
|
91fd5a874f9bf3de9d6ae6dc12fecd4791fb3caf
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import json
from pycti.utils.constants import CustomProperties
from pycti.utils.opencti_stix2 import SPEC_VERSION
class StixObservable:
def __init__(self, opencti):
self.opencti = opencti
self.properties = """
id
stix_id_key
entity_type
name
description
observable_value
created_at
updated_at
createdByRef {
node {
id
entity_type
stix_id_key
stix_label
name
alias
description
created
modified
... on Organization {
organization_class
}
}
relation {
id
}
}
tags {
edges {
node {
id
tag_type
value
color
}
relation {
id
}
}
}
markingDefinitions {
edges {
node {
id
entity_type
stix_id_key
definition_type
definition
level
color
created
modified
}
relation {
id
}
}
}
externalReferences {
edges {
node {
id
entity_type
stix_id_key
source_name
description
url
hash
external_id
created
modified
}
relation {
id
}
}
}
indicators {
edges {
node {
id
entity_type
stix_id_key
valid_from
valid_until
score
pattern_type
indicator_pattern
created
modified
}
relation {
id
}
}
}
"""
"""
List StixObservable objects
:param types: the array of types
:param filters: the filters to apply
:param search: the search keyword
:param first: return the first n rows from the after ID (or the beginning if not set)
:param after: ID of the first row
:return List of StixObservable objects
"""
def list(self, **kwargs):
types = kwargs.get("types", None)
filters = kwargs.get("filters", None)
search = kwargs.get("search", None)
first = kwargs.get("first", 500)
after = kwargs.get("after", None)
order_by = kwargs.get("orderBy", None)
order_mode = kwargs.get("orderMode", None)
custom_attributes = kwargs.get("customAttributes", None)
get_all = kwargs.get("getAll", False)
with_pagination = kwargs.get("withPagination", False)
if get_all:
first = 500
self.opencti.log(
"info", "Listing StixObservables with filters " + json.dumps(filters) + "."
)
query = (
"""
query StixObservables($types: [String], $filters: [StixObservablesFiltering], $search: String, $first: Int, $after: ID, $orderBy: StixObservablesOrdering, $orderMode: OrderingMode) {
stixObservables(types: $types, filters: $filters, search: $search, first: $first, after: $after, orderBy: $orderBy, orderMode: $orderMode) {
edges {
node {
"""
+ (custom_attributes if custom_attributes is not None else self.properties)
+ """
}
}
pageInfo {
startCursor
endCursor
hasNextPage
hasPreviousPage
globalCount
}
}
}
"""
)
result = self.opencti.query(
query,
{
"types": types,
"filters": filters,
"search": search,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
if get_all:
final_data = []
data = self.opencti.process_multiple(result["data"]["stixObservables"])
final_data = final_data + data
while result["data"]["stixObservables"]["pageInfo"]["hasNextPage"]:
after = result["data"]["stixObservables"]["pageInfo"]["endCursor"]
self.opencti.log("info", "Listing StixObservables after " + after)
result = self.opencti.query(
query,
{
"types": types,
"filters": filters,
"search": search,
"first": first,
"after": after,
"orderBy": order_by,
"orderMode": order_mode,
},
)
data = self.opencti.process_multiple(result["data"]["stixObservables"])
final_data = final_data + data
return final_data
else:
return self.opencti.process_multiple(
result["data"]["stixObservables"], with_pagination
)
"""
Read a StixObservable object
:param id: the id of the StixObservable
:param filters: the filters to apply if no id provided
:return StixObservable object
"""
def read(self, **kwargs):
id = kwargs.get("id", None)
filters = kwargs.get("filters", None)
custom_attributes = kwargs.get("customAttributes", None)
if id is not None:
self.opencti.log("info", "Reading StixObservable {" + id + "}.")
query = (
"""
query StixObservable($id: String!) {
stixObservable(id: $id) {
"""
+ (
custom_attributes
if custom_attributes is not None
else self.properties
)
+ """
}
}
"""
)
result = self.opencti.query(query, {"id": id})
return self.opencti.process_multiple_fields(
result["data"]["stixObservable"]
)
elif filters is not None:
result = self.list(filters=filters, customAttributes=custom_attributes)
if len(result) > 0:
return result[0]
else:
return None
else:
self.opencti.log(
"error", "[opencti_stix_observable] Missing parameters: id or filters"
)
return None
"""
Create a Stix-Observable object
:param type: the type of the Observable
:return Stix-Observable object
"""
def create_raw(self, **kwargs):
type = kwargs.get("type", None)
observable_value = kwargs.get("observable_value", None)
description = kwargs.get("description", None)
id = kwargs.get("id", None)
stix_id_key = kwargs.get("stix_id_key", None)
created_by_ref = kwargs.get("createdByRef", None)
marking_definitions = kwargs.get("markingDefinitions", None)
tags = kwargs.get("tags", None)
create_indicator = kwargs.get("createIndicator", False)
if type is not None and observable_value is not None:
self.opencti.log(
"info",
"Creating Stix-Observable {"
+ observable_value
+ "} with indicator at "
+ str(create_indicator)
+ ".",
)
query = """
mutation StixObservableAdd($input: StixObservableAddInput) {
stixObservableAdd(input: $input) {
id
stix_id_key
entity_type
parent_types
}
}
"""
result = self.opencti.query(
query,
{
"input": {
"type": type,
"observable_value": observable_value,
"description": description,
"internal_id_key": id,
"stix_id_key": stix_id_key,
"createdByRef": created_by_ref,
"markingDefinitions": marking_definitions,
"tags": tags,
"createIndicator": create_indicator,
}
},
)
return self.opencti.process_multiple_fields(
result["data"]["stixObservableAdd"]
)
else:
self.opencti.log("error", "Missing parameters: type and observable_value")
"""
Create a Stix-Observable object only if it not exists, update it on request
:param name: the name of the Stix-Observable
:return Stix-Observable object
"""
def create(self, **kwargs):
type = kwargs.get("type", None)
observable_value = kwargs.get("observable_value", None)
description = kwargs.get("description", None)
id = kwargs.get("id", None)
stix_id_key = kwargs.get("stix_id_key", None)
created_by_ref = kwargs.get("createdByRef", None)
marking_definitions = kwargs.get("markingDefinitions", None)
tags = kwargs.get("tags", None)
create_indicator = kwargs.get("createIndicator", False)
update = kwargs.get("update", False)
custom_attributes = """
id
entity_type
description
createdByRef {
node {
id
}
}
"""
object_result = self.read(
filters=[{"key": "observable_value", "values": [observable_value]}],
customAttributes=custom_attributes,
)
if object_result is not None:
if update or object_result["createdByRefId"] == created_by_ref:
if (
description is not None
                    and object_result["description"] != description
):
self.update_field(
id=object_result["id"], key="description", value=description
)
object_result["description"] = description
return object_result
else:
return self.create_raw(
type=type,
observable_value=observable_value,
description=description,
id=id,
stix_id_key=stix_id_key,
createdByRef=created_by_ref,
markingDefinitions=marking_definitions,
tags=tags,
createIndicator=create_indicator,
)
"""
Update a Stix-Observable object field
:param id: the Stix-Observable id
:param key: the key of the field
:param value: the value of the field
:return The updated Stix-Observable object
"""
def update_field(self, **kwargs):
id = kwargs.get("id", None)
key = kwargs.get("key", None)
value = kwargs.get("value", None)
if id is not None and key is not None and value is not None:
self.opencti.log(
"info", "Updating Stix-Observable {" + id + "} field {" + key + "}."
)
query = """
mutation StixObservableEdit($id: ID!, $input: EditInput!) {
stixObservableEdit(id: $id) {
fieldPatch(input: $input) {
id
}
}
}
"""
result = self.opencti.query(
query, {"id": id, "input": {"key": key, "value": value}}
)
return self.opencti.process_multiple_fields(
result["data"]["stixObservableEdit"]["fieldPatch"]
)
else:
self.opencti.log(
"error",
"[opencti_stix_observable_update_field] Missing parameters: id and key and value",
)
return None
"""
Delete a Stix-Observable
:param id: the Stix-Observable id
:return void
"""
def delete(self, **kwargs):
id = kwargs.get("id", None)
if id is not None:
self.opencti.log("info", "Deleting Stix-Observable {" + id + "}.")
query = """
mutation StixObservableEdit($id: ID!) {
stixObservableEdit(id: $id) {
delete
}
}
"""
self.opencti.query(query, {"id": id})
else:
self.opencti.log(
"error", "[opencti_stix_observable_delete] Missing parameters: id"
)
return None
"""
Update the Identity author of a Stix-Observable object (created_by_ref)
:param id: the id of the Stix-Observable
:param identity_id: the id of the Identity
:return Boolean
"""
def update_created_by_ref(self, **kwargs):
id = kwargs.get("id", None)
stix_entity = kwargs.get("entity", None)
identity_id = kwargs.get("identity_id", None)
if id is not None and identity_id is not None:
if stix_entity is None:
custom_attributes = """
id
createdByRef {
node {
id
entity_type
stix_id_key
stix_label
name
alias
description
created
modified
... on Organization {
organization_class
}
}
relation {
id
}
}
"""
stix_entity = self.read(id=id, customAttributes=custom_attributes)
if stix_entity is None:
self.opencti.log(
"error", "Cannot update created_by_ref, entity not found"
)
return False
current_identity_id = None
current_relation_id = None
if stix_entity["createdByRef"] is not None:
current_identity_id = stix_entity["createdByRef"]["id"]
current_relation_id = stix_entity["createdByRef"]["remote_relation_id"]
# Current identity is the same
if current_identity_id == identity_id:
return True
else:
self.opencti.log(
"info",
"Updating author of Stix-Entity {"
+ id
+ "} with Identity {"
+ identity_id
+ "}",
)
# Current identity is different, delete the old relation
if current_relation_id is not None:
query = """
mutation StixObservableEdit($id: ID!, $relationId: ID!) {
stixObservableEdit(id: $id) {
relationDelete(relationId: $relationId) {
id
}
}
}
"""
self.opencti.query(
query, {"id": id, "relationId": current_relation_id}
)
# Add the new relation
query = """
mutation StixObservableEdit($id: ID!, $input: RelationAddInput) {
stixObservableEdit(id: $id) {
relationAdd(input: $input) {
id
}
}
}
"""
variables = {
"id": id,
"input": {
"fromRole": "so",
"toId": identity_id,
"toRole": "creator",
"through": "created_by_ref",
},
}
self.opencti.query(query, variables)
else:
self.opencti.log("error", "Missing parameters: id and identity_id")
return False
"""
Export an Stix Observable object in STIX2
:param id: the id of the Stix Observable
:return Stix Observable object
"""
def to_stix2(self, **kwargs):
id = kwargs.get("id", None)
mode = kwargs.get("mode", "simple")
max_marking_definition_entity = kwargs.get(
"max_marking_definition_entity", None
)
entity = kwargs.get("entity", None)
if id is not None and entity is None:
entity = self.read(id=id)
if entity is not None:
stix_observable = dict()
stix_observable["id"] = entity["stix_id_key"]
stix_observable["type"] = entity["entity_type"]
stix_observable["spec_version"] = SPEC_VERSION
stix_observable["value"] = entity["observable_value"]
stix_observable[CustomProperties.OBSERVABLE_TYPE] = entity["entity_type"]
stix_observable[CustomProperties.OBSERVABLE_VALUE] = entity[
"observable_value"
]
stix_observable["created"] = self.opencti.stix2.format_date(
entity["created_at"]
)
stix_observable["modified"] = self.opencti.stix2.format_date(
entity["updated_at"]
)
stix_observable[CustomProperties.ID] = entity["id"]
return self.opencti.stix2.prepare_export(
entity, stix_observable, mode, max_marking_definition_entity
)
else:
self.opencti.log("error", "Missing parameters: id or entity")
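# --- Usage sketch (illustrative) ---
# A minimal sketch of the create/list round trip, assuming an initialized
# OpenCTI API client that exposes this class as `opencti.stix_observable`
# (the attribute name and all values here are assumptions, not guaranteed
# by this file).
#
# observable = opencti.stix_observable.create(
#     type="IPv4-Addr", observable_value="198.51.100.7",
#     description="Suspicious host", createIndicator=True,
# )
# recent = opencti.stix_observable.list(first=10, orderBy="created_at")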
| 35.082143
| 194
| 0.438664
|
4a1360709325241b35168f2e3652e0a4ea3dd626
| 777
|
py
|
Python
|
education4less/users/migrations/0001_initial.py
|
xarielx/education4less
|
24de746d859a859a7413917fcbbb2dcdaa440521
|
[
"MIT"
] | 1
|
2020-02-05T02:21:25.000Z
|
2020-02-05T02:21:25.000Z
|
education4less/users/migrations/0001_initial.py
|
xarielx/education4less
|
24de746d859a859a7413917fcbbb2dcdaa440521
|
[
"MIT"
] | 8
|
2020-02-12T03:03:26.000Z
|
2022-02-10T09:56:49.000Z
|
education4less/users/migrations/0001_initial.py
|
xarielx/education4less
|
24de746d859a859a7413917fcbbb2dcdaa440521
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.4 on 2019-12-05 09:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 29.884615
| 121
| 0.646075
|
4a1360cf3427a730a4467c5d1059d09a85ef3175
| 54
|
py
|
Python
|
pyqt_custom_titlebar_setter/__init__.py
|
yjg30737/pyqt_custom_titlebar_setter
|
af0e41d4f3a11d37d085cf2226b8baf9d74d18ef
|
[
"MIT"
] | null | null | null |
pyqt_custom_titlebar_setter/__init__.py
|
yjg30737/pyqt_custom_titlebar_setter
|
af0e41d4f3a11d37d085cf2226b8baf9d74d18ef
|
[
"MIT"
] | null | null | null |
pyqt_custom_titlebar_setter/__init__.py
|
yjg30737/pyqt_custom_titlebar_setter
|
af0e41d4f3a11d37d085cf2226b8baf9d74d18ef
|
[
"MIT"
] | null | null | null |
from .customTitlebarSetter import CustomTitlebarSetter
| 54
| 54
| 0.925926
|
4a1360f7dd8efb2a0a84610055d6b28bcb58856c
| 3,919
|
py
|
Python
|
app/api/v1/answers/routes.py
|
Adoniswalker/StackOverflow-lite
|
3679d1e8bf23aca73338e0fa9ff51de63ba85f7d
|
[
"MIT"
] | 16
|
2018-08-11T13:07:07.000Z
|
2021-04-06T00:44:34.000Z
|
app/api/v1/answers/routes.py
|
PeterCHK/stackoverflow-lite
|
1d3e46bce494235d608fbec68925ee88b3b83513
|
[
"MIT"
] | 13
|
2018-08-14T12:48:38.000Z
|
2021-06-01T22:31:53.000Z
|
app/api/v1/answers/routes.py
|
PeterCHK/stackoverflow-lite
|
1d3e46bce494235d608fbec68925ee88b3b83513
|
[
"MIT"
] | 14
|
2018-08-10T11:05:13.000Z
|
2021-12-24T13:10:41.000Z
|
from flask import Blueprint, request, make_response, jsonify, session
from flask.views import MethodView
from app.answers.models import Answer
from ....utils import jwt_required
answers_blueprint = Blueprint('answers', __name__)
class AnswersAPIView(MethodView):
@jwt_required
def put(self, question_id=None, answer_id=None):
data = request.get_json(force=True)
data['question_id'] = question_id
data['answer_id'] = answer_id
data['user_id'] = session.get('user_id')
response = Answer(data).update()
if response.get('errors'):
response_object = {
'message': response.get('errors')
}
return make_response(jsonify(response_object)), 400
response_object = {
'message': 'Update successful'
}
return make_response(jsonify(response_object)), 200
@jwt_required
def delete(self, question_id=None, answer_id=None):
data = dict()
data['question_id'] = question_id
data['answer_id'] = answer_id
data['user_id'] = session.get('user_id')
answer = Answer(data)
# check permission
if len(answer.answer_author()) < 1:
response_object = {
'message': 'Unauthorized'
}
return make_response(jsonify(response_object)), 401
response = answer.delete()
if not response:
response_object = {
'message': 'Answer id does not exist'
}
return make_response(jsonify(response_object)), 400
response_object = {
            'message': 'Answer deleted successfully'
}
return make_response(jsonify(response_object)), 200
@jwt_required
def post(self, question_id=None):
data = request.get_json(force=True)
data['question_id'], data['user_id'] = question_id, session.get('user_id')
answer = Answer(data)
response = answer.save()
if response:
response_object = {'message': response}
return make_response(jsonify(response_object)), 201
response_object = {
'message': 'Unknown question id. Try a different id.'
}
return make_response(jsonify(response_object)), 400
class AnswersListAPIView(MethodView):
"""
List API Resource
"""
def get(self, answer_id=None):
data = dict()
data['answer_id'] = answer_id
data['user_id'] = session.get('user_id')
if answer_id:
results = Answer(data).filter_by()
if len(results) < 1:
response_object = {
'results': 'Answer not found'
}
return make_response(jsonify(response_object)), 404
response_object = {
'results': results
}
            return jsonify(response_object), 200
        response_object = {'results': Answer(data).query()}
        return jsonify(response_object), 200
# Define the API resources
create_view = AnswersAPIView.as_view('create_api')
list_view = AnswersListAPIView.as_view('list_api')
# Add Rules for API Endpoints
answers_blueprint.add_url_rule(
'/api/v1/questions/<string:question_id>/answers',
view_func=create_view,
methods=['POST']
)
answers_blueprint.add_url_rule(
'/api/v1/questions/<string:question_id>/answers/<string:answer_id>',
view_func=create_view,
methods=['PUT', 'DELETE']
)
answers_blueprint.add_url_rule(
'/api/v1/questions/answers/<string:answer_id>',
view_func=create_view,
methods=['PUT', 'DELETE']
)
answers_blueprint.add_url_rule(
'/api/v1/questions/answers',
view_func=list_view,
methods=['GET']
)
answers_blueprint.add_url_rule(
'/api/v1/questions/answers/<string:answer_id>',
view_func=list_view,
methods=['GET']
)
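# --- Usage sketch (illustrative) ---
# A minimal sketch of exercising the routes above with `requests`; the host,
# ids, payload shape, and JWT/session handling are assumptions for
# illustration only.
#
# import requests
# base = "http://localhost:5000/api/v1"
# requests.post(f"{base}/questions/1/answers", json={"answer": "Use a blueprint."})
# requests.get(f"{base}/questions/answers/1")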
| 30.858268
| 82
| 0.620822
|
4a1365a5daf7dc673402aedad52f4f17dfc588d3
| 59
|
py
|
Python
|
theme_rc.py
|
pm-str/CountDown-More
|
90eed19b3d5e417d474f1d79e07c6740f5a9a53d
|
[
"MIT"
] | null | null | null |
theme_rc.py
|
pm-str/CountDown-More
|
90eed19b3d5e417d474f1d79e07c6740f5a9a53d
|
[
"MIT"
] | null | null | null |
theme_rc.py
|
pm-str/CountDown-More
|
90eed19b3d5e417d474f1d79e07c6740f5a9a53d
|
[
"MIT"
] | null | null | null |
# proxy file for theme modules
from static.theme import *
| 14.75
| 30
| 0.762712
|
4a1365f334d8bcc888c44e6e6695b4e52df6b8aa
| 3,718
|
py
|
Python
|
src/data.py
|
waterminer/pixivSpider
|
7a9beb3dd150850b9dec67e170fc89781d684f7f
|
[
"MIT"
] | 2
|
2021-05-12T10:02:05.000Z
|
2021-05-15T17:21:57.000Z
|
src/data.py
|
waterminer/pixivSpider
|
7a9beb3dd150850b9dec67e170fc89781d684f7f
|
[
"MIT"
] | null | null | null |
src/data.py
|
waterminer/pixivSpider
|
7a9beb3dd150850b9dec67e170fc89781d684f7f
|
[
"MIT"
] | 1
|
2021-05-15T16:31:32.000Z
|
2021-05-15T16:31:32.000Z
|
from datetime import datetime
from bs4 import BeautifulSoup
from src import connect
import os
import re
import json
# Define constants
now = datetime.now()
time = now.strftime("%Y%m%d_%H%M%S")
date_now = now.strftime("%Y%m%d")
path = './Downloads'
# Extraction rules (regular expressions)
rank_re = re.compile(r'data-rank="(.*?)"')
date_re = re.compile(r'data-date="(.*?)"')
title_re = re.compile(r"(?<=data-title=\"|data-title=\')(.*)(?=\" data-user-name=|\' data-user-name=)")
artist_re = re.compile(r'data-user-name="(.*?)"')
view_count_re = re.compile(r'data-view-count="(.*?)"')
id_re = re.compile(r'data-id="(.*?)"')
original_re = re.compile(r'"original":"(.*?)"')
ext_re = re.compile(r'(jpg|png|gif)')
# Fetch ranking pages and collect artwork metadata
def get_rank(proxies, num, database=None):
    if database is None:
        database = {}
for i in range(1, num + 1):
i = str(i)
rank_url = 'https://www.pixiv.net/ranking.php?p=' + i
req = connect.ask_url(rank_url, proxies)
rank_bs = BeautifulSoup(req.text, "lxml")
for section in rank_bs.find_all("section", class_="ranking-item"):
item = str(section)
rank = re.findall(rank_re, item)[0]
artworks_id = re.findall(id_re, item)[0]
title = re.findall(title_re, item)[0]
artist = re.findall(artist_re, item)[0]
date = re.findall(date_re, item)[0]
view = re.findall(view_count_re, item)[0]
item_data = {
'rank': rank,
'id': artworks_id,
'title': title,
'artist': artist,
'date': date,
'view': view
}
database[artworks_id] = item_data
'''
        # The following is for inspecting the output
print("#" + rank + "\ntitle: " + title + "\nartist: " + artist + "\nid: " + id + "\ndate: " + date +
"\nview: " + view + "\n")
'''
return database
# Save an image to disk
def save(picture, name, ext):
print("正在保存这张图: " + name)
if not os.path.exists(path):
os.makedirs('Downloads')
save_path = path + '/' + str(name) + '.' + ext
with open(save_path, 'wb') as fp:
try:
fp.write(picture)
except Exception as e:
print(e)
# Download images for every artwork in the ranking database
def get_rank_picture_source(database, proxies, switch=0):
for artworks_id in database:
url = 'https://www.pixiv.net/ajax/illust/' + artworks_id + '/pages?lang=zh'
req = connect.ask_url(url, proxies)
json_obj = json.loads(json.dumps(req.json()))
i = 0
if switch == 0:
url = json_obj['body'][0]['urls']['original']
ext = re.findall(ext_re, url)[0]
picture = connect.ask_url(url, proxies)
name = str(artworks_id + "_" + str(i))
save(picture.content, name, ext)
elif switch == 1:
for urls_list in json_obj['body']:
url = urls_list['urls']['original']
ext = re.findall(ext_re, url)[0]
picture = connect.ask_url(url, proxies)
name = str(artworks_id + "_" + str(i))
save(picture.content, name, ext)
i = i + 1
else:
print("选项错误!")
exit(1)
# Download the original image(s) for a single artwork id
def get_picture_source(artworks_id, proxies):
artworks_id = str(artworks_id)
url = 'https://www.pixiv.net/ajax/illust/' + artworks_id + '/pages?lang=zh'
req = connect.ask_url(url, proxies)
json_obj = json.loads(json.dumps(req.json()))
i = 0
for urls_list in json_obj['body']:
url = urls_list['urls']['original']
ext = re.findall(ext_re, url)[0]
picture = connect.ask_url(url, proxies)
save(picture.content, artworks_id + "_" + str(i), ext)
i = i + 1
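# --- Usage sketch (illustrative) ---
# A minimal sketch of a full run, assuming `connect.ask_url` accepts the
# proxy dict shown (the proxy address is hypothetical).
#
# proxies = {"https": "http://127.0.0.1:7890"}
# db = get_rank(proxies, num=2)             # first two ranking pages
# get_rank_picture_source(db, proxies, 0)   # first image of each artwork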
| 34.110092
| 113
| 0.541958
|
4a136638f0ed8519183ccc86783c80893dfd656e
| 1,857
|
py
|
Python
|
corehq/apps/export/tests/test_inferred_schema.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/export/tests/test_inferred_schema.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/export/tests/test_inferred_schema.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
from django.test import SimpleTestCase
from corehq.apps.export.models import (
ExportItem,
InferredExportGroupSchema,
InferredSchema,
PathNode,
)
class InferredSchemaTest(SimpleTestCase):
def test_put_group_schema(self):
schema = InferredSchema(
domain='inferred-domain',
case_type='inferred',
)
group_schema = schema.put_group_schema(
[PathNode(name='random'), PathNode(name='table')],
)
self.assertTrue(isinstance(group_schema, InferredExportGroupSchema))
self.assertTrue(group_schema.inferred)
self.assertEqual(len(schema.group_schemas), 1)
# After putting same group schema, should not re-add group schema
group_schema = schema.put_group_schema(
[PathNode(name='random'), PathNode(name='table')],
)
self.assertEqual(len(schema.group_schemas), 1)
class InferredExportGroupSchemaTest(SimpleTestCase):
def test_put_export_item(self):
group_schema = InferredExportGroupSchema(
path=[PathNode(name='random')]
)
item = group_schema.put_item(
[PathNode(name='random'), PathNode(name='inferred')],
inferred_from='Test',
)
item = group_schema.put_item(
[PathNode(name='random'), PathNode(name='inferred')],
inferred_from='TestTwo',
)
self.assertTrue(isinstance(item, ExportItem))
self.assertTrue(item.inferred)
self.assertEqual(item.inferred_from, set(['Test', 'TestTwo']))
self.assertEqual(len(group_schema.items), 1)
# After putting same item, should not re-add group schema
item = group_schema.put_item(
[PathNode(name='random'), PathNode(name='inferred')],
)
self.assertEqual(len(group_schema.items), 1)
| 32.578947
| 76
| 0.640819
|
4a136788a27cc22dc5de95c46187fc6538a13320
| 8,528
|
py
|
Python
|
samples/2D_mixing_nobubble/case.py
|
henryleberre/MFC-develop
|
59e94af2d4304b41c9b52280dfd99a300e2664ec
|
[
"MIT"
] | 2
|
2022-02-26T16:00:42.000Z
|
2022-02-28T23:25:06.000Z
|
samples/2D_mixing_nobubble/case.py
|
henryleberre/MFC-develop
|
59e94af2d4304b41c9b52280dfd99a300e2664ec
|
[
"MIT"
] | 44
|
2021-12-07T04:36:24.000Z
|
2022-03-30T16:29:00.000Z
|
samples/2D_mixing_nobubble/case.py
|
henryleberre/MFC
|
0638f7766ead02a69c386d2acd25af8b85f7d194
|
[
"MIT"
] | 2
|
2021-12-07T18:59:08.000Z
|
2021-12-07T19:37:14.000Z
|
#!/usr/bin/env python2
import math
import json
# x0 = 10.E-06
x0 = 1.
p0 = 101325.
rho0 = 1000.
u0 = math.sqrt( p0/rho0 )
c0 = 1475.
n_tait = 7.1
B_tait = 306.E+06 / p0
gamma_gas = 1.4
# Velocity
uu = 4. / u0
# Cavitation number
Ca = 1.
Ly = 0.5/x0
Lx = Ly*2/x0
Ny = 79
Nx = Ny*2+1
dx = Lx/float(Nx)
dy = Ly/float(Ny)
# Time stepping parameters
cfl = 0.3
dt = cfl*dx/(c0/u0)
T = 15.
Ntfinal = int(T/dt)
Ntrestart = int(Ntfinal/5.)
# Init
# t_start = 0
# Nfiles = 5E1
# t_save = int(math.ceil(Ntrestart/float(Nfiles)))
# Nt = t_save*Nfiles
# Ntrestart = Nt
# bc_y = 8
# if restart_name == 'run':
# Simulate
t_start = 0
# t_start = Ntrestart
Nfiles = 5E2
t_save = int(math.ceil((Ntfinal-t_start)/float(Nfiles)))
Nt = t_save*Nfiles
bc_y = 5
# elif restart_name != 'init':
# sys.exit("incorrect restart parameter")
ang = 1.
myr0 = 1.E+00
vf0 = 1.E-12
# ==============================================================================
# Configuring case dictionary
print(json.dumps({
# Logistics ================================================================
'case_dir' : '\'.\'',
'run_time_info' : 'T',
# ==========================================================================
# Computational Domain Parameters ==========================================
'x_domain%beg' : -Lx/2.,
'x_domain%end' : Lx/2.,
'y_domain%beg' : -Ly/2.,
'y_domain%end' : Ly/2.,
'cyl_coord' : 'F',
'm' : Nx,
'n' : Ny,
'p' : 0,
'dt' : dt,
't_step_start' : t_start,
't_step_stop' : Nt,
't_step_save' : t_save ,
# ==========================================================================
# Simulation Algorithm Parameters ==========================================
'num_patches' : 2,
'model_eqns' : 2,
'alt_soundspeed' : 'F',
'num_fluids' : 2,
'adv_alphan' : 'T',
'mpp_lim' : 'T',
'mixture_err' : 'T',
'time_stepper' : 3,
'weno_vars' : 2,
'weno_order' : 3,
'weno_eps' : 1.E-16,
'mapped_weno' : 'T',
'null_weights' : 'F',
# 'mp_weno' : 'T',
'weno_Re_flux' : 'F',
'riemann_solver' : 2,
'wave_speeds' : 1,
'avg_state' : 2,
'bc_x%beg' : -1,
'bc_x%end' : -1,
'bc_y%beg' : -bc_y,
'bc_y%end' : -bc_y,
# ==========================================================================
# Formatted Database Files Structure Parameters ============================
'format' : 1,
'precision' : 2,
'prim_vars_wrt' :'T',
'parallel_io' :'T',
'probe_wrt' :'T',
'num_probes' : 1,
'probe(1)%x' : 0.,
'probe(1)%y' : 0.,
'fd_order' : 1,
# 'schlieren_wrt' :'F',
# ==========================================================================
# Patch 1 ==================================================================
'patch_icpp(1)%geometry' : 3,
'patch_icpp(1)%x_centroid' : 0.,
'patch_icpp(1)%y_centroid' : 0.,
'patch_icpp(1)%length_x' : Lx,
'patch_icpp(1)%length_y' : Ly,
'patch_icpp(1)%alpha_rho(1)' : (1.-vf0)*1.,
'patch_icpp(1)%alpha_rho(2)' : (1.-vf0)*1.E-12,
# 'patch_icpp(1)%alpha_rho(3)' : vf0*1.E-3,
'patch_icpp(1)%alpha(1)' : (1-vf0)*1.,
'patch_icpp(1)%alpha(2)' : (1-vf0)*1.E-12,
# 'patch_icpp(1)%alpha(3)' : vf0,
'patch_icpp(1)%vel(1)' : uu,
'patch_icpp(1)%vel(2)' : 0.00,
'patch_icpp(1)%pres' : 1.00,
'patch_icpp(1)%r0' : 1.E+00,
'patch_icpp(1)%v0' : 0.0E+00,
# ==========================================================================
# Patch 2 ==================================================================
'patch_icpp(2)%geometry' : 4,
'patch_icpp(2)%alter_patch(1)' : 'T',
'patch_icpp(2)%x_centroid' : 0.,
'patch_icpp(2)%y_centroid' : 0.,
'patch_icpp(2)%normal(1)' : math.sin(math.pi*ang/180.),
'patch_icpp(2)%normal(2)' : -math.cos(math.pi*ang/180.),
'patch_icpp(2)%alpha_rho(1)' : (1.-vf0)*1.E-12,
'patch_icpp(2)%alpha_rho(2)' : (1.-vf0)*1.,
# 'patch_icpp(2)%alpha_rho(3)' : vf0*1.E-3,
'patch_icpp(2)%alpha(1)' : (1-vf0)*1.E-12,
'patch_icpp(2)%alpha(2)' : (1-vf0)*1.,
# 'patch_icpp(2)%alpha(3)' : vf0,
'patch_icpp(2)%vel(1)' : -1.*uu,
'patch_icpp(2)%vel(2)' : 0.0,
'patch_icpp(2)%pres' : 1.0,
# 'patch_icpp(2)%r0' : 1.E+00,
# 'patch_icpp(2)%v0' : 0.0E+00,
# 'patch_icpp(2)%normal(1)' : 0.00624987793326E+00,
# 'patch_icpp(2)%normal(2)' :-0.99998046932219E+00,
# 'patch_icpp(2)%length_x' : Lx,
# 'patch_icpp(2)%length_y' : Ly/2.,
# ==========================================================================
# # ========================================================================
# 'patch_icpp(3)%geometry' : 2,
# 'patch_icpp(3)%x_centroid' : 5.E-01,
# 'patch_icpp(3)%y_centroid' : 0.5,
# 'patch_icpp(3)%radius' : 0.1,
# 'patch_icpp(3)%alter_patch(1)' : 'T',
# 'patch_icpp(3)%alter_patch(2)' : 'T',
# 'patch_icpp(3)%alpha_rho(1)' : (1-vf0)*1.E+00,
# 'patch_icpp(3)%vel(1)' : 0.00,
# 'patch_icpp(3)%vel(2)' : 0.00,
# 'patch_icpp(3)%pres' : 1.00,
# 'patch_icpp(3)%alpha(1)' : vf0,
# 'patch_icpp(3)%r0' : 1.E+00,
# 'patch_icpp(3)%v0' : 0.0E+00,
# # ========================================================================
# SHB: Bubbles ============================================================
# 'perturb_flow' : 'T',
# 'perturb_flow_fluid' : 1,
# =========================================================================
# Fluids Physical Parameters ==============================================
# Surrounding liquid
'fluid_pp(1)%gamma' : 1.E+00/(n_tait-1.E+00),
'fluid_pp(1)%pi_inf' : n_tait*B_tait/(n_tait-1.),
'fluid_pp(2)%gamma' : 1.E+00/(n_tait-1.E+00),
'fluid_pp(2)%pi_inf' : n_tait*B_tait/(n_tait-1.),
# 'fluid_pp(3)%gamma' : 1./(gamma_gas-1.),
# 'fluid_pp(3)%pi_inf' : 0.0E+00,
# =========================================================================
# SHB: Tait EOS ===========================================================
# 'pref' : p0,
# 'rhoref' : rho0,
# =========================================================================
# Bubbles =================================================================
#'bubbles' : 'T',
#'bubble_model' : 2,
#'polytropic' : 'T',
## 'polydisperse' : 'T',
#'R0_type' : 1,
##'polydisperse' : 'F',
## 'poly_sigma' : 0.1,
#'thermal' : 3,
#'R0ref' : myr0,
## 'nb' : 3,
#'nb' : 1,
#'Ca' : Ca,
## 'Web' : We,
## 'Re_inv' : Re_inv,
##'qbmm' : 'T',
##'nnode' : 4,
##'dist_type' : 2,
##'sigR' : 0.1,
##'sigV' : 0.1,
##'rhoRV' : 0.0,
## =========================================================================
}))
# ==============================================================================
| 37.403509
| 80
| 0.325985
|
4a1369a0c5855597133e62268eff66efca4d9f03
| 903
|
py
|
Python
|
chap5/surface_3d.py
|
wang420349864/dlcv_for_beginners
|
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
|
[
"BSD-3-Clause"
] | 1,424
|
2017-01-04T12:08:01.000Z
|
2022-03-31T02:57:24.000Z
|
chap5/surface_3d.py
|
wang420349864/dlcv_for_beginners
|
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
|
[
"BSD-3-Clause"
] | 39
|
2017-03-16T08:48:28.000Z
|
2021-03-03T11:30:23.000Z
|
chap5/surface_3d.py
|
wang420349864/dlcv_for_beginners
|
080c7d3bbb4a68e4fb79e33231ccc666ada16dcc
|
[
"BSD-3-Clause"
] | 703
|
2017-02-22T19:35:45.000Z
|
2022-03-21T01:31:27.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
np.random.seed(42)
n_grids = 51
c = n_grids // 2
nf = 2
x = np.linspace(0, 1, n_grids)
y = np.linspace(0, 1, n_grids)
X, Y = np.meshgrid(x, y)
spectrum = np.zeros((n_grids, n_grids), dtype=complex)
noise = [complex(x, y) for x, y in np.random.uniform(-1, 1, ((2*nf+1)**2 // 2, 2))]
noisy_block = np.concatenate((noise, [0j], np.conjugate(noise[::-1])))
spectrum[c-nf:c+nf+1, c-nf:c+nf+1] = noisy_block.reshape((2*nf+1, 2*nf+1))
Z = np.real(np.fft.ifft2(np.fft.ifftshift(spectrum)))
fig = plt.figure('3D surface & wire')
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax.plot_surface(X, Y, Z, alpha=0.7, cmap='jet', rstride=1, cstride=1, lw=0)
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.plot_wireframe(X, Y, Z, rstride=3, cstride=3, lw=0.5)
plt.show()
| 28.21875
| 81
| 0.635659
|
4a1369e5dfaa46fd825829f077c076e1ec0c6352
| 5,526
|
py
|
Python
|
saleor/registration/views.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | 3
|
2015-12-30T19:06:27.000Z
|
2021-10-06T04:23:36.000Z
|
saleor/registration/views.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/registration/views.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | 4
|
2019-09-17T11:39:41.000Z
|
2022-01-24T10:22:50.000Z
|
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import (
login as django_login_view, password_change)
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils import timezone
from django.utils.encoding import smart_text
from django.utils.translation import ugettext_lazy as _
from . import forms
from .models import EmailConfirmationRequest, EmailChangeRequest
from . import utils
now = timezone.now
def login(request):
local_host = utils.get_local_host(request)
ctx = {
'facebook_login_url': utils.get_facebook_login_url(local_host),
'google_login_url': utils.get_google_login_url(local_host)}
return django_login_view(request, authentication_form=forms.LoginForm,
extra_context=ctx)
def logout(request):
auth_logout(request)
messages.success(request, _('You have been successfully logged out.'))
return redirect(settings.LOGIN_REDIRECT_URL)
def oauth_callback(request, service):
local_host = utils.get_local_host(request)
form = forms.OAuth2CallbackForm(service=service, local_host=local_host,
data=request.GET)
if form.is_valid():
try:
user = form.get_authenticated_user()
except ValueError as e:
messages.error(request, smart_text(e))
else:
auth_login(request, user=user)
messages.success(request, _('You are now logged in.'))
return redirect(settings.LOGIN_REDIRECT_URL)
else:
for dummy_field, errors in form.errors.items():
for error in errors:
messages.error(request, error)
return redirect('registration:login')
def request_email_confirmation(request):
local_host = utils.get_local_host(request)
form = forms.RequestEmailConfirmationForm(local_host=local_host,
data=request.POST or None)
if form.is_valid():
form.send()
msg = _('Confirmation email has been sent. '
'Please check your inbox.')
messages.success(request, msg)
return redirect(settings.LOGIN_REDIRECT_URL)
return TemplateResponse(request,
'registration/request_email_confirmation.html',
{'form': form})
@login_required
def request_email_change(request):
form = forms.RequestEmailChangeForm(
local_host=utils.get_local_host(request), user=request.user,
data=request.POST or None)
if form.is_valid():
form.send()
msg = _('Confirmation email has been sent. '
'Please check your inbox.')
messages.success(request, msg)
return redirect(settings.LOGIN_REDIRECT_URL)
return TemplateResponse(
request, 'registration/request_email_confirmation.html',
{'form': form})
def confirm_email(request, token):
if not request.POST:
try:
email_confirmation_request = EmailConfirmationRequest.objects.get(
token=token, valid_until__gte=now())
# TODO: cronjob (celery task) to delete stale tokens
except EmailConfirmationRequest.DoesNotExist:
return TemplateResponse(request, 'registration/invalid_token.html')
user = email_confirmation_request.get_authenticated_user()
email_confirmation_request.delete()
auth_login(request, user)
messages.success(request, _('You are now logged in.'))
form = forms.SetOrRemovePasswordForm(user=request.user,
data=request.POST or None)
if form.is_valid():
form.save()
messages.success(request, _('Password has been successfully changed.'))
return redirect(settings.LOGIN_REDIRECT_URL)
return TemplateResponse(
request, 'registration/set_password.html', {'form': form})
def change_email(request, token):
try:
email_change_request = EmailChangeRequest.objects.get(
token=token, valid_until__gte=now())
# TODO: cronjob (celery task) to delete stale tokens
except EmailChangeRequest.DoesNotExist:
return TemplateResponse(request, 'registration/invalid_token.html')
    # If another user is logged in, we need to log them out so that the email
    # owner can confirm their identity
if (request.user.is_authenticated() and
request.user != email_change_request.user):
auth_logout(request)
if not request.user.is_authenticated():
query = urlencode({
'next': request.get_full_path(),
'email': email_change_request.user.email})
login_url = utils.url(path=settings.LOGIN_URL, query=query)
return redirect(login_url)
request.user.email = email_change_request.email
request.user.save()
email_change_request.delete()
messages.success(request, _('Your email has been successfully changed'))
return redirect(settings.LOGIN_REDIRECT_URL)
def change_password(request):
return password_change(
request, template_name='registration/change_password.html',
post_change_redirect=reverse('profile:details'))
| 36.84
| 79
| 0.685668
|
4a136b23e54347a6391f282bc63162be739ee2fa
| 193
|
py
|
Python
|
DQMServices/Components/python/DQMScalInfo_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQMServices/Components/python/DQMScalInfo_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQMServices/Components/python/DQMScalInfo_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
dqmscalInfo = DQMEDAnalyzer('DQMScalInfo',
dqmScalFolder = cms.untracked.string('Scal')
)
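# --- Usage sketch (illustrative) ---
# A minimal sketch, assuming a standard CMSSW cms.Process configuration:
#
# process.load('DQMServices.Components.DQMScalInfo_cfi')
# process.dqm_scal_step = cms.Path(process.dqmscalInfo)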
| 27.571429
| 56
| 0.80829
|
4a136c243e79c7caddbfed4009eecec520829e08
| 2,788
|
py
|
Python
|
examples/yleareena_example.py
|
CKVal/pychromecast
|
ea179bd5664475273a6b6d46b3266e6858ce3b01
|
[
"MIT"
] | null | null | null |
examples/yleareena_example.py
|
CKVal/pychromecast
|
ea179bd5664475273a6b6d46b3266e6858ce3b01
|
[
"MIT"
] | null | null | null |
examples/yleareena_example.py
|
CKVal/pychromecast
|
ea179bd5664475273a6b6d46b3266e6858ce3b01
|
[
"MIT"
] | null | null | null |
"""
Example on how to use the Yle Areena Controller
"""
# pylint: disable=invalid-name, import-outside-toplevel
import argparse
import logging
import sys
from time import sleep
import zeroconf
import pychromecast
from pychromecast.controllers.yleareena import YleAreenaController
logger = logging.getLogger(__name__)
# Change to the name of your Chromecast
CAST_NAME = "My Chromecast"
parser = argparse.ArgumentParser(
description="Example on how to use the Yle Areena Controller."
)
parser.add_argument("--show-debug", help="Enable debug log", action="store_true")
parser.add_argument(
"--cast", help='Name of cast device (default: "%(default)s")', default=CAST_NAME
)
parser.add_argument("--program", help="Areena Program ID", default="1-50097921")
parser.add_argument("--audio_language", help="audio_language", default="")
parser.add_argument("--text_language", help="text_language", default="off")
args = parser.parse_args()
if args.show_debug:
logging.basicConfig(level=logging.DEBUG)
if args.show_zeroconf_debug:
print("Zeroconf version: " + zeroconf.__version__)
logging.getLogger("zeroconf").setLevel(logging.DEBUG)
def get_kaltura_id(program_id):
"""
Dive into the yledl internals and fetch the kaltura player id.
This can be used with Chromecast
"""
# yledl is not available in CI, silence import warnings
from yledl.streamfilters import StreamFilters # pylint: disable=import-error
from yledl.http import HttpClient # pylint: disable=import-error
from yledl.localization import TranslationChooser # pylint: disable=import-error
from yledl.extractors import extractor_factory # pylint: disable=import-error
from yledl.titleformatter import TitleFormatter # pylint: disable=import-error
title_formatter = TitleFormatter()
language_chooser = TranslationChooser("fin")
httpclient = HttpClient(None)
stream_filters = StreamFilters()
url = "https://areena.yle.fi/{}".format(program_id)
extractor = extractor_factory(url, stream_filters, language_chooser, httpclient)
pid = extractor.program_id_from_url(url)
info = extractor.program_info_for_pid(pid, url, title_formatter, None)
return info.media_id.split("-")[-1]
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[args.cast])
if not chromecasts:
print('No chromecast with name "{}" discovered'.format(args.cast))
sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
yt = YleAreenaController()
cast.register_handler(yt)
yt.play_areena_media(
get_kaltura_id(args.program),
audio_language=args.audio_language,
text_language=args.text_language,
)
sleep(10)
# Shut down discovery
browser.stop_discovery()
| 31.681818
| 86
| 0.756815
|
4a136c7945d43544a64c7bea17cf0e0972a6ecec
| 5,023
|
py
|
Python
|
xldlib/general/mapping/functions.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xldlib/general/mapping/functions.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
xldlib/general/mapping/functions.py
|
Alexhuszagh/XLDiscoverer
|
60937b1f7f2e23af4219eb26519d6b83fb4232d6
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
'''
General/Mapping/functions
_________________________
Functions (unbound methods in Python 2.x) shared between
mapping objects.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules/submodules
import os
import numpy as np
import six
from xldlib.definitions import Json
from xldlib.utils import serialization
__all__ = [
'load_document',
'save',
'save_changed',
'serializable',
'tojson',
'updatechecker',
'update_setitem'
]
# PUBLIC
# ------
def tojson(python_object):
'''
Custom serializer for the JSON dumps from Python objects
Args:
python_object (object): any python or python-wrapped object
'''
# no native support for numpy-like bools:
# https://bugs.python.org/issue18303
if isinstance(python_object, np.bool_):
return bool(python_object)
# also no support on Python3 for numpy-like ints
# int_ and int do not work, since specify 64 bit precision
elif isinstance(python_object, np.integer):
return int(python_object)
# same thing for floats
elif isinstance(python_object, np.floating):
return float(python_object)
elif six.PY3 and isinstance(python_object, bytes):
return python_object.decode('utf-8')
elif isinstance(python_object, set):
return list(python_object)
elif hasattr(python_object, "_asdict"):
# namedlist type
return list(python_object)
else:
raise TypeError("Unrecognized object {}".format(python_object))
def save_changed(self):
'''Dump the changed JSON configurations to `self.path`'''
if self.path is not None:
_save(self.changed, self.path)
def save(self):
'''Dump the JSON configurations to `self.path`'''
if self.path is not None:
_save(self, self.path)
def load_document(self):
'''
Load object data from `self.path`
Returns (dict): stored object data
'''
if self.path is not None and os.path.exists(self.path):
with open(self.path, "r") as loads:
document = Json.load(loads)
return document
def update_setitem(self, *args, **kwds):
'''
Update conf from dict/iterable in `*args` and keyword
arguments in `**kwds`,
Uses `__setitem__` to allow any method overrides to identify
mapping data changes.
'''
updatechecker(self, args, kwds)
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 1:
other = args[0]
if isinstance(other, dict):
_pairwise_iterator(self, other.items())
elif hasattr(other, "keys"):
_single_iterator(self, other, other.keys())
else:
_pairwise_iterator(self, other)
_pairwise_iterator(self, kwds.items())
def updatechecker(self, args, kwds):
'''Verify `self.update` has been called with suitable arguments'''
if len(args) > 1:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args)+1,))
elif not args and not kwds:
raise TypeError('update() takes at least 1 argument (0 given)')
# SERIALIZATION
# -------------
@serialization.tojson
def to_list(self):
'''
Serialize data for object reconstruction to JSON
Returns (dict): serialized data
'''
return dict(self)
def from_dict(cls, data):
'''
Deserialize JSON data into object constructor
Args:
data (dict, mapping): serialized object data
Returns (object): class instance
'''
return cls(data)
def serializable(name):
'''Add serialization methods to a named sequence'''
def decorator(cls):
'''Add serialization methods to class'''
registered = serialization.register(name)(cls)
registered.__json__ = to_list
registered.loadjson = classmethod(from_dict)
return registered
return decorator
# PRIVATE
# -------
def _save(obj, path):
'''
Serialize `obj` to JSON and dump to `path`.
Args:
obj (JSON-compatible): any JSON-serializable python object
path (str): path to file on disk
'''
with open(path, "w") as dump:
Json.dump(obj, dump, sort_keys=True, indent=4, default=tojson)
def _pairwise_iterator(self, iterable):
'''
Exhaust iterator containing pair-wise elements, that is,
key-value pairs, and sequentially add items to self.
Args:
iterable (iterable): iterable with 2 items per element
'''
for key, value in iterable:
self[key] = value
def _single_iterator(self, other, iterable):
'''
Exhaust iterator containing single elements corresponding
to object keys, and sequentially add key and other[element]
to self.
Args:
iterable (iterable): iterable with a single item per element
'''
for key in iterable:
self[key] = other[key]
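# --- Usage sketch (illustrative) ---
# A minimal sketch of attaching these unbound functions to a mapping class,
# which is how the module is meant to be consumed; the Settings class is
# hypothetical.
#
# class Settings(dict):
#     path = None          # set to a file path to enable save()
#     update = update_setitem
#     save = save
#
# s = Settings()
# s.update({'theme': 'dark'}, font='mono')
# s.save()                 # no-op while path is None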
| 23.041284
| 71
| 0.644436
|
4a136c7fd1095282daa2a16060348ee5e1cb12a3
| 465
|
py
|
Python
|
fdk_client/platform/models/Rule.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/Rule.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/platform/models/Rule.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class Rule(BaseSchema):
# Cart swagger.json
key = fields.Float(required=False)
value = fields.Float(required=False)
max = fields.Float(required=False)
discount_qty = fields.Float(required=False)
min = fields.Float(required=False)
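# --- Usage sketch (illustrative) ---
# A minimal sketch of validating a payload with this marshmallow schema;
# the numbers are made up.
#
# data = Rule().load({"key": 1, "value": 10, "max": 100, "discount_qty": 1, "min": 1})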
| 13.676471
| 47
| 0.670968
|
4a136cfb44b6e5433294f2d54f886b3cedb65a0c
| 2,953
|
py
|
Python
|
tests/test_config.py
|
admdev8/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | 1
|
2020-08-30T11:18:40.000Z
|
2020-08-30T11:18:40.000Z
|
tests/test_config.py
|
admdev8/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | 4
|
2020-08-30T11:18:52.000Z
|
2020-08-30T12:18:17.000Z
|
tests/test_config.py
|
Smirenost/darglint
|
d2d0f45861cfe7ed8d0a916eca181b144ed77cba
|
[
"MIT"
] | null | null | null |
"""Tests configuration scripts."""
from random import (
choice,
randint,
)
from string import ascii_letters
from unittest import (
mock,
TestCase,
)
from darglint.config import (
walk_path,
POSSIBLE_CONFIG_FILENAMES,
find_config_file_in_path,
get_logger,
LogLevel,
)
from .utils import (
ConfigurationContext,
)
class WalkPathTestCase(TestCase):
"""Tests the walk_path function."""
@mock.patch('darglint.config.os.getcwd')
def test_at_root_yields_only_once(self, mock_getcwd):
"""We should only get root once. # noqa"""
mock_getcwd.return_value = '/'
path_walker = walk_path()
self.assertEqual(next(path_walker), '/')
with self.assertRaises(StopIteration):
next(path_walker)
@mock.patch('darglint.config.os.getcwd')
def test_really_long_path(self, mock_getcwd):
directories = [
''.join([
choice(ascii_letters + '_-')
for _ in range(randint(1, 10))
])
for __ in range(randint(10, 30))
]
cwd = '/' + '/'.join(directories)
mock_getcwd.return_value = cwd
path_walker = walk_path()
paths_walked = [x for x in path_walker]
self.assertEqual(
len(paths_walked),
len(directories) + 1,
'Should have had {} but had {} paths.'.format(
                len(directories) + 1,
                len(paths_walked),
)
)
class FindConfigFileInPathTestCase(TestCase):
"""Test that the config file is being found."""
@mock.patch('darglint.config.configparser.ConfigParser')
@mock.patch('darglint.config.os.listdir')
def test_filename_checked(self, mock_listdir, mock_ConfigParser):
"""Check that only the necessary filenames are identified. # noqa """
fake_files = [
''.join([choice(ascii_letters + '_-')
for _ in range(randint(5, 10))]) for _ in range(10)
]
mock_listdir.return_value = (
fake_files + list(POSSIBLE_CONFIG_FILENAMES)
)
config_parser = mock.MagicMock()
mock_ConfigParser.return_value = config_parser
contents_checked = list()
def read_file(filename):
contents_checked.append(filename)
return mock.MagicMock()
config_parser.read = read_file
find_config_file_in_path('./')
self.assertEqual(
set(contents_checked),
{'./' + x for x in POSSIBLE_CONFIG_FILENAMES}
)
class LoggingTestCase(TestCase):
def test_log_level_set_by_config(self):
with ConfigurationContext():
logger = get_logger()
self.assertEqual(logger.level, LogLevel.CRITICAL.value)
with ConfigurationContext(log_level=LogLevel.ERROR):
logger = get_logger()
self.assertEqual(logger.level, LogLevel.ERROR.value)
| 28.669903
| 78
| 0.608534
|
4a136d0412be7942223f017989366172b2b04c1e
| 2,029
|
py
|
Python
|
library/pyjamas/History.safari.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
library/pyjamas/History.safari.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
library/pyjamas/History.safari.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-11-18T14:17:59.000Z
|
2019-11-18T14:17:59.000Z
|
def init():
JS("""
// Check for existence of the history frame.
var historyFrame = $doc.getElementById('__pygwt_historyFrame');
if (!historyFrame)
return false;
// Get the initial token from the url's hash component.
var hash = $wnd.location.hash;
if (hash.length > 0)
$wnd.__historyToken = decodeURI(hash.substring(1)).replace("%23", "#");
else
$wnd.__historyToken = '';
// Initialize the history iframe. If '__historyToken' already exists, then
// we're probably backing into the app, so _don't_ set the iframe's location.
var tokenElement = null;
if (historyFrame.contentWindow) {
var doc = historyFrame.contentWindow.document;
tokenElement = doc ? doc.getElementById('__historyToken') : null;
}
if (tokenElement)
$wnd.__historyToken = tokenElement.value;
else
historyFrame.src = 'history.html?' + encodeURI($wnd.__historyToken).replace("#", "%23");
// Create the timer that checks the browser's url hash every 1/4 s.
$wnd.__checkHistory = function() {
var token = '', hash = $wnd.location.hash;
if (hash.length > 0)
token = decodeURI(hash.substring(1)).replace("%23", "#");
if (token != $wnd.__historyToken) {
$wnd.__historyToken = token;
@{{newItem}}(token);
@{{onHistoryChanged}}(token);
}
$wnd.setTimeout('__checkHistory()', 250);
};
// Kick off the timer.
$wnd.__checkHistory();
return true;
""")
def newItem(historyToken):
JS("""
// Safari gets into a weird state (issue 2905) when setting the hash
// component of the url to an empty string, but works fine as long as you
// at least add a '#' to the end of the url. So we get around this by
// recreating the url, rather than just setting location.hash.
$wnd.location = $wnd.location.href.split('#')[0] + '#' +
encodeURI(@{{historyToken}}).replace("#", "%23");
""")
avg_line_length: 32.206349 | max_line_length: 96 | alphanum_fraction: 0.60276

hexsha: 4a136d37248188bcc810baca093fc6d63d3d2d87 | size: 1349 | ext: py | lang: Python
repo (stars/issues/forks): requires.py @ openstack-charmers/charm-interface-neutron-api (f3c33764381ae999045b3661b6c7e1d6cce315be) | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the reactive framework unfortunately does not grok `import as` in conjunction
# with decorators on class instance methods, so we have to revert to `from ...`
# imports
from charms.reactive import (
Endpoint,
clear_flag,
set_flag,
when_all,
when_not,
)
class NeutronAPIRequires(Endpoint):
@when_all('endpoint.{endpoint_name}.joined',
'endpoint.{endpoint_name}.changed.neutron-api-ready')
def joined(self):
clear_flag(
self.expand_name(
'endpoint.{endpoint_name}.changed.neutron-api-ready'))
set_flag(self.expand_name('{endpoint_name}.available'))
@when_not('endpoint.{endpoint_name}.joined')
def broken(self):
clear_flag(self.expand_name('{endpoint_name}.available'))
avg_line_length: 34.589744 | max_line_length: 79 | alphanum_fraction: 0.716827

hexsha: 4a136ee826a38a5dba36cb8cbd14f7140945d149 | size: 15493 | ext: py | lang: Python
repo (stars/issues/forks): sichu/cabinet/migrations/0009_auto__add_bookborrowrecord2.py @ ax003d/sichu_web (f01002f169fb5a683996bd5987572d55f1fa7c3b) | licenses: ["MIT"]
max_stars_count: 55 (2016-04-05T15:42:21.000Z .. 2018-07-19T07:13:09.000Z) | max_issues_count: null | max_forks_count: 18 (2016-04-05T15:40:13.000Z .. 2018-03-15T23:50:27.000Z)
content:
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BookBorrowRecord2'
db.create_table(u'cabinet_bookborrowrecord2', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ownership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.BookOwnership'])),
('borrower', self.gf('django.db.models.fields.CharField')(max_length=64)),
('borrow_date', self.gf('django.db.models.fields.DateTimeField')()),
('returned_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('remark', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
))
db.send_create_signal(u'cabinet', ['BookBorrowRecord2'])
def backwards(self, orm):
# Deleting model 'BookBorrowRecord2'
db.delete_table(u'cabinet_bookborrowrecord2')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cabinet.book': {
'Meta': {'object_name': 'Book'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'cover': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'douban_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cabinet.bookborrowrecord': {
'Meta': {'object_name': 'BookBorrowRecord'},
'borrow_date': ('django.db.models.fields.DateTimeField', [], {}),
'borrower': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ownership': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.BookOwnership']"}),
'planed_return_date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'returned_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'cabinet.bookborrowrecord2': {
'Meta': {'object_name': 'BookBorrowRecord2'},
'borrow_date': ('django.db.models.fields.DateTimeField', [], {}),
'borrower': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ownership': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.BookOwnership']"}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'returned_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'cabinet.bookborrowrequest': {
'Meta': {'object_name': 'BookBorrowRequest'},
'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.BookOwnership']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planed_return_date': ('django.db.models.fields.DateField', [], {}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'cabinet.bookcabinet': {
'Meta': {'object_name': 'BookCabinet'},
'books': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['cabinet.BookOwnership']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
u'cabinet.bookcomment': {
'Meta': {'object_name': 'BookComment'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Book']"}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.bookownership': {
'Meta': {'object_name': 'BookOwnership'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Book']"}),
'has_ebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '16'}),
'visible': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'cabinet.bookownershiptaguse': {
'Meta': {'object_name': 'BookOwnershipTagUse'},
'bookown': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.BookOwnership']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.booktaguse': {
'Meta': {'unique_together': "(('tag', 'user', 'book'),)", 'object_name': 'BookTagUse'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Book']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.cabinetnews': {
'Meta': {'ordering': "['-datetime']", 'object_name': 'CabinetNews'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lead': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'news': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'cabinet.ebookrequest': {
'Meta': {'object_name': 'EBookRequest'},
'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.BookOwnership']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'cabinet.feedback': {
'Meta': {'object_name': 'Feedback'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.follow': {
'Meta': {'unique_together': "(('following', 'user'),)", 'object_name': 'Follow'},
'following': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'follower_set'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.joinrepositoryrequest': {
'Meta': {'object_name': 'JoinRepositoryRequest'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Repository']"}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'cabinet.repository': {
'Meta': {'object_name': 'Repository'},
'admin': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'managed_repos'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'create_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'joined_repos'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'cabinet.sysbooktaguse': {
'Meta': {'object_name': 'SysBookTagUse'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Book']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cabinet.Tag']"})
},
u'cabinet.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'cabinet.weibouser': {
'Meta': {'object_name': 'WeiboUser'},
'avatar': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires_in': ('django.db.models.fields.BigIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabinet']
avg_line_length: 71.396313 | max_line_length: 187 | alphanum_fraction: 0.556122

hexsha: 4a136fd5f741d15f9b16bb511ae89b220554c3a9 | size: 3817 | ext: py | lang: Python
repo (stars/issues/forks): huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/batch_reset_password_response.py @ huaweicloud/huaweicloud-sdk-python-v3 (7a6270390fcbf192b3882bf763e7016e6026ef78) | licenses: ["Apache-2.0"]
max_stars_count: 64 (2020-06-12T07:05:07.000Z .. 2022-03-30T03:32:50.000Z) | max_issues_count: 11 (2020-07-06T07:56:54.000Z .. 2022-01-11T11:14:40.000Z) | max_forks_count: 24 (2020-06-08T11:42:13.000Z .. 2022-03-04T06:44:08.000Z)
content:
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchResetPasswordResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'count': 'int',
'results': 'list[ModifyJobResp]'
}
attribute_map = {
'count': 'count',
'results': 'results'
}
def __init__(self, count=None, results=None):
"""BatchResetPasswordResponse - a model defined in huaweicloud sdk"""
super(BatchResetPasswordResponse, self).__init__()
self._count = None
self._results = None
self.discriminator = None
if count is not None:
self.count = count
if results is not None:
self.results = results
@property
def count(self):
"""Gets the count of this BatchResetPasswordResponse.
总数
:return: The count of this BatchResetPasswordResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this BatchResetPasswordResponse.
总数
:param count: The count of this BatchResetPasswordResponse.
:type: int
"""
self._count = count
@property
def results(self):
"""Gets the results of this BatchResetPasswordResponse.
批量修改任务返回列表
:return: The results of this BatchResetPasswordResponse.
:rtype: list[ModifyJobResp]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this BatchResetPasswordResponse.
批量修改任务返回列表
:param results: The results of this BatchResetPasswordResponse.
:type: list[ModifyJobResp]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchResetPasswordResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
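# A quick usage sketch of the model above (added for illustration; the values
# are made up -- in practice the SDK deserializes API responses into this
# class rather than the caller constructing it by hand):
#
#     resp = BatchResetPasswordResponse(count=0, results=[])
#     assert resp.to_dict() == {'count': 0, 'results': []}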
avg_line_length: 26.880282 | max_line_length: 79 | alphanum_fraction: 0.565889

hexsha: 4a136fde7352293ffac2b706e8b24f755d1ab134 | size: 4271 | ext: py | lang: Python
max_stars_repo: Competitive Programming/Array/Rearrange array in alternating positive & negative items with O(1) extra space.py @ shreejitverma/GeeksforGeeks (d7bcb166369fffa9a031a258e925b6aff8d44e6c) | licenses: ["MIT"] | max_stars_count: 2 (2022-02-18T05:14:28.000Z .. 2022-03-08T07:00:08.000Z)
max_issues_repo: Competitive Programming/Array/Rearrange array in alternating positive & negative items with O(1) extra space.py @ shivaniverma1/Competitive-Programming-1 (d7bcb166369fffa9a031a258e925b6aff8d44e6c) | licenses: ["MIT"] | max_issues_count: 6 (2022-01-13T04:31:04.000Z .. 2022-03-12T01:06:16.000Z)
max_forks_repo: Competitive Programming/Array/Rearrange array in alternating positive & negative items with O(1) extra space.py @ shivaniverma1/Competitive-Programming-1 (d7bcb166369fffa9a031a258e925b6aff8d44e6c) | licenses: ["MIT"] | max_forks_count: 2 (2022-02-14T19:53:53.000Z .. 2022-02-18T05:14:30.000Z)
content:
'''https://www.geeksforgeeks.org/rearrange-array-alternating-positive-negative-items-o1-extra-space/
Given an array of positive and negative numbers, arrange them in an alternating fashion such that every positive number is followed by a negative one and vice versa, maintaining the order of appearance.
The counts of positive and negative numbers need not be equal. If there are more positive numbers, the extras appear at the end of the array; likewise for extra negative numbers.
Examples :
Input: arr[] = {1, 2, 3, -4, -1, 4}
Output: arr[] = {-4, 1, -1, 2, 3, 4}
Input: arr[] = {-5, -2, 5, 2, 4, 7, 1, 8, 0, -8}
output: arr[] = {-5, 5, -2, 2, -8, 4, 7, 1, 8, 0}
Naive Approach :
The above problem is easy if O(n) extra space is allowed; it becomes interesting under the constraints of O(1) extra space and preserved order of appearance.
The idea is to process the array from left to right. While processing, find the first out-of-place element in the remaining unprocessed part. An element is out of place if it is negative and at an odd index, or positive and at an even index. Once an out-of-place element is found, find the first element after it with the opposite sign and right-rotate the subarray between these two elements (inclusive).
Following is the implementation of above idea.
Output
Given array is
-5 -2 5 2 4 7 1 8 0 -8
Rearranged array is
-5 5 -2 2 -8 4 7 1 8 0
Time Complexity : O(N^2)
Space Complexity : O(1)
Efficient Approach :
First sort the array. Then walk it with two pointers and swap one negative
and one positive number at a time until the alternating condition is met. Sorting lets us consume the elements from left to right exactly as needed.'''
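# Worked example of one right-rotation step (illustration only, not part of
# the original write-up): in arr = [1, 2, 3, -4, -1, 4] the element at index 0
# is positive at an even index, hence out of place under the convention used
# below; the first opposite-sign element after it is -4 at index 3.
# Right-rotating arr[0..3] by one yields [-4, 1, 2, 3, -1, 4]:
#
#     arr = [1, 2, 3, -4, -1, 4]
#     out_of_place, cur = 0, 3
#     arr[out_of_place:cur + 1] = [arr[cur]] + arr[out_of_place:cur]
#     assert arr == [-4, 1, 2, 3, -1, 4]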
# Python3 program to rearrange
# positive and negative integers
# in alternate fashion and
# maintaining the order of positive
# and negative numbers
# rotates the array to right by once
# from index 'outOfPlace to cur'
def rightRotate(arr, n, outOfPlace, cur):
temp = arr[cur]
for i in range(cur, outOfPlace, -1):
arr[i] = arr[i - 1]
arr[outOfPlace] = temp
return arr
def rearrange(arr, n):
outOfPlace = -1
for index in range(n):
if(outOfPlace >= 0):
            # If the element at outOfPlace is negative and the element at
            # index is positive (or vice versa), right-rotate the subarray
            # to move the out-of-place element into position.
if((arr[index] >= 0 and arr[outOfPlace] < 0) or
(arr[index] < 0 and arr[outOfPlace] >= 0)):
arr = rightRotate(arr, n, outOfPlace, index)
if(index-outOfPlace > 2):
outOfPlace += 2
else:
outOfPlace = - 1
if(outOfPlace == -1):
            # condition for arr[index] to be out of place
if((arr[index] >= 0 and index % 2 == 0) or
(arr[index] < 0 and index % 2 == 1)):
outOfPlace = index
return arr
# Driver Code
arr = [-5, -2, 5, 2, 4,
7, 1, 8, 0, -8]
print("Given Array is:")
print(arr)
print("\nRearranged array is:")
print(rearrange(arr, len(arr)))
# Below is the implementation of the above approach
def rearrange(arr, n):
# sort the array
arr.sort()
# initialize two pointers
# one pointing to the negative number
# one pointing to the positive number
i, j = 1, 1
while j < n:
if arr[j] > 0:
break
j += 1
# swap the numbers until the given condition gets satisfied
while (arr[i] < 0) and (j < n):
        # swapping
arr[i], arr[j] = arr[j], arr[i]
# increment i by 2
# because a negative number is followed by a positive number
i += 2
j += 1
return(arr)
# Driver Code
# Given array
arr = [-5, -2, 5, 2, 4, 7, 1, 8, 0, -8]
ans = rearrange(arr, len(arr))
for num in ans:
print(num, end=" ")
'''Output
-8 1 -2 0 -5 2 4 5 7 8
Time Complexity: O(N*logN)
Space Complexity: O(1)'''
avg_line_length: 32.356061 | max_line_length: 410 | alphanum_fraction: 0.632639

hexsha: 4a1370458f1d39e7219fdc0bad6fde6cc37349ad | size: 679 | ext: py | lang: Python
repo (stars/issues/forks): src/sentinel/azext_sentinel/_client_factory.py @ hpsan/azure-cli-extensions (be1589bb6dd23837796e088d28e65e873050171e) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-07-16T23:49:49.000Z .. 2020-07-16T23:49:49.000Z)
content:
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azext_sentinel.vendored_sdks.security_insights import SecurityInsights
def cf_sentinel(cli_ctx, *_):
return get_mgmt_service_client(cli_ctx, SecurityInsights)
def cf_sentinel_alert_rules(cli_ctx, *_):
return cf_sentinel(cli_ctx).alert_rules
avg_line_length: 42.4375 | max_line_length: 94 | alphanum_fraction: 0.59352

hexsha: 4a13708f8325d60d83e5843489ee05a3c353def9 | size: 1097 | ext: py | lang: Python
max_stars_repo: backend/users/tests/test_forms.py @ crowdbotics-apps/test-31818 (3c0be5481a1adec1445c703af63db4ff1d0b9146) | licenses: ["FTL", "AML", "RSA-MD"] | max_stars_count: 2 (2021-10-20T04:12:53.000Z .. 2021-11-08T10:23:14.000Z)
max_issues_repo: backend/users/tests/test_forms.py @ crowdbotics-apps/test-31818 (3c0be5481a1adec1445c703af63db4ff1d0b9146) | licenses: ["FTL", "AML", "RSA-MD"] | max_issues_count: 321 (2021-07-16T15:22:20.000Z .. 2021-07-19T20:57:51.000Z)
max_forks_repo: backend/users/tests/test_forms.py @ crowdbotics-apps/rwar-33953 (69c3a19f094ce817df5dd5f3130f0103c7da4dcd) | licenses: ["FTL", "AML", "RSA-MD"] | max_forks_count: 1 (2021-06-21T14:49:57.000Z .. 2021-06-21T14:49:57.000Z)
content:
import pytest
from users.forms import UserCreationForm
from users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
avg_line_length: 26.756098 | max_line_length: 59 | alphanum_fraction: 0.588879

hexsha: 4a137288cb5817ebd0f13d8ffe815557525aff94 | size: 572 | ext: py | lang: Python
repo (stars/issues/forks): src/blog/migrations/0003_post_tags.py @ abbeymaniak/django-python-Restaurant-app- (2212e79e3b2e824bb7c6c81869e983bea2ce03a2) | licenses: ["MIT"]
max_stars_count: 1 (2020-05-18T01:05:01.000Z .. 2020-05-18T01:05:01.000Z) | max_issues_count: null | max_forks_count: null
content:
# Generated by Django 2.1.4 on 2020-04-11 14:26
from django.db import migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('blog', '0002_auto_20200411_1458'),
]
operations = [
migrations.AddField(
model_name='post',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
avg_line_length: 27.238095 | max_line_length: 174 | alphanum_fraction: 0.645105

hexsha: 4a1372ac14eb698114f2010e0c21b2e17e76f0b6 | size: 5166 | ext: py | lang: Python
repo (stars/issues/forks): plugins/custom_thumbnail.py @ iamvpk/MNLTheBot (c8e7ba00dd15a52c4528dac7a4a58f810697ff88) | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) MNL
# the logging things
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import numpy
import os
from PIL import Image
import time
# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
# the Strings used for this "thing"
from translation import Translation
import pyrogram
logging.getLogger("pyrogram").setLevel(logging.WARNING)
from helper_funcs.chat_base import TRChatBase
@pyrogram.Client.on_message(pyrogram.Filters.command(["generatecustomthumbnail"]))
async def generate_custom_thumbnail(bot, update):
if update.from_user.id in Config.BANNED_USERS:
await bot.delete_messages(
chat_id=update.chat.id,
message_ids=update.message_id,
revoke=True
)
return
TRChatBase(update.from_user.id, update.text, "generatecustomthumbnail")
if update.reply_to_message is not None:
reply_message = update.reply_to_message
if reply_message.media_group_id is not None:
download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + "/" + str(reply_message.media_group_id) + "/"
save_final_image = download_location + str(round(time.time())) + ".jpg"
list_im = os.listdir(download_location)
if len(list_im) == 2:
imgs = [ Image.open(download_location + i) for i in list_im ]
                img_shapes = sorted([(numpy.sum(i.size), i.size) for i in imgs])
                min_shape = img_shapes[0][1]  # smallest (width, height): resize down, never up
                imgs_comb = numpy.hstack([numpy.asarray(i.resize(min_shape)) for i in imgs])
imgs_comb = Image.fromarray(imgs_comb)
# combine: https://stackoverflow.com/a/30228789/4723940
imgs_comb.save(save_final_image)
# send
await bot.send_photo(
chat_id=update.chat.id,
photo=save_final_image,
caption=Translation.CUSTOM_CAPTION_UL_FILE,
reply_to_message_id=update.message_id
)
else:
await bot.send_message(
chat_id=update.chat.id,
text=Translation.ERR_ONLY_TWO_MEDIA_IN_ALBUM,
reply_to_message_id=update.message_id
)
            try:
                for i in list_im:
                    os.remove(download_location + i)
                os.rmdir(download_location)  # per-album directory, not a file
            except OSError:
                pass
else:
await bot.send_message(
chat_id=update.chat.id,
text=Translation.REPLY_TO_MEDIA_ALBUM_TO_GEN_THUMB,
reply_to_message_id=update.message_id
)
else:
await bot.send_message(
chat_id=update.chat.id,
text=Translation.REPLY_TO_MEDIA_ALBUM_TO_GEN_THUMB,
reply_to_message_id=update.message_id
)
@pyrogram.Client.on_message(pyrogram.Filters.photo)
async def save_photo(bot, update):
if update.from_user.id in Config.BANNED_USERS:
await bot.delete_messages(
chat_id=update.chat.id,
message_ids=update.message_id,
revoke=True
)
return
TRChatBase(update.from_user.id, update.text, "save_photo")
if update.media_group_id is not None:
# album is sent
download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + "/" + str(update.media_group_id) + "/"
# create download directory, if not exist
if not os.path.isdir(download_location):
os.makedirs(download_location)
await bot.download_media(
message=update,
file_name=download_location
)
else:
# received single photo
download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id) + ".jpg"
await bot.download_media(
message=update,
file_name=download_location
)
await bot.send_message(
chat_id=update.chat.id,
text=Translation.SAVED_CUSTOM_THUMB_NAIL,
reply_to_message_id=update.message_id
)
@pyrogram.Client.on_message(pyrogram.Filters.command(["deletethumbnail"]))
async def delete_thumbnail(bot, update):
if update.from_user.id in Config.BANNED_USERS:
await bot.delete_messages(
chat_id=update.chat.id,
message_ids=update.message_id,
revoke=True
)
return
TRChatBase(update.from_user.id, update.text, "deletethumbnail")
download_location = Config.DOWNLOAD_LOCATION + "/" + str(update.from_user.id)
try:
os.remove(download_location + ".jpg")
# os.remove(download_location + ".json")
except:
pass
await bot.send_message(
chat_id=update.chat.id,
text=Translation.DEL_ETED_CUSTOM_THUMB_NAIL,
reply_to_message_id=update.message_id
)
avg_line_length: 36.380282 | max_line_length: 137 | alphanum_fraction: 0.624855

hexsha: 4a137381b585e9f7c5f4ba90e5898586431f64b1 | size: 1880 | ext: py | lang: Python
repo (stars/issues/forks): tests/fixtures/schnet_fixtures.py @ lmj1029123/schnetpack (5b4c26f06db8fc9947d166840b5faba65519a11b) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import pytest
from schnetpack.nn.cutoff import HardCutoff
from schnetpack.representation import SchNet
@pytest.fixture(scope="session")
def n_atom_basis():
return 128
@pytest.fixture(scope="session")
def n_filters():
return 128
@pytest.fixture(scope="session")
def n_interactions():
return 1
@pytest.fixture(scope="session")
def cutoff():
return 5.0
@pytest.fixture(scope="session")
def n_gaussians():
return 25
@pytest.fixture(scope="session")
def normalize_filter():
return False
@pytest.fixture(scope="session")
def coupled_interactions():
return False
@pytest.fixture(scope="session")
def return_intermediate():
return False
@pytest.fixture(scope="session")
def max_z():
return 100
@pytest.fixture(scope="session")
def cutoff_network():
return HardCutoff
@pytest.fixture(scope="session")
def trainable_gaussians():
return False
@pytest.fixture(scope="session")
def distance_expansion():
return None
@pytest.fixture(scope="session")
def charged_systems():
return False
@pytest.fixture(scope="session")
def schnet(
n_atom_basis,
n_filters,
n_interactions,
cutoff,
n_gaussians,
normalize_filter,
coupled_interactions,
return_intermediate,
max_z,
cutoff_network,
trainable_gaussians,
distance_expansion,
charged_systems,
):
return SchNet(
n_atom_basis=n_atom_basis,
n_filters=n_filters,
n_interactions=n_interactions,
cutoff=cutoff,
n_gaussians=n_gaussians,
normalize_filter=normalize_filter,
coupled_interactions=coupled_interactions,
return_intermediate=return_intermediate,
max_z=max_z,
cutoff_network=cutoff_network,
trainable_gaussians=trainable_gaussians,
distance_expansion=distance_expansion,
charged_systems=charged_systems,
)
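# Example of how pytest composes these session-scoped fixtures in a test
# (a minimal sketch; assumes schnetpack and its torch dependency are
# installed -- the `schnet` argument is injected fully assembled):
#
#     def test_schnet_builds(schnet):
#         assert schnet is not None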
avg_line_length: 18.431373 | max_line_length: 50 | alphanum_fraction: 0.714362

hexsha: 4a137565298c13f8de930b3365dcd94fc59eb6a3 | size: 1702 | ext: py | lang: Python
repo (stars/issues/forks): test/test_add_contact_to_group.py @ Alimury/Python_lesson (8e97632fd9f1eaf4ec3cd878280333e1409beb52) | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-12-29T17:55:13.000Z .. 2020-12-29T17:55:13.000Z)
content:
from model.add_new import Add_New
from model.group import Group
import random
import allure
def test_add_contact_to_group(app, db, orm):
    with allure.step("Given at least one contact and one group exist"):
if len(db.get_contact_list()) == 0:
app.contact.create(Add_New(firstname="c_firstname", lastname="c_lastname",
address="c_address", homephone="c_homef", email="c_email"))
if len(db.get_group_list()) == 0:
app.group.create(Group(name="group_name", header="group_header", footer="group_footer"))
groups = db.get_group_list()
with allure.step("When I select a contact and group and add contact to group"):
for group in groups:
list_group = orm.get_contacts_not_in_group(Group(id=group.id))
if len(list_group) > 0:
contact = random.choice(list_group)
app.contact.add_contact_to_group(contact.id, group.id)
break
elif len(list_group) == 0:
if group != groups[-1]:
continue
else:
app.group.create(Group(name="test_name", header="test_header", footer="test_footer"))
groups = sorted(db.get_group_list(), key=Group.id_or_max)
group = groups[-1]
contact = random.choice(db.get_contact_list())
app.contact.add_contact_to_group(contact.id, group.id)
print(contact)
with allure.step("Then contact %s is in a group %s" % (contact, group)):
list_cont = orm.get_contacts_in_group(Group(id=group.id))
print(list_cont)
assert contact in list_cont
avg_line_length: 50.058824 | max_line_length: 105 | alphanum_fraction: 0.60047

hexsha: 4a1376cdc9d2875142b09eff05ca733840e5106f | size: 852 | ext: py | lang: Python
repo (stars/issues/forks): src/electionguard_cli/cli_models/__init__.py @ PradyumnaKrishna/electionguard-python (e239478972d76195c64fd715bb57682d526aab6c) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
from electionguard_cli.cli_models import cli_decrypt_results
from electionguard_cli.cli_models import cli_election_inputs_base
from electionguard_cli.cli_models import e2e_build_election_results
from electionguard_cli.cli_models import encrypt_results
from electionguard_cli.cli_models.cli_decrypt_results import (
CliDecryptResults,
)
from electionguard_cli.cli_models.cli_election_inputs_base import (
CliElectionInputsBase,
)
from electionguard_cli.cli_models.e2e_build_election_results import (
BuildElectionResults,
)
from electionguard_cli.cli_models.encrypt_results import (
EncryptResults,
)
__all__ = [
"BuildElectionResults",
"CliDecryptResults",
"CliElectionInputsBase",
"EncryptResults",
"cli_decrypt_results",
"cli_election_inputs_base",
"e2e_build_election_results",
"encrypt_results",
]
avg_line_length: 29.37931 | max_line_length: 69 | alphanum_fraction: 0.820423

hexsha: 4a13775bea03d7676ac6c138a884e81602620e67 | size: 922 | ext: py | lang: Python
repo (stars/issues/forks): riko/bado/__init__.py @ SpyHello/riko-py-stream (b52e35f3883596a8eac5b2d5be86dcc766dd0a88) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.bado
~~~~~~~~~
Provides functions for creating asynchronous riko pipes
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.bado import react
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
from builtins import * # noqa pylint: disable=unused-import
try:
from twisted.internet.task import react
except ImportError:
react = lambda _, _reactor=None: None
inlineCallbacks = lambda _: lambda: None
returnValue = lambda _: lambda: None
backend = 'empty'
else:
from twisted.internet.defer import inlineCallbacks
from twisted.internet.defer import returnValue
backend = 'twisted'
class Reactor(object):
fake = False
reactor = Reactor()
coroutine = inlineCallbacks
return_value = returnValue
_issync = backend == 'empty'
_isasync = not _issync
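# Illustration (not part of the module): callers can branch on the fallback
# flag exposed above to detect whether real async support is present:
#
#     from riko.bado import backend
#     if backend == 'twisted':
#         ...  # Twisted reactor available; async pipes actually run
#     else:
#         ...  # stub mode; asynchronous helpers degrade to no-ops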
avg_line_length: 22.487805 | max_line_length: 64 | alphanum_fraction: 0.706074

hexsha: 4a1377de84629fad3ba63c585cde1d5d0a8aeeed | size: 5931 | ext: py | lang: Python
repo (stars/issues/forks): pybind/nos/v7_1_0/brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/rbridges/__init__.py @ shivharis/pybind (4e1c6d54b9fd722ccec25546ba2413d79ce337e6) | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-11-05T22:15:42.000Z .. 2021-11-05T22:15:42.000Z)
content:
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class rbridges(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-tunnels-ext - based on the path /brocade_tunnels_ext_rpc/get-tunnel-info/output/tunnel/rbridges. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Indicates the rbridges on which this tunnel
exists
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__rbid',)
_yang_name = 'rbridges'
_rest_name = 'rbridges'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__rbid = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="rbid", rest_name="rbid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='uint32', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'brocade_tunnels_ext_rpc', u'get-tunnel-info', u'output', u'tunnel', u'rbridges']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'get-tunnel-info', u'output', u'tunnel', u'rbridges']
def _get_rbid(self):
"""
Getter method for rbid, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/rbridges/rbid (uint32)
YANG Description: Rbridge id
"""
return self.__rbid
def _set_rbid(self, v, load=False):
"""
Setter method for rbid, mapped from YANG variable /brocade_tunnels_ext_rpc/get_tunnel_info/output/tunnel/rbridges/rbid (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_rbid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rbid() directly.
YANG Description: Rbridge id
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="rbid", rest_name="rbid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """rbid must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="rbid", rest_name="rbid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='uint32', is_config=True)""",
})
self.__rbid = t
if hasattr(self, '_set'):
self._set()
def _unset_rbid(self):
self.__rbid = YANGDynClass(base=TypedListType(allowed_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)), is_leaf=False, yang_name="rbid", rest_name="rbid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-tunnels-ext', defining_module='brocade-tunnels-ext', yang_type='uint32', is_config=True)
rbid = __builtin__.property(_get_rbid, _set_rbid)
_pyangbind_elements = {'rbid': rbid, }
avg_line_length: 45.623077 | max_line_length: 452 | alphanum_fraction: 0.716574

hexsha: 4a1377f7591e488272f21b514695af0bc7af109f | size: 1284 | ext: py | lang: Python
repo (stars/issues/forks): api/utils/chess.py @ p0lygun/astounding-arapaimas (f82dbb2ec75ab7d98da6a46a1276c12583048b3c) | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 19 (2021-07-11T10:02:08.000Z .. 2021-07-20T14:58:29.000Z) | max_forks_count: null
content:
import logging
from Chessnut import Game
from api.crud import game
from api.endpoints import get_db
logger = logging.getLogger(__name__)
class ChessBoard:
"""Base class for chess board game."""
def __init__(self, fen: str, game_id: int):
self.FEN = fen # will be given by server when multiplayer is added
self.board = Game(self.FEN)
self.game_id = game_id
self.db = next(get_db())
def give_board(self) -> str:
"""Returns the board in FEN representation."""
return game.get_board_by_id(self.db, game_id=self.game_id)
def all_available_moves(self) -> list:
"""Returns all moves that each piece of a player can make."""
return self.board.get_moves()
def move_piece(self, move: str) -> None:
"""Function to apply a move defined in simple algebraic notation like a1b1."""
self.board.apply_move(move)
game.update_board_by_id(
self.db, game_id=self.game_id, board=self.board.get_fen()
)
def reset(self) -> None:
"""Reset the board to initial position."""
self.board.reset()
game.update_board_by_id(
self.db, game_id=self.game_id, board=self.board.get_fen()
)
logger.info("Resetting the Board")
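# For reference, the underlying Chessnut API that this wrapper delegates to
# can be exercised on its own (a minimal sketch; no database involved):
#
#     from Chessnut import Game
#     g = Game()                      # standard starting position
#     assert 'e2e4' in g.get_moves()  # moves use coordinate notation
#     g.apply_move('e2e4')
#     fen_after_e4 = g.get_fen()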
avg_line_length: 29.181818 | max_line_length: 86 | alphanum_fraction: 0.641745

hexsha: 4a13784d9c58879dd6ca3eda2b13b2724489d804 | size: 763 | ext: py | lang: Python
repo (stars/issues/forks): examples/structure_from_motion_psychopy_demo.py @ jenca-adam/ratcave (141756f4561c1cbf838b354d4f6421926f0b3987) | licenses: ["MIT"]
max_stars_count: 84 (2018-06-14T12:58:33.000Z .. 2022-03-26T20:36:46.000Z) | max_issues_count: 62 (2018-05-18T11:55:32.000Z .. 2021-10-21T14:47:21.000Z) | max_forks_count: 24 (2018-06-06T15:21:40.000Z .. 2021-12-27T00:27:20.000Z)
content:
from psychopy import visual, event
import ratcave as rc
import numpy as np
from numpy.random import random
n_points = 1000
width, height = 0.2, 0.5
theta = random(n_points) * np.pi * 2
verts = np.vstack((np.sin(theta) * width, (random(n_points) - .5) * height, np.cos(theta) * width)).T
cylinder = rc.Mesh.from_incomplete_data(verts, drawmode=rc.gl.GL_POINTS, position=(0, 0, -2), point_size=2, mean_center=False)
cylinder.uniforms['diffuse'] = 1., 1., 1.
cylinder.uniforms['flat_shading'] = True
scene = rc.Scene(meshes=[cylinder], bgColor=(0., 0, 0))
scene.camera.projection = rc.OrthoProjection()
win = visual.Window()
while 'escape' not in event.getKeys():
cylinder.rotation.y += .02
with rc.default_shader:
scene.draw()
win.flip()
avg_line_length: 28.259259 | max_line_length: 126 | alphanum_fraction: 0.697248

hexsha: 4a13799c4ce8ab001c22422e46012406adc7968a | size: 1669 | ext: py | lang: Python
repo (stars/issues/forks): tests/berlinium/asleep_server.py @ fossabot/tilde-1 (143810a711f00dc1c64a6eb10573986dddadfcef) | licenses: ["MIT"]
max_stars_count: 22 (2015-07-15T09:55:11.000Z .. 2021-06-13T14:48:41.000Z) | max_issues_count: 87 (2015-11-01T20:51:26.000Z .. 2022-01-02T18:54:18.000Z) | max_forks_count: 4 (2015-09-13T16:30:18.000Z .. 2020-07-17T15:55:53.000Z)
content:
#!/usr/bin/env python
import time
import logging
from sqlalchemy import text
from tornado import web, ioloop
from sockjs.tornado import SockJSRouter
import set_path
from tilde.core.settings import settings
from tilde.core.api import API
from tilde.berlinium import Async_Connection
logging.basicConfig(level=logging.INFO)
Tilde = API()
settings['debug_regime'] = False
class SleepTester:
@staticmethod
def login(req, client_id, db_session):
Connection.Clients[client_id].authorized = True
return "OK", None
@staticmethod
def sleep(req, client_id, db_session):
result, error = '', None
        try:
            req = float(req)
        except (TypeError, ValueError):
            return result, 'Not a number!'
current_engine = db_session.get_bind()
if settings['db']['engine'] == 'postgresql':
current_engine.execute(text('SELECT pg_sleep(:i)'), **{'i': req})
elif settings['db']['engine'] == 'sqlite':
conn = current_engine.raw_connection()
conn.create_function("sq_sleep", 1, time.sleep)
c = conn.cursor()
c.execute('SELECT sq_sleep(%s)' % req)
result = Tilde.count(db_session)
return result, error
if __name__ == "__main__":
Connection = Async_Connection
Connection.GUIProvider = SleepTester
DuplexRouter = SockJSRouter(Connection)
application = web.Application(DuplexRouter.urls, debug=False)
application.listen(settings['webport'], address='0.0.0.0')
logging.info("DB is %s" % settings['db']['engine'])
logging.info("Connections are %s" % Connection.Type)
logging.info("Server started")
ioloop.IOLoop.instance().start()
avg_line_length: 27.816667 | max_line_length: 77 | alphanum_fraction: 0.669263

hexsha: 4a137dd65dbaf608fdd62d4fb72445447a0f8ba4 | size: 3059 | ext: py | lang: Python
repo (stars/issues/forks): src/encode_task_post_call_peak_atac.py @ TomKellyGenetics/atac-seq-pipeline (0a69b767064edf7b0edc7af4aaabb09e0fc23b3d) | licenses: ["MIT"]
max_stars_count: 1 (2019-12-28T00:54:00.000Z .. 2019-12-28T00:54:00.000Z) | max_issues_count: null | max_forks_count: 1 (2020-04-30T00:17:20.000Z .. 2020-04-30T00:17:20.000Z)
content:
#!/usr/bin/env python
# Author: Jin Lee (leepc12@gmail.com)
import sys
import argparse
from encode_lib_common import (
assert_file_not_empty, log, ls_l, mkdir_p)
from encode_lib_genomic import (
peak_to_bigbed, peak_to_hammock, get_region_size_metrics, get_num_peaks)
from encode_lib_blacklist_filter import blacklist_filter
from encode_lib_frip import frip
def parse_arguments():
parser = argparse.ArgumentParser(prog='ENCODE post_call_peak (atac)',
description='')
parser.add_argument(
'peak', type=str,
help='Path for PEAK file. Peak filename should be "*.*Peak.gz". '
'e.g. rep1.narrowPeak.gz')
parser.add_argument('--ta', type=str,
help='TAG-ALIGN file.')
parser.add_argument('--peak-type', type=str, required=True,
choices=['narrowPeak', 'regionPeak',
'broadPeak', 'gappedPeak'],
help='Peak file type.')
parser.add_argument('--chrsz', type=str,
help='2-col chromosome sizes file.')
parser.add_argument('--blacklist', type=str,
help='Blacklist BED file.')
parser.add_argument('--regex-bfilt-peak-chr-name',
help='Keep chromosomes matching this pattern only '
'in .bfilt. peak files.')
parser.add_argument('--out-dir', default='', type=str,
help='Output directory.')
parser.add_argument('--log-level', default='INFO',
                        choices=['NOTSET', 'DEBUG', 'INFO',
                                 'WARNING', 'ERROR', 'CRITICAL'],
help='Log level')
args = parser.parse_args()
if args.blacklist is None or args.blacklist.endswith('null'):
args.blacklist = ''
log.setLevel(args.log_level)
log.info(sys.argv)
return args
def main():
# read params
args = parse_arguments()
log.info('Initializing and making output directory...')
mkdir_p(args.out_dir)
log.info('Blacklist-filtering peaks...')
bfilt_peak = blacklist_filter(
args.peak, args.blacklist, args.regex_bfilt_peak_chr_name, args.out_dir)
log.info('Checking if output is empty...')
assert_file_not_empty(bfilt_peak)
log.info('Converting peak to bigbed...')
peak_to_bigbed(bfilt_peak, args.peak_type, args.chrsz,
args.out_dir)
log.info('Converting peak to hammock...')
peak_to_hammock(bfilt_peak, args.out_dir)
log.info('FRiP without fragment length...')
frip(args.ta, bfilt_peak, args.out_dir)
log.info('Calculating (blacklist-filtered) peak region size QC/plot...')
get_region_size_metrics(bfilt_peak)
log.info('Calculating number of peaks (blacklist-filtered)...')
get_num_peaks(bfilt_peak)
log.info('List all files in output directory...')
ls_l(args.out_dir)
log.info('All done.')
if __name__ == '__main__':
main()
avg_line_length: 34.370787 | max_line_length: 80 | alphanum_fraction: 0.607388

hexsha: 4a137e24b04b87b1425b85d7f1c3371b8f67d268 | size: 1276 | ext: py | lang: Python
repo (stars/issues/forks): python/oneflow/test/modules/optimizer_test_util.py @ wangyuyue/oneflow (0a71c22fe8355392acc8dc0e301589faee4c4832) | licenses: ["Apache-2.0"]
max_stars_count: 3285 (2020-07-31T05:51:22.000Z .. 2022-03-31T15:20:16.000Z) | max_issues_count: 2417 (2020-07-31T06:28:58.000Z .. 2022-03-31T23:04:14.000Z) | max_forks_count: 520 (2020-07-31T05:52:42.000Z .. 2022-03-29T02:38:11.000Z)
content:
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
def clip_grad_norm_np(np_grad, max_norm, norm_type):
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == float("inf"):
total_norm = np.max(np.abs(np_grad))
    elif norm_type == float("-inf"):
total_norm = np.min(np.abs(np_grad))
elif norm_type == 0:
total_norm = np.sum(np.stack([np.sum(np_grad != 0)]) != 0)
else:
total_norm = np_grad
for i in range(np_grad.ndim, 0, -1):
total_norm = np.linalg.norm(total_norm, norm_type, axis=i - 1)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
np_grad = np_grad * clip_coef
return total_norm, np_grad
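# Sanity-check sketch for the helper above (made-up numbers, illustration only):
#
#     grad = np.array([3.0, 4.0])
#     total_norm, clipped = clip_grad_norm_np(grad, max_norm=1.0, norm_type=2)
#     # The L2 norm of [3, 4] is 5, which exceeds max_norm, so the gradient
#     # is scaled by max_norm / (total_norm + 1e-6):
#     assert np.isclose(total_norm, 5.0)
#     assert np.allclose(clipped, grad / 5.0, atol=1e-5)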
avg_line_length: 35.444444 | max_line_length: 74 | alphanum_fraction: 0.695925

hexsha: 4a137e36408d5b5457c982448a1c6047a9b85550 | size: 8240 | ext: py | lang: Python
repo (stars/issues/forks): src/olympia/amo/tests/test_redirects.py @ gijsk/addons-server (7c38f379e3a0b4a5ca231f98ac0c049450c224bd) | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# -*- coding: utf-8 -*-
"""Check all our redirects from remora to zamboni."""
from django.db import connection
from olympia import amo
from olympia.addons.models import Category
from olympia.amo.tests import TestCase
class TestRedirects(TestCase):
fixtures = ['ratings/test_models', 'addons/persona', 'base/global-stats']
def test_persona_category(self):
"""`/personas/film and tv` should go to /themes/film-and-tv"""
r = self.client.get('/personas/film and tv', follow=True)
assert r.redirect_chain[-1][0].endswith(
'/en-US/firefox/themes/film-and-tv')
    def test_contribute_installed(self):
        r"""`/addon/\d+/about` should go to
        `/addon/\d+/contribute/installed`."""
r = self.client.get(u'/addon/5326/about', follow=True)
redirect = r.redirect_chain[-1][0]
assert redirect.endswith(
'/en-US/firefox/addon/5326/')
def test_contribute(self):
"""`/addons/contribute/$id` should go to `/addon/$id/contribute`."""
response = self.client.get(u'/addon/5326/contribute', follow=True)
redirect = response.redirect_chain[-1][0]
assert redirect.endswith('/en-US/firefox/addon/5326/')
def test_utf8(self):
"""Without proper unicode handling this will fail."""
response = self.client.get(u'/api/1.5/search/ツールバー',
follow=True)
# Sphinx will be off so let's just test that it redirects.
assert response.redirect_chain[0][1] == 301
def test_parameters(self):
"""Bug 554976. Make sure when we redirect, we preserve our query
strings."""
url = u'/users/login?to=/en-US/firefox/users/edit'
r = self.client.get(url, follow=True)
self.assert3xx(r, '/en-US/firefox' + url, status_code=301)
def test_reviews(self):
response = self.client.get('/reviews/display/4', follow=True)
self.assert3xx(response, '/en-US/firefox/addon/a4/reviews/',
status_code=301)
def test_browse(self):
response = self.client.get('/browse/type:3', follow=True)
self.assert3xx(response, '/en-US/firefox/language-tools/',
status_code=301)
response = self.client.get('/browse/type:2', follow=True)
self.assert3xx(response, '/en-US/firefox/complete-themes/',
status_code=301)
# Drop the category.
response = self.client.get('/browse/type:2/cat:all', follow=True)
self.assert3xx(response, '/en-US/firefox/complete-themes/',
status_code=301)
def test_accept_language(self):
"""
Given an Accept Language header, do the right thing. See bug 439568
for juicy details.
"""
response = self.client.get('/', follow=True, HTTP_ACCEPT_LANGUAGE='de')
self.assert3xx(response, '/de/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='en-us, de')
self.assert3xx(response, '/en-US/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='fr, en')
self.assert3xx(response, '/fr/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='pt-XX, xx, yy')
self.assert3xx(response, '/pt-PT/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='pt')
self.assert3xx(response, '/pt-PT/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='pt, de')
self.assert3xx(response, '/pt-PT/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='pt-XX, xx, de')
self.assert3xx(response, '/pt-PT/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='xx, yy, zz')
self.assert3xx(response, '/en-US/firefox/', status_code=301)
response = self.client.get(
'/', follow=True,
HTTP_ACCEPT_LANGUAGE='some,thing-very;very,,,broken!\'jj')
self.assert3xx(response, '/en-US/firefox/', status_code=301)
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='en-us;q=0.5, de')
self.assert3xx(response, '/de/firefox/', status_code=301)
def test_users(self):
response = self.client.get('/users/info/1', follow=True)
self.assert3xx(response, '/en-US/firefox/user/1/',
status_code=301)
def test_extension_sorting(self):
r = self.client.get('/browse/type:1?sort=updated', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/?sort=updated',
status_code=301)
r = self.client.get('/browse/type:1?sort=name', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/?sort=name',
status_code=301)
r = self.client.get('/browse/type:1?sort=newest', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/?sort=created',
status_code=301)
r = self.client.get('/browse/type:1?sort=weeklydownloads', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/?sort=popular',
status_code=301)
r = self.client.get('/browse/type:1?sort=averagerating', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/?sort=rating',
status_code=301)
# If we don't recognize the sort, they get nothing.
r = self.client.get('/browse/type:1?sort=xxx', follow=True)
self.assert3xx(r, '/en-US/firefox/extensions/',
status_code=301)
Category.objects.create(pk=12, slug='woo', type=amo.ADDON_EXTENSION,
application=amo.FIREFOX.id, count=1, weight=0)
r = self.client.get('/browse/type:1/cat:12?sort=averagerating',
follow=True)
url, code = r.redirect_chain[-1]
assert code == 301
assert url.endswith('/en-US/firefox/extensions/woo/?sort=rating')
def test_addons_versions(self):
r = self.client.get('/addons/versions/4', follow=True)
self.assert3xx(r, '/en-US/firefox/addon/a4/versions/', status_code=301)
def test_addons_versions_rss(self):
r = self.client.get('/addons/versions/4/format:rss', follow=True)
self.assert3xx(r, '/en-US/firefox/addon/4/versions/format:rss',
status_code=301)
def test_addons_reviews_rss(self):
r = self.client.get('/addons/reviews/4/format:rss', follow=True)
self.assert3xx(r, '/en-US/firefox/addon/4/reviews/format:rss',
status_code=301)
class TestPersonaRedirect(TestCase):
fixtures = ['addons/persona']
    def test_persona_redirect(self):
        r"""`/persona/\d+` should go to `/addon/\d+`."""
r = self.client.get('/persona/813', follow=True)
self.assert3xx(r, '/en-US/firefox/addon/a15663/', status_code=301)
def test_persona_redirect_addon_no_exist(self):
"""When the persona exists but not its addon, throw a 404."""
        # Gotta get shady to separate Persona/Addons.
try:
with connection.cursor() as cursor:
cursor.execute("""
SET FOREIGN_KEY_CHECKS = 0;
UPDATE personas SET addon_id=123 WHERE persona_id=813;
SET FOREIGN_KEY_CHECKS = 1;
""")
r = self.client.get('/persona/813', follow=True)
assert r.status_code == 404
finally:
with connection.cursor() as cursor:
cursor.execute("""
SET FOREIGN_KEY_CHECKS = 0;
UPDATE personas SET addon_id=15663 WHERE persona_id=813;
SET FOREIGN_KEY_CHECKS = 1;
""")
| 43.829787
| 79
| 0.586408
|
4a137ed614c83c5fe391ce30c84e030ca4799ba6
| 3,632
|
py
|
Python
|
oarepo_model_builder/schema.py
|
Alzpeta/oarepo-model-builder
|
0684b505f3e6f41e964747190fe78b938d53182f
|
[
"MIT"
] | null | null | null |
oarepo_model_builder/schema.py
|
Alzpeta/oarepo-model-builder
|
0684b505f3e6f41e964747190fe78b938d53182f
|
[
"MIT"
] | null | null | null |
oarepo_model_builder/schema.py
|
Alzpeta/oarepo-model-builder
|
0684b505f3e6f41e964747190fe78b938d53182f
|
[
"MIT"
] | null | null | null |
import copy
import pathlib
from typing import Dict, Callable
import munch
from jsonpointer import resolve_pointer
from .exceptions import IncludedFileNotFoundException
from .utils.deepmerge import deepmerge
from .utils.hyphen_munch import HyphenMunch
class ModelSchema:
OAREPO_USE = 'oarepo:use'
def __init__(self, file_path, content=None,
included_models: Dict[str, Callable] = None,
loaders=None):
"""
Creates and parses model schema
:param file_path: path on the filesystem to the model schema file
:param content: if set, use this content, otherwise load the file_path
:param included_models: a dictionary of file_id to callable that returns included json.
The callable expects a single parameter, an instance of this schema
"""
self.file_path = file_path
self.included_schemas = included_models or {}
self.loaders = loaders
if content is not None:
self.schema = content
else:
self.schema = copy.deepcopy(self._load(file_path))
self._resolve_references(self.schema, [])
self.schema.setdefault('settings', {})
self.schema['settings'].setdefault('plugins', {})
self.schema = munch.munchify(self.schema, factory=HyphenMunch)
def get(self, key):
return self.schema.get(key, None)
def set(self, key, value):
self.schema[key] = value
@property
def settings(self):
return self.schema.settings
def merge(self, another):
self.schema = munch.munchify(deepmerge(another, self.schema, []), factory=HyphenMunch)
def _load(self, file_path):
"""
Loads a json/json5 file on the path
:param file_path: file path on filesystem
:return: parsed json
"""
extension = pathlib.Path(file_path).suffix.lower()[1:]
if extension in self.loaders:
return self.loaders[extension](file_path, self)
        raise Exception(f'Cannot load {file_path} - no loader has been found for extension {extension} '
                        f'in entry point group oarepo_model_builder.loaders')
def _load_included_file(self, file_id):
"""
Resolve and load an included file. Internal method called when loading schema.
If the included file contains a json pointer,
return only the part identified by the json pointer.
        :param file_id: the id of the included file; may contain a #json-pointer suffix
:return: loaded json
"""
if '#' in file_id:
file_id, json_pointer = file_id.rsplit('#', 1)
else:
json_pointer = None
if file_id not in self.included_schemas:
raise IncludedFileNotFoundException(f'Included file {file_id} not found in includes')
ret = self.included_schemas[file_id](self)
if json_pointer:
ret = resolve_pointer(ret, json_pointer)
return copy.deepcopy(ret)
def _resolve_references(self, element, stack):
if isinstance(element, dict):
if self.OAREPO_USE in element:
included_name = element.pop(self.OAREPO_USE)
included_data = self._load_included_file(included_name)
deepmerge(element, included_data, [])
return self._resolve_references(element, stack)
for k, v in element.items():
self._resolve_references(v, stack + [k])
elif isinstance(element, list):
for v in element:
self._resolve_references(v, stack)
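# Hedged usage sketch (not part of the original module): resolving an
# 'oarepo:use' include from an in-memory schema. The 'common' include name
# and its payload are illustrative assumptions; real callers would usually
# pass loaders for json/json5 files instead of inline content.
if __name__ == "__main__":
    content = {"model": {"oarepo:use": "common"}, "settings": {}}
    includes = {
        "common": lambda schema: {"properties": {"title": {"type": "string"}}},
    }
    model = ModelSchema("model.json", content=content, included_models=includes)
    print(model.get("model"))  # the include is merged in place of 'oarepo:use'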
| 33.62963
| 105
| 0.636013
|
4a137fe8ed336765232f342362e325593bc263b3
| 844
|
py
|
Python
|
posts/migrations/0004_auto_20210107_1419.py
|
KolesnikRV/hw05_final
|
91985b2a14daae5e66a61bbb71443671a0b063e7
|
[
"BSD-3-Clause"
] | null | null | null |
posts/migrations/0004_auto_20210107_1419.py
|
KolesnikRV/hw05_final
|
91985b2a14daae5e66a61bbb71443671a0b063e7
|
[
"BSD-3-Clause"
] | 2
|
2021-09-08T03:39:22.000Z
|
2022-01-13T03:54:00.000Z
|
posts/migrations/0004_auto_20210107_1419.py
|
KolesnikRV/hw05_final
|
91985b2a14daae5e66a61bbb71443671a0b063e7
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2021-01-07 14:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20201214_0429'),
]
operations = [
migrations.AlterField(
model_name='post',
name='group',
            field=models.ForeignKey(blank=True, help_text='Choose one of the existing groups.<p>Optional field.</p>', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='posts.Group', verbose_name='Group'),
),
migrations.AlterField(
model_name='post',
name='text',
            field=models.TextField(help_text='Enter the post text.<p>* Required field.</p>', verbose_name='Text'),
),
]
| 33.76
| 255
| 0.64455
|
4a138209d972f0429132e8fcf0c0508cb6aa7202
| 784
|
py
|
Python
|
fbpcs/pcf/tests/async_utils.py
|
joe1234wu/fbpcs
|
c9f57bf1b65adcbe39c6676ade5fc89e81dc5979
|
[
"MIT"
] | 63
|
2021-08-18T01:50:22.000Z
|
2022-03-25T06:44:36.000Z
|
fbpcs/pcf/tests/async_utils.py
|
joe1234wu/fbpcs
|
c9f57bf1b65adcbe39c6676ade5fc89e81dc5979
|
[
"MIT"
] | 672
|
2021-08-18T05:20:32.000Z
|
2022-03-31T23:30:13.000Z
|
fbpcs/pcf/tests/async_utils.py
|
joe1234wu/fbpcs
|
c9f57bf1b65adcbe39c6676ade5fc89e81dc5979
|
[
"MIT"
] | 61
|
2021-08-18T20:02:30.000Z
|
2022-03-31T22:44:17.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import functools
import unittest.mock
from typing import Any
def AsyncMock(*args, **kwargs):
    """Wrap a MagicMock in an awaitable; the mock is exposed as `.mock`."""
    m = unittest.mock.MagicMock(*args, **kwargs)
    async def mock_future(*args, **kwargs):
        return m(*args, **kwargs)
    mock_future.mock = m
    return mock_future
async def awaitable(v: Any) -> Any:
return v
def wait(f: asyncio.Future) -> Any:
loop = asyncio.get_event_loop()
return loop.run_until_complete(f)
def to_sync(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
asyncio.run(f(*args, **kwargs))
return wrapper
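# Hedged usage sketch (not part of the original helpers): awaiting an
# AsyncMock inside a coroutine that the @to_sync decorator runs to completion.
@to_sync
async def _demo():
    m = AsyncMock(return_value=7)
    assert await m(1, 2) == 7
    m.mock.assert_called_once_with(1, 2)
    assert await awaitable("ok") == "ok"
if __name__ == "__main__":
    _demo()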
| 20.631579
| 65
| 0.682398
|
4a13825cedd84ea1933669a22b9649cd4c792286
| 1,872
|
py
|
Python
|
lambda-func.py
|
nolorin/workbook-py
|
48064746532d5ff3b316d048293a737efa92c708
|
[
"MIT"
] | null | null | null |
lambda-func.py
|
nolorin/workbook-py
|
48064746532d5ff3b316d048293a737efa92c708
|
[
"MIT"
] | null | null | null |
lambda-func.py
|
nolorin/workbook-py
|
48064746532d5ff3b316d048293a737efa92c708
|
[
"MIT"
] | null | null | null |
# Generalized multiplication that repeats ints, strings, lists, tuples, and Objects using lambda helpers. Note: in PHP, lambda functions are anonymous functions assigned to variables (variable-scoped function definitions).
class Object:
def __init__( self, prop ):
self.value = prop
def __mul__( self, toMult ):
n = len( vars( self ) )
        setattr( self, 'child_' + str(n), toMult )
return self
def __str__( self ):
output = 'Object(value=' + str( self.value ) + ') {' + "\n"
for prop, obj in vars( self ).items():
            if prop == 'value': continue
output += "\t" + str( obj ) + "\n"
output += '}'
return output
multScalar = lambda base, out : base + out
multObj = lambda base, out : out * base
negScalar = lambda out : out*-1 if type( out ) is int else out[::-1] if type( out ) is str else arrayIter( out ) if type( out ) in ( list, tuple ) else False
arrayIter = lambda out : tuple( [ negScalar( i ) for i in list( out ) ] ) if type( out ) is tuple else [ negScalar( i ) for i in out ]
negObj = lambda out : out
def multiply( base, multiplicant ):
if type( base ) is not int:
        # start from an empty accumulator (the original seeded lists/tuples with
        # `base`, producing one copy too many); also, `type(x) is a or b` is
        # always truthy, so membership tests are used instead
        output = '' if type( base ) is str else Object( 'Shell' ) if type( base ) is Object else [] if type( base ) is list else () if type( base ) is tuple else None
        iterant = multObj if type( base ) is Object else multScalar if type( base ) in ( str, list, tuple ) else None
        negative = negObj if type( base ) is Object else negScalar if type( base ) in ( str, list, tuple ) else None
if iterant is not None:
for i in range( 0, abs( multiplicant ) ):
output = iterant( base, output )
if multiplicant < 0:
output = negative( output )
return output
else:
return False
else:
return base*multiplicant
# Test
print( multiply( 5, 5 ) )
print( multiply( 'Harp', 4 ) )
print( multiply( '*deer', -5 ) )
print( multiply( [ 1, 2, 3, 4 ], 2 ) )
print( multiply( ( 2, 34, 11, 7 ), 3 ) )
print( multiply( ( 1, -2, 4 ), -4 ) )
print( multiply( Object( 'Item' ), 2 ) )
| 39.829787
| 155
| 0.645299
|
4a1383279eb80f60cc3de2ebcf2402693b4c8e37
| 4,261
|
py
|
Python
|
spinor_gpe/examples/1_ground_state.py
|
readthedocs-assistant/spinor-gpe
|
02ec6ad98479ca182ff3f7f578669690bb1d5379
|
[
"MIT"
] | null | null | null |
spinor_gpe/examples/1_ground_state.py
|
readthedocs-assistant/spinor-gpe
|
02ec6ad98479ca182ff3f7f578669690bb1d5379
|
[
"MIT"
] | 1
|
2021-06-10T12:02:16.000Z
|
2021-06-10T12:02:16.000Z
|
spinor_gpe/examples/1_ground_state.py
|
readthedocs-assistant/spinor-gpe
|
02ec6ad98479ca182ff3f7f578669690bb1d5379
|
[
"MIT"
] | 1
|
2022-01-19T20:07:03.000Z
|
2022-01-19T20:07:03.000Z
|
"""
Example 1: Ground State
=======================
Starting with the Thomas-Fermi solution, propagate in imaginary time to
reach the ground state. Propagation smooths out the sharp edges
on both components' densities.
Physical Parameters
-------------------
.. topic:: Atom number
:math:`\\quad N_{\\rm at} = 100`
.. topic:: Atomic mass, Rubidium-87
:math:`\\quad m = 1.4442 \\times 10^{-25}~[\\rm kg]`
.. topic:: Trap frequencies
:math:`\\quad (\\omega_x, \\omega_y, \\omega_z) = 2 \\pi \\times (50, 50, 2000)~[{\\rm Hz}]`
:math:`\\quad (\\omega_x, \\omega_y, \\omega_z) = \\omega_x \\times (1, \\gamma, \\eta) = (1, 1, 40)~[\\omega_x]`
.. topic:: Harmonic oscillator length, x-axis
:math:`\\quad a_x = \\sqrt{\\hbar / m \\omega_x} = 1.525~[{\\mu\\rm m}]`
.. topic:: 3D scattering length, Rubidium-87
| :math:`\\quad a = 5.313~[{\\rm nm}]`
| :math:`\\quad a_{\\rm sc} = a / a_x = 0.00348~[a_x]`
.. topic:: Scattering 2D scale
| :math:`\\quad g_{\\rm sc}^{2\\rm D} = \\sqrt{8\\pi\\eta}~a_{\\rm sc} = 0.1105~[\\omega_x a_x^2]`
.. topic:: Scattering coupling
| :math:`\\quad (g_{\\rm uu}, g_{\\rm dd}, g_{\\rm ud}) = g_{\\rm sc}^{2 \\rm D} \\times (1, 1, 1.04)~[\\omega_x a_x^2]`
.. topic:: Chemical potential
:math:`\\quad \\mu = \\sqrt{4 N_{\\rm at} a_{\\rm sc} \\gamma \\sqrt{\\eta / 2 \\pi}} = 1.875~[\\omega_x]`
.. topic:: Thomas-Fermi radius
:math:`\\quad R_{\\rm TF} = \\sqrt{2 \\mu} = 1.937~[a_x]`
.. topic:: Initial population fractions
:math:`\\quad (p_0, p_1) = (0.5, 0.5)`
.. topic:: Raman wavelength
:math:`\\quad \\lambda_L = 790.1~[{\\rm nm}]`
Numerical Parameters
--------------------
.. topic:: Number of grid points
:math:`\\quad (N_x, N_y) = (64, 64)`
.. topic:: r-grid half-size
:math:`\\quad (x^{\\rm max}, y^{\\rm max}) = (8, 8)~[a_x]`
.. topic:: r-grid spacing
:math:`\\quad (\\Delta x, \\Delta y) = (0.25, 0.25)~[a_x]`
.. topic:: k-grid half-size
:math:`\\quad (k_x^{\\rm max}, k_y^{\\rm max}) = \\pi / (\\Delta x, \\Delta y)`
:math:`\\quad (k_x^{\\rm max}, k_y^{\\rm max}) = (12.566, 12.566)~[a_x^{-1}]`
.. topic:: k-grid spacing
:math:`\\quad (\\Delta k_x, \\Delta k_y) = \\pi / (x^{\\rm max}, y^{\\rm max})`
:math:`\\quad (\\Delta k_x, \\Delta k_y) = (0.3927, 0.3927)~[a_x^{-1}]`
.. topic:: Time scale
:math:`\\quad \\tau_0 = 1 / \\omega_x = 0.00318~[{\\rm s/rad}]`
:math:`\\quad \\tau_0 = 1~[\\omega_x^{-1}]`
.. topic:: Time step duration, imaginary
:math:`\\quad \\Delta \\tau_{\\rm im} = 1 / 50~[-i \\tau_0]`
.. topic:: Number of time steps, imaginary
:math:`\\quad N_{\\rm im} = 100`
"""
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))  # Adds project root to sys.path
import numpy as np
from spinor_gpe.pspinor import pspinor as spin
# sphinx_gallery_thumbnail_path = '_static/1_ground.png'
# 1. SETUP
DATA_PATH = 'examples/Trial_011' # Default data path is in the /data/ folder
FREQ = 50
W = 2*np.pi*FREQ
Y_SCALE = 1
Z_SCALE = 40.0
ATOM_NUM = 1e2
OMEG = {'x': W, 'y': Y_SCALE * W, 'z': Z_SCALE * W}
G_SC = {'uu': 1, 'dd': 1, 'ud': 1.04}
ps = spin.PSpinor(DATA_PATH, overwrite=True, # Initialize PSpinor object
atom_num=ATOM_NUM,
omeg=OMEG,
g_sc=G_SC,
pop_frac=(0.5, 0.5),
r_sizes=(8, 8),
mesh_points=(64, 64))
ps.coupling_setup(wavel=790.1e-9, kin_shift=False)
ZOOM = 4 # Zooms the momentum-space density plots by a constant factor
# Plot real- and momentum-space density & real-space phase of both components
ps.plot_spins(rscale=ps.rad_tf, kscale=ps.kL_recoil, zoom=ZOOM)
# 2. RUN (Imaginary-time)
DT = 1/50
N_STEPS = 100
DEVICE = 'cpu'
ps.rand_seed = 99999
# Run propagation loop:
# - Returns `PropResult` & `TensorPropagator` objects
res, prop = ps.imaginary(DT, N_STEPS, DEVICE, is_sampling=True, n_samples=50)
# 3. ANALYZE
res.plot_spins(rscale=ps.rad_tf, kscale=ps.kL_recoil, zoom=ZOOM)
res.plot_total(kscale=ps.kL_recoil, zoom=ZOOM) # Plot total density & phase
res.plot_pops() # Plot how the spins' populations evolves
res.make_movie(rscale=ps.rad_tf, kscale=ps.kL_recoil, play=True, zoom=ZOOM,
norm_type='half')
| 26.63125
| 124
| 0.582492
|
4a1383ce7a98518b07e197572022ba5d0b703517
| 647
|
py
|
Python
|
POO/ClassesAbstratas/aula110_main.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | 1
|
2021-09-01T01:58:13.000Z
|
2021-09-01T01:58:13.000Z
|
POO/ClassesAbstratas/aula110_main.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
POO/ClassesAbstratas/aula110_main.py
|
pinheirogus/Curso-Python-Udemy
|
d6d52320426172e924081b9df619490baa8c6016
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
#
# class A(ABC):
# @abstractmethod
# def falar(self):
# pass
#
# class B(A):
# def falar(self):
#         print('Speaking in B.')
#
#
# b = B()
#
# b.falar()
from classes.contaPoupanca import ContaPoupanca
from classes.contaCorrente import ContaCorrente
poupancaBB = ContaPoupanca(1111, 2222, 0)
poupancaBB.depositar(10)
poupancaBB.sacar(5)
poupancaBB.sacar(5)
poupancaBB.sacar(5)
print('##############################################################################')
correnteBB = ContaCorrente(1111, 3333, 0, 500)
correnteBB.depositar(100)
correnteBB.sacar(500)
correnteBB.sacar(101)
| 19.606061
| 87
| 0.612056
|
4a13850014b5bc2ff3906c107fa28c763aba2349
| 8,853
|
py
|
Python
|
releasenotes/source/conf.py
|
jovial/kayobe-1
|
3bc8d8957714f3485114864c9efd45f5c9210474
|
[
"Apache-2.0"
] | 1
|
2021-08-17T13:55:59.000Z
|
2021-08-17T13:55:59.000Z
|
releasenotes/source/conf.py
|
jovial/kayobe-1
|
3bc8d8957714f3485114864c9efd45f5c9210474
|
[
"Apache-2.0"
] | 1
|
2018-07-25T14:33:59.000Z
|
2018-07-25T14:33:59.000Z
|
releasenotes/source/conf.py
|
stackhpc/gr-kayobe
|
86b1e3ef544f22dc7b52f1915c65d483c13c00e9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Kayobe Release Notes documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kayobe Release Notes'
copyright = u'2018, The Kayobe team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# openstackdocstheme options
# repository_name = 'openstack/kayobe'
# bug_project = 'kayobe'
# bug_tag = ''
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'KayobeReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KayobeReleaseNotes.tex',
u'Kayobe Release Notes Documentation',
u'Kayobe Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kayobereleasenotes',
u'Kayobe Release Notes Documentation',
[u'Kayobe Developers'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KayobeReleaseNotes',
u'Kayobe Release Notes Documentation',
u'Kayobe Developers',
'kayobereleasenotes',
'Deployment of containerised OpenStack to bare metal.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 32.192727
| 79
| 0.71456
|
4a138617123a1547223f06a4f12df0d89ec7f33e
| 358
|
py
|
Python
|
tests/datagen/obsscheduleclass.py
|
faysal-ishtiaq/climsoft-api
|
46dacdeba5d935ee3b944df00731640170b87ccd
|
[
"MIT"
] | null | null | null |
tests/datagen/obsscheduleclass.py
|
faysal-ishtiaq/climsoft-api
|
46dacdeba5d935ee3b944df00731640170b87ccd
|
[
"MIT"
] | 2
|
2022-01-16T15:41:27.000Z
|
2022-01-30T18:37:13.000Z
|
tests/datagen/obsscheduleclass.py
|
openclimateinitiative/climsoft-api
|
3591d7499dd7777617b8086332dc83fab1af9588
|
[
"MIT"
] | 2
|
2021-12-22T21:50:19.000Z
|
2022-01-28T12:53:32.000Z
|
import uuid
from faker import Faker
from climsoft_api.api.obsscheduleclass import schema as obsscheduleclass_schema
fake = Faker()
def get_valid_obs_schedule_class_input(station_id: str):
return obsscheduleclass_schema.ObsScheduleClass(
scheduleClass=uuid.uuid4().hex,
description=uuid.uuid4().hex,
refersTo=station_id,
)
| 23.866667
| 79
| 0.759777
|
4a138695ee9c9bb360eb485fbeecf2e951f8bbb0
| 229
|
py
|
Python
|
teleftp/config/__init__.py
|
MoscowSchool45/teleftp
|
75442e3e287508246a32ae65fe215f5eccee18f0
|
[
"BSD-3-Clause"
] | 3
|
2018-04-03T12:14:04.000Z
|
2020-07-11T15:26:55.000Z
|
teleftp/config/__init__.py
|
MoscowSchool45/teleftp
|
75442e3e287508246a32ae65fe215f5eccee18f0
|
[
"BSD-3-Clause"
] | null | null | null |
teleftp/config/__init__.py
|
MoscowSchool45/teleftp
|
75442e3e287508246a32ae65fe215f5eccee18f0
|
[
"BSD-3-Clause"
] | null | null | null |
import json
class Config(object):
def __init__(self, config_filename):
with open(config_filename, "r") as f:
self.config = json.load(f)
    def __getattr__(self, name):
        if name not in self.config:
            raise AttributeError(name)  # so hasattr() works for missing keys
        return self.config[name]
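# Hedged usage sketch (not part of the original module): the config keys
# below are illustrative assumptions, not teleftp's real schema.
if __name__ == "__main__":
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump({"host": "ftp.example.org", "port": 21}, f)
    cfg = Config(f.name)
    print(cfg.host, cfg.port)  # attribute access is routed to the JSON dict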
| 20.818182
| 45
| 0.633188
|
4a1386d3dea4d077a53d68c5acc4756d9f556d77
| 5,733
|
py
|
Python
|
preprocessing/helpers/data_utils.py
|
lbyiuou0329/Copycat-abstractive-opinion-summarizer
|
650339109ca63f27e00a69eece3a42863eed4524
|
[
"MIT"
] | null | null | null |
preprocessing/helpers/data_utils.py
|
lbyiuou0329/Copycat-abstractive-opinion-summarizer
|
650339109ca63f27e00a69eece3a42863eed4524
|
[
"MIT"
] | null | null | null |
preprocessing/helpers/data_utils.py
|
lbyiuou0329/Copycat-abstractive-opinion-summarizer
|
650339109ca63f27e00a69eece3a42863eed4524
|
[
"MIT"
] | null | null | null |
import gzip
from mltoolkit.mlutils.helpers.formatting.general import unescape
from preprocessing.fields import OutputFields, AmazonFields, YelpFields
from mltoolkit.mlutils.helpers.paths_and_files import get_file_name, \
safe_mkfdir, \
comb_paths
import csv
import os
import json
def opener(filename):
    """Opens a file, transparently decompressing it if it is gzip-compressed."""
    f = open(filename, 'rb')
    if f.read(2) == b'\x1f\x8b':  # gzip magic number (bytes, since f is binary)
        f.seek(0)
        return gzip.GzipFile(fileobj=f)
    else:
        f.seek(0)
        return f
def read_yelp_data(path):
    """Reads Yelp data, formats it, and adds a dummy category attribute (for consistency).
Args:
path (str): data path to a file with Yelp reviews.
Returns: an iterator over pairs of group_id and list of data-units (reviews
with attributes).
"""
yelp_to_output_map = {
YelpFields.BUS_ID: OutputFields.GROUP_ID,
YelpFields.REV_TEX: OutputFields.REV_TEXT,
YelpFields.STARS: OutputFields.RATING
}
prev_business_id = None
dus = []
with open(path, encoding='utf-8') as f:
for line in f:
du = json.loads(line)
business_id = du[YelpFields.BUS_ID]
du = {yelp_to_output_map[attr]: du[attr] for attr
in yelp_to_output_map.keys()}
du[OutputFields.REV_TEXT] = clean_text(du[OutputFields.REV_TEXT])
du[OutputFields.CAT] = 'business'
if prev_business_id is not None and prev_business_id != business_id:
yield prev_business_id, dus
dus = []
prev_business_id = business_id
dus.append(du)
if len(dus):
yield prev_business_id, dus
def read_amazon_data(path, max_revs=None, replace_xml=False):
    """Reads Amazon data, formats it, and enriches it by adding the category attribute.
    Args:
        path (str): data path to a file with Amazon reviews.
max_revs (int): the maximum number of reviews to read.
replace_xml (bool): if set to True will replace XML/HTML symbols with
proper strings.
Returns: an iterator over pairs of group_id and list of data-units (reviews
with attributes).
"""
amazon_to_output_map = {
AmazonFields.PROD_ID: OutputFields.GROUP_ID,
AmazonFields.REV_TEXT: OutputFields.REV_TEXT,
AmazonFields.OVERALL: OutputFields.RATING
}
dus = []
prev_prod_id = None
for indx, du in enumerate(parse(path)):
prod_id = du[AmazonFields.PROD_ID]
if replace_xml:
du[AmazonFields.REV_TEXT] = unescape(du[AmazonFields.REV_TEXT])
du = {amazon_to_output_map[attr]: du[attr] for attr
in amazon_to_output_map.keys()}
# adding the category attribute based on the file name
du[OutputFields.CAT] = get_file_name(path).lower()
du[OutputFields.REV_TEXT] = clean_text(du[OutputFields.REV_TEXT])
if prev_prod_id is not None and prod_id != prev_prod_id:
yield prev_prod_id, dus
dus = []
prev_prod_id = prod_id
dus.append(du)
if max_revs and indx >= max_revs - 1:
break
if len(dus):
yield prev_prod_id, dus
def read_csv_file(file_path, sep='\t'):
with open(file_path, mode='r', encoding='utf-8') as f:
reader = csv.DictReader(f, delimiter=sep)
for item in reader:
yield item
def write_groups_to_csv(out_dir_path, group_id_to_units, sep='\t'):
for group_id, group_units in group_id_to_units.items():
full_file_name = "%s.csv" % group_id
out_file_path = comb_paths(out_dir_path, full_file_name)
write_group_to_csv(out_file_path, group_units, sep=sep)
def write_group_to_csv(out_file_path, units, sep="\t"):
"""Writes data units into a CSV file.
Args:
out_file_path (str): self-explanatory.
units (list): list with dicts (review texts and other attributes).
sep (str): separation in the output csv files.
Returns: None.
"""
safe_mkfdir(out_file_path)
with open(out_file_path, 'w', encoding='utf-8') as f:
header = None
for du in units:
if header is None:
header = du.keys()
f.write(sep.join(header) + "\n")
str_to_write = sep.join([str(du[attr]) for attr in header])
f.write(str_to_write + '\n')
def parse(path):
    g = opener(path)  # opener takes a single argument and handles binary mode itself
for l in g:
yield eval(l)
def get_act_out_dir_path(out_dir_path, inp_file_path, middle_path):
"""Creates the final/actual output directory path specific to a step."""
out_file_path = os.path.join(out_dir_path, middle_path,
get_file_name(inp_file_path))
return out_file_path
def partition(groups, train_part=0.8, val_part=0.1, test_part=0.1):
"""Splits groups into training, validation, and test partitions.
Args:
groups (list): list of units (e.g. dicts).
train_part (float): proportion in [0, 1] of units for training.
val_part (float): self-explanatory.
test_part (float): self-explanatory.
    Returns: lists of groups for each partition.
"""
assert train_part + val_part + test_part == 1.
total_size = len(groups)
train_part_end = int(total_size * train_part)
val_part_end = train_part_end + int(total_size * val_part)
train_groups = groups[:train_part_end]
val_groups = groups[train_part_end:val_part_end]
if test_part == 0.:
val_groups += groups[val_part_end:]
test_groups = []
else:
test_groups = groups[val_part_end:]
return train_groups, val_groups, test_groups
def clean_text(text_str):
return text_str.replace("\t", '').replace('\n', '')
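# Hedged usage sketch (not part of the original helpers): splitting ten dummy
# groups with the default 80/10/10 proportions.
if __name__ == "__main__":
    groups = [{"group_id": i} for i in range(10)]
    train, val, test = partition(groups)
    print(len(train), len(val), len(test))  # prints: 8 1 1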
| 30.989189
| 86
| 0.641723
|
4a1388a0743ab4e460e50400c13a4cdd0c658a0f
| 276
|
py
|
Python
|
FictionTools/amitools/amitools/vamos/cfgcore/__init__.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 38
|
2021-06-18T12:56:15.000Z
|
2022-03-12T20:38:40.000Z
|
FictionTools/amitools/amitools/vamos/cfgcore/__init__.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 2
|
2021-06-20T16:28:12.000Z
|
2021-11-17T21:33:56.000Z
|
FictionTools/amitools/amitools/vamos/cfgcore/__init__.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 6
|
2021-06-18T18:18:36.000Z
|
2021-12-22T08:01:32.000Z
|
from .main import MainParser, log_cfg
from .parser import Parser
from .value import Value, ValueList, ValueDict, parse_scalar, split_nest
from .defdict import DefaultDict
from .argdict import Argument, ArgumentDict
from .trafo import DictTrafo
from .cfgdict import ConfigDict
| 34.5
| 72
| 0.82971
|
4a138958112863eadef4d8bf3702fb21d3733fd3
| 10,276
|
py
|
Python
|
sdks/python/apache_beam/coders/standard_coders_test.py
|
niuzhi/beam
|
1e2b5165c1a4c157ae36312c8d0f078ea64a48ea
|
[
"Apache-2.0"
] | 1
|
2020-07-24T14:52:25.000Z
|
2020-07-24T14:52:25.000Z
|
sdks/python/apache_beam/coders/standard_coders_test.py
|
niuzhi/beam
|
1e2b5165c1a4c157ae36312c8d0f078ea64a48ea
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/apache_beam/coders/standard_coders_test.py
|
niuzhi/beam
|
1e2b5165c1a4c157ae36312c8d0f078ea64a48ea
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import math
import os.path
import sys
import unittest
from builtins import map
from typing import Dict
from typing import Tuple
import yaml
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import schema_pb2
from apache_beam.runners import pipeline_context
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.typehints import schemas
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
STANDARD_CODERS_YAML = os.path.normpath(
os.path.join(
os.path.dirname(__file__), '../portability/api/standard_coders.yaml'))
def _load_test_cases(test_yaml):
"""Load test data from yaml file and return an iterable of test cases.
See ``standard_coders.yaml`` for more details.
"""
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
with open(test_yaml, 'rb') as coder_spec:
for ix, spec in enumerate(
yaml.load_all(coder_spec, Loader=yaml.SafeLoader)):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
def parse_float(s):
x = float(s)
if math.isnan(x):
    # On Windows, float('NaN') has the opposite sign from other platforms.
# For the purpose of this test, we just need consistency.
x = abs(x)
return x
def value_parser_from_schema(schema):
def attribute_parser_from_type(type_):
# TODO: This should be exhaustive
type_info = type_.WhichOneof("type_info")
if type_info == "atomic_type":
if type_.atomic_type == schema_pb2.BYTES:
return lambda x: x.encode("utf-8")
else:
return schemas.ATOMIC_TYPE_TO_PRIMITIVE[type_.atomic_type]
elif type_info == "array_type":
element_parser = attribute_parser_from_type(type_.array_type.element_type)
return lambda x: list(map(element_parser, x))
elif type_info == "map_type":
      key_parser = attribute_parser_from_type(type_.map_type.key_type)
      value_parser = attribute_parser_from_type(type_.map_type.value_type)
return lambda x: dict(
(key_parser(k), value_parser(v)) for k, v in x.items())
parsers = [(field.name, attribute_parser_from_type(field.type))
for field in schema.fields]
constructor = schemas.named_tuple_from_schema(schema)
def value_parser(x):
result = []
for name, parser in parsers:
value = x.pop(name)
result.append(None if value is None else parser(value))
if len(x):
raise ValueError(
"Test data contains attributes that don't exist in the schema: {}".
format(', '.join(x.keys())))
return constructor(*result)
return value_parser
class StandardCodersTest(unittest.TestCase):
_urn_to_json_value_parser = {
'beam:coder:bytes:v1': lambda x: x.encode('utf-8'),
'beam:coder:bool:v1': lambda x: x,
'beam:coder:string_utf8:v1': lambda x: x,
'beam:coder:varint:v1': lambda x: x,
'beam:coder:kv:v1': lambda x,
key_parser,
value_parser: (key_parser(x['key']), value_parser(x['value'])),
'beam:coder:interval_window:v1': lambda x: IntervalWindow(
start=Timestamp(micros=(x['end'] - x['span']) * 1000),
end=Timestamp(micros=x['end'] * 1000)),
'beam:coder:iterable:v1': lambda x,
parser: list(map(parser, x)),
'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
'beam:coder:windowed_value:v1': lambda x,
value_parser,
window_parser: windowed_value.create(
value_parser(x['value']),
x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']])),
'beam:coder:param_windowed_value:v1': lambda x,
value_parser,
window_parser: windowed_value.create(
value_parser(x['value']),
x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']]),
PaneInfo(
x['pane']['is_first'],
x['pane']['is_last'],
PaneInfoTiming.from_string(x['pane']['timing']),
x['pane']['index'],
x['pane']['on_time_index'])),
'beam:coder:timer:v1': lambda x,
value_parser,
window_parser: userstate.Timer(
user_key=value_parser(x['userKey']),
dynamic_timer_tag=x['dynamicTimerTag'],
clear_bit=x['clearBit'],
windows=tuple([window_parser(w) for w in x['windows']]),
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None) if x['clearBit'] else userstate.Timer(
user_key=value_parser(x['userKey']),
dynamic_timer_tag=x['dynamicTimerTag'],
clear_bit=x['clearBit'],
fire_timestamp=Timestamp(micros=x['fireTimestamp'] * 1000),
hold_timestamp=Timestamp(micros=x['holdTimestamp'] * 1000),
windows=tuple([window_parser(w) for w in x['windows']]),
paneinfo=PaneInfo(
x['pane']['is_first'],
x['pane']['is_last'],
PaneInfoTiming.from_string(x['pane']['timing']),
x['pane']['index'],
x['pane']['on_time_index'])),
'beam:coder:double:v1': parse_float,
}
def test_standard_coders(self):
for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
logging.info('Executing %s test.', name)
self._run_standard_coder(name, spec)
def _run_standard_coder(self, name, spec):
def assert_equal(actual, expected):
"""Handle nan values which self.assertEqual fails on."""
if (isinstance(actual, float) and isinstance(expected, float) and
math.isnan(actual) and math.isnan(expected)):
return
self.assertEqual(actual, expected)
coder = self.parse_coder(spec['coder'])
parse_value = self.json_value_parser(spec['coder'])
nested_list = [spec['nested']] if 'nested' in spec else [True, False]
for nested in nested_list:
for expected_encoded, json_value in spec['examples'].items():
value = parse_value(json_value)
expected_encoded = expected_encoded.encode('latin1')
if not spec['coder'].get('non_deterministic', False):
actual_encoded = encode_nested(coder, value, nested)
if self.fix and actual_encoded != expected_encoded:
self.to_fix[spec['index'], expected_encoded] = actual_encoded
else:
self.assertEqual(expected_encoded, actual_encoded)
decoded = decode_nested(coder, expected_encoded, nested)
assert_equal(decoded, value)
else:
# Only verify decoding for a non-deterministic coder
self.assertEqual(
decode_nested(coder, expected_encoded, nested), value)
def parse_coder(self, spec):
context = pipeline_context.PipelineContext()
coder_id = str(hash(str(spec)))
component_ids = [
context.coders.get_id(self.parse_coder(c))
for c in spec.get('components', ())
]
context.coders.put_proto(
coder_id,
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=spec['urn'],
payload=spec.get('payload', '').encode('latin1')),
component_coder_ids=component_ids))
return context.coders.get_by_id(coder_id)
def json_value_parser(self, coder_spec):
# TODO: integrate this with the logic for the other parsers
if coder_spec['urn'] == 'beam:coder:row:v1':
schema = schema_pb2.Schema.FromString(
coder_spec['payload'].encode('latin1'))
return value_parser_from_schema(schema)
component_parsers = [
self.json_value_parser(c) for c in coder_spec.get('components', ())
]
return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
x, *component_parsers)
# Used when --fix is passed.
fix = False
to_fix = {} # type: Dict[Tuple[int, bytes], bytes]
@classmethod
def tearDownClass(cls):
if cls.fix and cls.to_fix:
print("FIXING", len(cls.to_fix), "TESTS")
doc_sep = '\n---\n'
      with open(STANDARD_CODERS_YAML) as f:
        docs = f.read().split(doc_sep)
def quote(s):
return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
print(quote(expected_encoded), "->", quote(actual_encoded))
docs[doc_ix] = docs[doc_ix].replace(
quote(expected_encoded) + ':', quote(actual_encoded) + ':')
      with open(STANDARD_CODERS_YAML, 'w') as f:
        f.write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
out = coder_impl.create_OutputStream()
coder.get_impl().encode_to_stream(value, out, nested)
return out.get()
def decode_nested(coder, encoded, nested=True):
return coder.get_impl().decode_from_stream(
coder_impl.create_InputStream(encoded), nested)
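# Hedged usage sketch (not part of the original test): round-tripping a value
# through encode_nested/decode_nested with a concrete Beam coder.
def _roundtrip_demo():
  from apache_beam.coders import coders
  coder = coders.VarIntCoder()
  assert decode_nested(coder, encode_nested(coder, 42)) == 42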
if __name__ == '__main__':
if '--fix' in sys.argv:
StandardCodersTest.fix = True
sys.argv.remove('--fix')
unittest.main()
| 37.097473
| 80
| 0.668548
|
4a13896e428942841347a16f80cd57b3d639d3f0
| 3,239
|
py
|
Python
|
app/mysite/mysite/settings.py
|
lassi-dev/pipeline1
|
bd91d8e3fab636e0d30127976726217ce177d2e8
|
[
"Unlicense"
] | null | null | null |
app/mysite/mysite/settings.py
|
lassi-dev/pipeline1
|
bd91d8e3fab636e0d30127976726217ce177d2e8
|
[
"Unlicense"
] | null | null | null |
app/mysite/mysite/settings.py
|
lassi-dev/pipeline1
|
bd91d8e3fab636e0d30127976726217ce177d2e8
|
[
"Unlicense"
] | null | null | null |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-wyit+87@gf@2er%1h!ujmniy%08(a+e!!le=+k0z*9zq#-rjoh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.706349
| 91
| 0.700525
|
4a138981275e9d5237d80becb5ec4b00833b9da0
| 22,254
|
py
|
Python
|
tests/test_cassandra.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cassandra.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
tests/test_cassandra.py
|
smaato/biggraphite
|
edf2c6e56505806c122196745de149cd6f53b453
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import time
import re
from distutils import version
from biggraphite import accessor as bg_accessor
from biggraphite import accessor_cache as bg_accessor_cache
from biggraphite import test_utils as bg_test_utils
from biggraphite import glob_utils as bg_glob_utils
from biggraphite.drivers import cassandra as bg_cassandra
_METRIC = bg_test_utils.make_metric("test.metric")
# Points test query.
_QUERY_RANGE = 3600
_QUERY_START = 1000 * _QUERY_RANGE
_QUERY_END = _QUERY_START + _QUERY_RANGE
# Points injected in the test DB, a superset of above.
_EXTRA_POINTS = 1000
_POINTS_START = _QUERY_START - _EXTRA_POINTS
_POINTS_END = _QUERY_END + _EXTRA_POINTS
_POINTS = [(t, v) for v, t in enumerate(range(_POINTS_START, _POINTS_END))]
_USEFUL_POINTS = _POINTS[_EXTRA_POINTS:-_EXTRA_POINTS]
assert _QUERY_RANGE == len(_USEFUL_POINTS)
class _BaseTestAccessorWithCassandraMetadata(object):
def test_glob_metrics(self):
IS_LUCENE = self.ACCESSOR_SETTINGS.get('use_lucene', False)
metrics = [
"a", "a.a", "a.b", "a.a.a", "a.b.c", "a.x.y",
"x.y.z", "x.y.y.z", "x.y.y.y.z",
"super", "superb", "supercomputer", "superconductivity", "superman",
"supper", "suppose",
"ad.o.g", "af.o.g", "ap.o.g", "az.o.g",
"b.o.g", "m.o.g",
"zd.o.g", "zf.o.g", "zp.o.g", "zz.o.g",
"-b-.a.t", "-c-.a.t", "-d-.a.t", "-e-.a.t",
]
metrics.sort()
for name in metrics:
metric = bg_test_utils.make_metric(name)
self.accessor.create_metric(metric)
self.flush()
def assert_find(glob, expected_matches):
# Check we can find the matches of a glob
matches = sorted(list(self.accessor.glob_metric_names(glob)))
            # Lucene is expected to return exact results, so filter out
            # wrongly expected matches before comparing.
if IS_LUCENE:
glob_re = re.compile(bg_glob_utils.glob_to_regex(glob))
expected_matches = list(filter(glob_re.match, expected_matches))
self.assertEqual(expected_matches, matches)
# Empty query
assert_find("", [])
# Exact matches
assert_find("a.a", ["a.a"])
assert_find("A", [])
# Character wildcard
assert_find("?",
[x for x in metrics if x.count('.') == 0])
assert_find("sup?er",
[x for x in metrics if x.startswith("sup")])
# Character selector
for pattern in [
"a[!dfp].o.g",
u"a[!dfp].o.g",
"a[!dfp]suffix.o.g",
"a[nope].o.g",
"a[nope]suffix.o.g",
]:
assert_find(pattern,
["a{0}.o.g".format(x) for x in "dfpz"])
# Sequence wildcard
assert_find("*",
[x for x in metrics if x.count('.') == 0])
assert_find("*.*",
[x for x in metrics if x.count('.') == 1])
assert_find("*.*.*",
[x for x in metrics if x.count('.') == 2])
assert_find("super*",
[x for x in metrics if x.startswith("super")])
# Sequence selector
assert_find("a.{b,x}.{c,y}",
["a.b.c", "a.x.y"])
assert_find("a{d,f,p}.o.g",
["a{0}.o.g".format(c) for c in "dfp"])
assert_find("{a,z}{d,f,p}.o.g",
["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
assert_find("{a{d,f,p},z{d,f,p}}.o.g",
["{0}{1}.o.g".format(a, b) for a in "az" for b in "dfp"])
for pattern in [
"-{b,c,d}-.a.t",
u"-{b,c,d}-.a.t",
"-{b,c,d}?.a.t",
"-{b,c,d}?suffix.a.t",
"-{b,c,d}[ha].a.t",
"-{b,c,d}[ha]suffix.a.t",
"-{b,c,d}[!ha].a.t",
"-{b,c,d}[!ha]suffix.a.t",
"-{b,c,d}*.a.t",
"-{b,c,d}*suffix.a.t",
u"-{b,c,d}*suffix.a.t",
]:
assert_find(pattern, ["-b-.a.t", "-c-.a.t", "-d-.a.t"])
# Ensure the query optimizer works as expected by having a high
# combinatorial pattern.
assert_find(
"-{b,c,d}*suffix.a.t{,u}{,v}{,w}{,x}{,y}{,z}",
["-{0}-.a.t".format(c) for c in "bcde"],
)
# Globstars
assert_find("**",
metrics)
assert_find("x.**",
[x for x in metrics if x.startswith("x.")])
if not IS_LUCENE:
# FIXME: Lucene doesn't support globstars here yet.
assert_find("**.z",
[x for x in metrics if x.endswith(".z")])
assert_find("x.**.z",
[x for x in metrics
if x.startswith("x.") and x.endswith(".z")])
self.accessor.drop_all_metrics()
assert_find("*", [])
assert_find("**", [])
def test_glob_directories(self):
for name in "a", "a.b", "x.y.z":
metric = bg_test_utils.make_metric(name)
self.accessor.create_metric(metric)
self.flush()
def assert_find(glob, expected_matches):
# Check we can find the matches of a glob
self.assertEqual(expected_matches, list(
self.accessor.glob_directory_names(glob)))
assert_find("x.y", ["x.y"]) # Test exact match
assert_find("A", []) # Test case mismatch
# Test various depths
assert_find("*", ["a", "x"])
assert_find("*.*", ["x.y"])
assert_find("*.*.*", [])
self.accessor.drop_all_metrics()
assert_find("*", [])
def test_glob_metrics_cached(self):
metrics = ["a", "a.b", "x.y.z"]
for name in metrics:
metric = bg_test_utils.make_metric(name)
self.accessor.create_metric(metric)
self.flush()
cache = bg_accessor_cache.MemoryCache(10, 60)
original_cache = self.accessor.cache
self.accessor.cache = cache
def assert_find(glob, results):
res = self.accessor.glob_metric_names(glob)
self.assertEqual(set(results), set(res))
# Nothing should be cached here.
assert_find('**', metrics)
assert_find('a', ['a'])
assert_find('{x,y}.*y.[z]', ['x.y.z'])
# Things should be cached here.
assert_find('**', metrics)
assert_find('a', ['a'])
assert_find('{x,y}.*y.[z]', ['x.y.z'])
# Make sure we use the cache.
self.accessor.cache.get = lambda _, version: ['fake']
assert_find('a', ['fake'])
assert_find('**', ['fake'])
assert_find('{x,y}.*y.[z]', ['fake'])
self.accessor.cache = original_cache
def test_glob_too_many_directories(self):
for name in "a", "a.b", "x.y.z":
metric = bg_test_utils.make_metric(name)
self.accessor.create_metric(metric)
self.flush()
old_value = self.accessor.max_metrics_per_pattern
self.accessor.max_metrics_per_pattern = 1
with self.assertRaises(bg_cassandra.TooManyMetrics):
list(self.accessor.glob_directory_names('**'))
self.accessor.max_metrics_per_pattern = old_value
def test_create_metrics(self):
meta_dict = {
"aggregator": bg_accessor.Aggregator.last,
"retention": bg_accessor.Retention.from_string("60*1s:60*60s"),
"carbon_xfilesfactor": 0.3,
}
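        # A note on the retention string (our reading of the
        # "<points>*<resolution>" syntax, stated as an assumption rather than
        # verified against the parser): "60*1s:60*60s" would keep 60 points at
        # 1 s resolution, then 60 points at 60 s.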
metric = bg_test_utils.make_metric("a.b.c.d.e.f", **meta_dict)
self.assertEqual(self.accessor.has_metric(metric.name), False)
self.accessor.create_metric(metric)
self.assertEqual(self.accessor.has_metric(metric.name), True)
metric_again = self.accessor.get_metric(metric.name)
self.assertEqual(metric.name, metric_again.name)
for k, v in meta_dict.items():
self.assertEqual(v, getattr(metric_again.metadata, k))
def test_update_metrics(self):
# prepare test
meta_dict = {
"aggregator": bg_accessor.Aggregator.last,
"retention": bg_accessor.Retention.from_string("60*1s:60*60s"),
"carbon_xfilesfactor": 0.3,
}
metadata = bg_accessor.MetricMetadata(**meta_dict)
metric_name = "a.b.c.d.e.f"
self.accessor.create_metric(
self.accessor.make_metric(metric_name, metadata))
metric = self.accessor.get_metric(metric_name)
for k, v in meta_dict.items():
self.assertEqual(v, getattr(metric.metadata, k))
# test
updated_meta_dict = {
"aggregator": bg_accessor.Aggregator.maximum,
"retention": bg_accessor.Retention.from_string("30*1s:120*30s"),
"carbon_xfilesfactor": 0.5,
}
updated_metadata = bg_accessor.MetricMetadata(**updated_meta_dict)
# Setting a known metric name should work
self.accessor.update_metric(metric_name, updated_metadata)
updated_metric = self.accessor.get_metric(metric_name)
for k, v in updated_meta_dict.items():
self.assertEqual(v, getattr(updated_metric.metadata, k))
# Setting an unknown metric name should fail
self.assertRaises(
bg_cassandra.InvalidArgumentError,
self.accessor.update_metric, "fake.metric.name", updated_metadata)
def test_has_metric(self):
metric = self.make_metric("a.b.c.d.e.f")
self.assertEqual(self.accessor.has_metric(metric.name), False)
self.accessor.create_metric(metric)
self.assertEqual(self.accessor.has_metric(metric.name), True)
def test_delete_metric(self):
metric = self.make_metric("a.b.c.d.e.f")
self.accessor.create_metric(metric)
self.assertEqual(self.accessor.has_metric(metric.name), True)
self.accessor.delete_metric(metric.name)
self.flush()
self.assertEqual(self.accessor.has_metric(metric.name), False)
def test_repair(self):
# TODO(c.chary): Add better test for repair()
self.accessor.repair()
def test_doubledots(self):
metric = self.make_metric("a.b..c")
metric_1 = self.make_metric("a.b.c")
points = [(1, 42)]
self.accessor.create_metric(metric)
self.accessor.create_metric(metric_1)
self.flush()
self.assertEqual(['a.b.c'],
list(self.accessor.glob_metric_names("a.b.*")))
self.assertEqual(True, self.accessor.has_metric("a.b..c"))
self.assertNotEqual(None, self.accessor.get_metric("a.b..c"))
self.accessor.insert_points(metric, points)
self.flush()
actual_points = self.accessor.fetch_points(
metric, 1, 2, stage=metric.retention[0])
self.assertEqual(points, list(actual_points))
actual_points = self.accessor.fetch_points(
metric_1, 1, 2, stage=metric.retention[0])
self.assertEqual(points, list(actual_points))
def test_metrics_ttl_correctly_refreshed(self):
metric1 = self.make_metric("a.b.c.d.e.f")
self.accessor.create_metric(metric1)
        # Set up the mock function.
        isUpdated = [False]
        def touch_metric_mock(*args, **kwargs):
            isUpdated[0] = True
        old_touch_fn = self.accessor.touch_metric
        self.accessor.touch_metric = touch_metric_mock
time.sleep(2)
self.accessor.get_metric(metric1.name, touch=True)
self.assertEqual(isUpdated[0], False)
old_ttl = self.accessor._CassandraAccessor__metadata_touch_ttl_sec
self.accessor._CassandraAccessor__metadata_touch_ttl_sec = 1
self.accessor.get_metric(metric1.name, touch=True)
self.assertEqual(isUpdated[0], True)
self.accessor._CassandraAccessor__metadata_touch_ttl_sec = old_ttl
self.accessor.touch_metric = old_touch_fn
def test_clean_expired(self):
metric1 = self.make_metric("a.b.c.d.e.f")
self.accessor.create_metric(metric1)
metric2 = self.make_metric("g.h.i.j.k.l")
self.accessor.create_metric(metric2)
self.flush()
# Check that the metrics exist before the cleanup
self.assertEqual(self.accessor.has_metric(metric1.name), True)
self.assertEqual(self.accessor.has_metric(metric2.name), True)
# set cutoff time in the future to delete all created metrics
cutoff = -3600
self.accessor.clean(cutoff)
# Check that the metrics are correctly deleted
self.assertEqual(self.accessor.has_metric(metric1.name), False)
self.assertEqual(self.accessor.has_metric(metric2.name), False)
self.addCleanup(self.accessor.drop_all_metrics)
def test_clean_not_expired(self):
metric1 = self.make_metric("a.b.c.d.e.f")
self.accessor.create_metric(metric1)
metric2 = self.make_metric("g.h.i.j.k.l")
self.accessor.create_metric(metric2)
self.flush()
# Check that the metrics exist before the cleanup
self.assertEqual(self.accessor.has_metric(metric1.name), True)
self.assertEqual(self.accessor.has_metric(metric2.name), True)
# set cutoff time in the past to delete nothing
cutoff = 3600
self.accessor.clean(cutoff)
# Check that the metrics still exist after the cleanup
self.assertEqual(self.accessor.has_metric(metric1.name), True)
self.assertEqual(self.accessor.has_metric(metric2.name), True)
self.addCleanup(self.accessor.drop_all_metrics)
def test_map(self):
metric1 = self.make_metric("a.b.c.d.e.f")
self.accessor.create_metric(metric1)
metric2 = self.make_metric("g.h.i.j.k.l")
self.accessor.create_metric(metric2)
self.flush()
def _callback(metric, done, total):
self.assertIsNotNone(metric)
self.assertTrue(done <= total)
def _errback(name):
self.assertIsNotNone(name)
self.accessor.map(_callback, errback=_errback)
class TestAccessorWithCassandraSASI(_BaseTestAccessorWithCassandraMetadata,
bg_test_utils.TestCaseWithAccessor):
pass
class TestAccessorWithCassandraLucene(_BaseTestAccessorWithCassandraMetadata,
bg_test_utils.TestCaseWithAccessor):
ACCESSOR_SETTINGS = {'use_lucene': True}
class TestAccessorWithCassandraData(bg_test_utils.TestCaseWithAccessor):
def fetch(self, metric, *args, **kwargs):
"""Helper to fetch points as a list."""
# default kwargs for stage.
if 'stage' not in kwargs:
kwargs['stage'] = metric.retention[0]
ret = self.accessor.fetch_points(metric, *args, **kwargs)
self.assertTrue(hasattr(ret, "__iter__"))
return list(ret)
def test_fetch_empty(self):
no_such_metric = bg_test_utils.make_metric("no.such.metric")
self.accessor.insert_points(_METRIC, _POINTS)
self.flush()
self.accessor.drop_all_metrics()
self.assertEqual(
len(self.fetch(no_such_metric, _POINTS_START, _POINTS_END)),
0,
)
        self.assertEqual(
            len(self.fetch(_METRIC, _POINTS_START, _POINTS_END)),
            0,
        )
def test_insert_empty(self):
        # We've had a regression where inserting an empty list would freeze
        # the process.
self.accessor.insert_points(_METRIC, [])
self.flush()
def test_insert_fetch(self):
self.accessor.create_metric(_METRIC)
self.accessor.insert_points(_METRIC, _POINTS)
self.flush()
# TODO: Test fetch at different stages for a given metric.
fetched = self.fetch(_METRIC, _QUERY_START, _QUERY_END)
# assertEqual is very slow when the diff is huge, so we give it a chance of
# failing early to avoid imprecise test timeouts.
self.assertEqual(_QUERY_RANGE, len(fetched))
self.assertEqual(_USEFUL_POINTS[:10], fetched[:10])
self.assertEqual(_USEFUL_POINTS[-10:], fetched[-10:])
self.assertEqual(_USEFUL_POINTS, fetched)
def test_insert_fetch_replicas(self):
self.accessor.shard = bg_accessor.pack_shard(replica=0, writer=0)
self.accessor.insert_points(_METRIC, _POINTS)
self.accessor.shard = bg_accessor.pack_shard(replica=3, writer=0xFFFF)
self.accessor.insert_points(_METRIC, _POINTS)
self.flush()
# TODO: Test fetch at different stages for a given metric.
fetched = self.fetch(_METRIC, _QUERY_START, _QUERY_END)
# assertEqual is very slow when the diff is huge, so we give it a chance of
# failing early to avoid imprecise test timeouts.
self.assertEqual(_QUERY_RANGE, len(fetched))
self.assertEqual(_USEFUL_POINTS[:10], fetched[:10])
self.assertEqual(_USEFUL_POINTS[-10:], fetched[-10:])
self.assertEqual(_USEFUL_POINTS, fetched)
def _get_version(self):
for host in self.cluster.metadata.all_hosts():
return version.LooseVersion(host.release_version)
return None
def test_create_datapoints_table_dtcs(self):
"""Validate that we can create a DTCS table."""
orig_cs = bg_cassandra._COMPACTION_STRATEGY
bg_cassandra._COMPACTION_STRATEGY = "DateTieredCompactionStrategy"
max_version = version.LooseVersion('3.8')
if self._get_version() > max_version:
print('Skipping DTCS test, incompatible version')
return
self._reset_keyspace(self.session, self.KEYSPACE)
        # We create a fake metric to create the table. This also validates
        # that breaking changes aren't introduced to the schema.
self.accessor.create_metric(_METRIC)
self.accessor.insert_points(_METRIC, _POINTS)
self.flush()
self.cluster.refresh_schema_metadata()
keyspace = None
for name, keyspace in self.cluster.metadata.keyspaces.items():
if name == self.accessor.keyspace:
break
datapoints_86400p_1s = keyspace.tables['datapoints_86400p_1s_0']
options = datapoints_86400p_1s.options
self.assertEqual(
options['compaction']['class'],
'org.apache.cassandra.db.compaction.DateTieredCompactionStrategy')
self.assertEqual(options['compaction']['base_time_seconds'], '901')
self.assertEqual(options['compaction']
['max_window_size_seconds'], '2000')
self.assertEqual(options['default_time_to_live'], 87300)
datapoints_10080_60s = keyspace.tables['datapoints_10080p_60s_aggr']
options = datapoints_10080_60s.options
self.assertEqual(
options['compaction']['class'],
'org.apache.cassandra.db.compaction.DateTieredCompactionStrategy')
self.assertEqual(options['compaction']['base_time_seconds'], '960')
self.assertEqual(options['compaction']
['max_window_size_seconds'], '120000')
self.assertEqual(options['default_time_to_live'], 605700)
bg_cassandra._COMPACTION_STRATEGY = orig_cs
def test_create_datapoints_table_twcs(self):
"""Validate that we can create a TWCS table."""
min_version = version.LooseVersion('3.8')
if self._get_version() < min_version:
print('Skipping TWCS test, incompatible version')
return
orig_cs = bg_cassandra._COMPACTION_STRATEGY
bg_cassandra._COMPACTION_STRATEGY = "TimeWindowCompactionStrategy"
self._reset_keyspace(self.session, self.KEYSPACE)
        # We create a fake metric to create the table. This also validates
        # that breaking changes aren't introduced to the schema.
self.accessor.create_metric(_METRIC)
self.accessor.insert_points(_METRIC, _POINTS)
self.flush()
self.cluster.refresh_schema_metadata()
keyspace = None
for name, keyspace in self.cluster.metadata.keyspaces.items():
if name == self.accessor.keyspace:
break
datapoints_86400p_1s = keyspace.tables['datapoints_86400p_1s_0']
options = datapoints_86400p_1s.options
self.assertEqual(
options['compaction']['class'],
'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy')
self.assertEqual(options['compaction']
['compaction_window_unit'], 'HOURS')
self.assertEqual(options['compaction']['compaction_window_size'], '1')
self.assertEqual(options['default_time_to_live'], 87300)
datapoints_10080_60s = keyspace.tables['datapoints_10080p_60s_aggr']
options = datapoints_10080_60s.options
self.assertEqual(
options['compaction']['class'],
'org.apache.cassandra.db.compaction.TimeWindowCompactionStrategy')
self.assertEqual(options['compaction']
['compaction_window_unit'], 'HOURS')
self.assertEqual(options['compaction']['compaction_window_size'], '3')
self.assertEqual(options['default_time_to_live'], 605700)
bg_cassandra._COMPACTION_STRATEGY = orig_cs
def test_syncdb(self):
retentions = [bg_accessor.Retention.from_string("60*1s:60*60s")]
self.accessor.syncdb(retentions=retentions, dry_run=True)
self.accessor.syncdb(retentions=retentions, dry_run=False)
if __name__ == "__main__":
unittest.main()
| 38.302926
| 93
| 0.618451
|
4a138a0fa3b2eca11a2054b19e5e6f71a466dbb9
| 2,920
|
py
|
Python
|
5.4.3-tf-grad-cam.py
|
83286415/DeepLearningWithPythonKeras
|
d3e7dd3b206d3d22a45ad4967a00edd26c4cbe75
|
[
"MIT"
] | null | null | null |
5.4.3-tf-grad-cam.py
|
83286415/DeepLearningWithPythonKeras
|
d3e7dd3b206d3d22a45ad4967a00edd26c4cbe75
|
[
"MIT"
] | null | null | null |
5.4.3-tf-grad-cam.py
|
83286415/DeepLearningWithPythonKeras
|
d3e7dd3b206d3d22a45ad4967a00edd26c4cbe75
|
[
"MIT"
] | null | null | null |
from vgg import vgg16
import tensorflow as tf
import numpy as np
from skimage import io
from skimage.transform import resize
from matplotlib import pyplot as plt
from imagenet_classes import class_names
from scipy.misc import imread, imresize
flags = tf.app.flags
flags.DEFINE_string("input", "laska.png", "Path to input image ['laska.png']")
flags.DEFINE_string("output", "laska_save.png", "Path to input image ['laska_save.png']")
flags.DEFINE_string("layer_name", "pool5", "Layer till which to backpropagate ['pool5']")
FLAGS = flags.FLAGS
def load_image(img_path):
print("Loading image")
img = imread(img_path, mode='RGB')
img = imresize(img, (224, 224))
    # Converting shape from [224,224,3] to [1,224,224,3]
x = np.expand_dims(img, axis=0)
# Converting RGB to BGR for VGG
x = x[:,:,:,::-1]
return x, img
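# A sketch of the Grad-CAM math implemented below (our paraphrase of the usual
# notation, not taken verbatim from any source): for a target class score y_c
# and conv feature maps A^k, the channel weights are
#   alpha_k = mean_ij(d y_c / d A^k_ij)
# and the heatmap is cam = ReLU(sum_k alpha_k * A^k), resized to the input.
# grad_cam() computes this with tf.gradients and a per-channel np.mean
# (modulo its np.ones initialisation of cam).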
def grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes):
print("Setting gradients to 1 for target class and rest to 0")
# Conv layer tensor [?,7,7,512]
conv_layer = vgg.layers[layer_name]
# [1000]-D tensor with target class index set to 1 and rest as 0
one_hot = tf.sparse_to_dense(predicted_class, [nb_classes], 1.0)
signal = tf.mul(vgg.layers['fc3'], one_hot)
loss = tf.reduce_mean(signal)
grads = tf.gradients(loss, conv_layer)[0]
# Normalizing the gradients
norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))
output, grads_val = sess.run([conv_layer, norm_grads], feed_dict={vgg.imgs: x})
output = output[0] # [7,7,512]
grads_val = grads_val[0] # [7,7,512]
weights = np.mean(grads_val, axis = (0, 1)) # [512]
cam = np.ones(output.shape[0 : 2], dtype = np.float32) # [7,7]
# Taking a weighted average
for i, w in enumerate(weights):
cam += w * output[:, :, i]
# Passing through ReLU
cam = np.maximum(cam, 0)
cam = cam / np.max(cam)
cam = resize(cam, (224,224))
# Converting grayscale to 3-D
cam3 = np.expand_dims(cam, axis=2)
cam3 = np.tile(cam3,[1,1,3])
return cam3
def main(_):
x, img = load_image(FLAGS.input)
sess = tf.Session()
print("\nLoading Vgg")
imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16(imgs, 'vgg16_weights.npz', sess)
print("\nFeedforwarding")
prob = sess.run(vgg.probs, feed_dict={vgg.imgs: x})[0]
preds = (np.argsort(prob)[::-1])[0:5]
print('\nTop 5 classes are')
for p in preds:
print(class_names[p], prob[p])
# Target class
predicted_class = preds[0]
# Target layer for visualization
layer_name = FLAGS.layer_name
# Number of output classes of model being used
nb_classes = 1000
cam3 = grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes)
img = img.astype(float)
img /= img.max()
# Superimposing the visualization with the image.
new_img = img+3*cam3
new_img /= new_img.max()
# Display and save
io.imshow(new_img)
plt.show()
io.imsave(FLAGS.output, new_img)
if __name__ == '__main__':
tf.app.run()
| 28.349515
| 90
| 0.693836
|
4a138b2917860914c9592469ab2a1b2b3193c6c4
| 1,287
|
py
|
Python
|
python-lib/quotes/spiders.py
|
lukin0110/quotes
|
0797ae3ffed19076aedd9b44ca107ad9742afcb4
|
[
"Apache-2.0"
] | 9
|
2018-06-22T20:39:41.000Z
|
2022-01-07T12:57:32.000Z
|
python-lib/quotes/spiders.py
|
lukin0110/quotes
|
0797ae3ffed19076aedd9b44ca107ad9742afcb4
|
[
"Apache-2.0"
] | 8
|
2016-10-17T12:44:40.000Z
|
2016-10-24T19:08:15.000Z
|
python-lib/quotes/spiders.py
|
lukin0110/quotes
|
0797ae3ffed19076aedd9b44ca107ad9742afcb4
|
[
"Apache-2.0"
] | 4
|
2019-01-25T19:01:40.000Z
|
2020-12-15T19:32:56.000Z
|
import scrapy
import tempfile
from scrapy.crawler import CrawlerProcess
from .write import write
class BrainyQuoteSpider(scrapy.Spider):
"""
Execute from the shell:
$ scrapy runspider spiders.py -o in.csv
"""
name = 'BrainyQuote'
start_urls = ['']
def parse(self, response):
for title in response.css('span.bqQuoteLink'):
yield {'title': title.css('a ::text').extract_first()}
next_page = response.css('ul.pagination > li:last-child > a ::attr(href)').extract_first()
if next_page:
yield scrapy.Request(response.urljoin(next_page), callback=self.parse)
def runner(url, file_uri='out.csv'):
"""
A runner for the Spider declared previously in code.
https://doc.scrapy.org/en/latest/topics/practices.html
http://stackoverflow.com/questions/23574636/scrapy-from-script-output-in-json
"""
temp = tempfile.NamedTemporaryFile()
try:
process = CrawlerProcess({
'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',
'FEED_FORMAT': 'csv',
'FEED_URI': temp.name
})
process.crawl(BrainyQuoteSpider, start_urls=[url])
process.start()
write(temp.name, file_uri)
finally:
temp.close()
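# Hypothetical usage (the URL and output path below are illustrative only,
# since start_urls is left empty above):
#   runner("https://example.com/quotes?page=1", file_uri="quotes.csv")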
| 27.382979
| 98
| 0.63481
|
4a138b81ff0dcb3e84996373cf8c2f61c52dd838
| 604
|
py
|
Python
|
src/core/tests/widgets/test_icon.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-02-19T17:19:24.000Z
|
2020-04-13T21:22:24.000Z
|
src/core/tests/widgets/test_icon.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-10-26T20:54:06.000Z
|
2019-10-26T21:43:43.000Z
|
src/core/tests/widgets/test_icon.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 4
|
2019-02-13T17:54:15.000Z
|
2019-10-26T21:16:27.000Z
|
import unittest
from unittest.mock import MagicMock
import toga
import toga_dummy
class TestIcon(unittest.TestCase):
def setUp(self):
self.factory = MagicMock()
self.factory.Icon = MagicMock(return_value=MagicMock(spec=toga_dummy.factory.Icon))
self.test_path = "Example.bmp"
self.icon = toga.Icon(self.test_path)
def test_icon_bind(self):
self.assertEqual(self.icon._impl, None)
self.icon.bind(factory=toga_dummy.factory)
self.assertEqual(self.icon._impl.interface, self.icon)
self.assertEqual(self.icon.path, self.test_path)
| 28.761905
| 91
| 0.708609
|
4a138c84c2e4211d64e0e94123aa5c7e881018a8
| 24,719
|
py
|
Python
|
nanodet/data/dataset/coco.py
|
CPFelix/-nanodet
|
2af3228d06f7694d274c7b29276abd398630ca21
|
[
"Apache-2.0"
] | 14
|
2022-03-21T21:04:38.000Z
|
2022-03-29T12:49:34.000Z
|
nanodet/data/dataset/coco.py
|
CPFelix/-nanodet
|
2af3228d06f7694d274c7b29276abd398630ca21
|
[
"Apache-2.0"
] | null | null | null |
nanodet/data/dataset/coco.py
|
CPFelix/-nanodet
|
2af3228d06f7694d274c7b29276abd398630ca21
|
[
"Apache-2.0"
] | 1
|
2022-03-24T18:46:17.000Z
|
2022-03-24T18:46:17.000Z
|
# Copyright 2021 RangiLyu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
import torch
from pycocotools.coco import COCO
from .base import BaseDataset
import random
class CocoDataset(BaseDataset):
def get_data_info(self, ann_path):
"""
Load basic information of dataset such as image path, label and so on.
:param ann_path: coco json file path
:return: image info:
[{'license': 2,
'file_name': '000000000139.jpg',
'coco_url': 'http://images.cocodataset.org/val2017/000000000139.jpg',
'height': 426,
'width': 640,
'date_captured': '2013-11-21 01:34:01',
'flickr_url':
'http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg',
'id': 139},
...
]
"""
self.coco_api = COCO(ann_path)
self.cat_ids = sorted(self.coco_api.getCatIds())
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.cats = self.coco_api.loadCats(self.cat_ids)
self.img_ids = sorted(self.coco_api.imgs.keys())
img_info = self.coco_api.loadImgs(self.img_ids)
        # Add mosaic data augmentation
if (isinstance(self.input_size, int)):
self.mosaic_border = [-self.input_size // 2, -self.input_size // 2]
else:
            self.mosaic_border = [-self.input_size[1] // 2, -self.input_size[0] // 2]  # Note: keep [H, W] order, otherwise random_perspective() misbehaves.
self.indices = range(len(self.img_ids))
return img_info
def get_per_img_info(self, idx):
img_info = self.data_info[idx]
file_name = img_info["file_name"]
height = img_info["height"]
width = img_info["width"]
id = img_info["id"]
if not isinstance(id, int):
raise TypeError("Image id must be int.")
info = {"file_name": file_name, "height": height, "width": width, "id": id}
return info
def get_img_annotation(self, idx):
"""
load per image annotation
:param idx: index in dataloader
:return: annotation dict
"""
img_id = self.img_ids[idx]
ann_ids = self.coco_api.getAnnIds([img_id])
anns = self.coco_api.loadAnns(ann_ids)
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
if self.use_instance_mask:
gt_masks = []
if self.use_keypoint:
gt_keypoints = []
for ann in anns:
if ann.get("ignore", False):
continue
x1, y1, w, h = ann["bbox"]
if ann["area"] <= 0 or w < 1 or h < 1:
continue
if ann["category_id"] not in self.cat_ids:
continue
            # Convert to (x1, y1, x2, y2)
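            # e.g. a COCO bbox [10, 20, 30, 40] given as (x, y, w, h) becomes
            # [10, 20, 40, 60] in (x1, y1, x2, y2) form.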
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get("iscrowd", False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann["category_id"]])
if self.use_instance_mask:
gt_masks.append(self.coco_api.annToMask(ann))
if self.use_keypoint:
gt_keypoints.append(ann["keypoints"])
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
annotation = dict(
bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore
)
if self.use_instance_mask:
annotation["masks"] = gt_masks
if self.use_keypoint:
if gt_keypoints:
annotation["keypoints"] = np.array(gt_keypoints, dtype=np.float32)
else:
annotation["keypoints"] = np.zeros((0, 51), dtype=np.float32)
return annotation
def get_train_data(self, idx):
"""
Load image and annotation
:param idx:
:return: meta-data (a dict containing image, annotation and other information)
"""
img_info = self.get_per_img_info(idx)
file_name = img_info["file_name"]
image_path = os.path.join(self.img_path, file_name)
img = cv2.imread(image_path)
if img is None:
print("image {} read failed.".format(image_path))
raise FileNotFoundError("Cant load image! Please check image path!")
ann = self.get_img_annotation(idx)
meta = dict(
img=img, img_info=img_info, gt_bboxes=ann["bboxes"], gt_labels=ann["labels"]
)
if self.use_instance_mask:
meta["gt_masks"] = ann["masks"]
if self.use_keypoint:
meta["gt_keypoints"] = ann["keypoints"]
input_size = self.input_size
if self.multi_scale:
input_size = self.get_random_size(self.multi_scale, input_size)
        # Apply mosaic augmentation with some probability (train mode only)
if ((random.random() < self.load_mosaic) and (self.mode == "train")):
img4, labels4, bbox4 = load_mosaic(self, idx)
meta['img_info']['height'] = img4.shape[0]
meta['img_info']['width'] = img4.shape[1]
meta['img'] = img4
meta['gt_labels'] = labels4
meta['gt_bboxes'] = bbox4
        # Apply cut_mosaic augmentation with some probability (train mode only)
if ((random.random() < self.cut_mosaic) and (self.mode == "train")):
img4, labels4, bbox4 = cut_mosaic(self, idx)
meta['img_info']['height'] = img4.shape[0]
meta['img_info']['width'] = img4.shape[1]
meta['img'] = img4
meta['gt_labels'] = labels4
meta['gt_bboxes'] = bbox4
        # Crop around the cigarette region (mitigates very small cigarette targets)
if ((random.random() < self.crop_cigarette) and (self.mode == "train") and (2 in ann["labels"])):
img4, labels4, bbox4 = cut_cigarette(self, idx)
meta['img_info']['height'] = img4.shape[0]
meta['img_info']['width'] = img4.shape[1]
meta['img'] = img4
meta['gt_labels'] = labels4
meta['gt_bboxes'] = bbox4
        # # Save the preprocessed image and its annotations (debug code)
# img_draw = meta["img"].copy()
# for i, box in enumerate(meta["gt_bboxes"]):
# cv2.rectangle(img_draw, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 0, 255), 2)
# imgname = "img_draw_" + str(random.randint(0, 10000)) + ".jpg"
# savePath = "/home/chenpengfei/temp/nanodet_20220222/"
# if not os.path.exists(savePath):
# os.makedirs(savePath)
# cv2.imwrite(savePath + imgname, img_draw)
# print("OK")
meta = self.pipeline(self, meta, input_size)
meta["img"] = torch.from_numpy(meta["img"].transpose(2, 0, 1))
return meta
def get_val_data(self, idx):
"""
Currently no difference from get_train_data.
Not support TTA(testing time augmentation) yet.
:param idx:
:return:
"""
# TODO: support TTA
return self.get_train_data(idx)
# 增加mosaic数据增强
def map_newsize(x, h0, w0, w, h, padw=0, padh=0):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0] * w / w0 + padw # top left x
y[:, 1] = x[:, 1] * h / h0 + padh # top left y
y[:, 2] = x[:, 2] * w / w0 + padw # bottom right x
y[:, 3] = x[:, 3] * h / h0 + padh # bottom right y
return y
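# Worked example (hypothetical numbers): a box [10, 20, 30, 40] from an
# h0=100, w0=200 image resized to h=50, w=100 and pasted at padw=5, padh=7
# maps to [10*0.5 + 5, 20*0.5 + 7, 30*0.5 + 5, 40*0.5 + 7] = [10, 17, 20, 27].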
def load_image(self, i):
img_info = self.get_per_img_info(i)
file_name = img_info["file_name"]
image_path = os.path.join(self.img_path, file_name)
img = cv2.imread(image_path)
h0, w0 = img.shape[:2]
if (isinstance(self.input_size, int)):
        r = self.input_size / max(h0, w0)  # ratio
        if r != 1:  # if sizes are not equal
            im = cv2.resize(img, (int(w0 * r), int(h0 * r)),
                            interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR)
        else:
            im = img  # already at target size; without this branch `im` would be unbound
else:
r = max(self.input_size[0], self.input_size[1]) / max(h0, w0) # ratio
im = cv2.resize(img, (self.input_size[0], self.input_size[1]),
interpolation=cv2.INTER_AREA if r < 1 else cv2.INTER_LINEAR)
return im, img, img.shape[:2], im.shape[:2] # im, im_original, hw_original, hw_resized
def load_mosaic(self, idx):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.input_size
if (isinstance(s, int)):
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
else:
yc, _, _, xc = [int(random.uniform(-x, 2 * y + x)) for x in self.mosaic_border for y in [s[1], s[0]]]
# xc = int(random.uniform(-self.mosaic_border[0], 2 * s[0] + self.mosaic_border[0]))
# yc = int(random.uniform(-self.mosaic_border[1], 2 * s[1] + self.mosaic_border[1]))
# print(yc, xc)
indices = [idx] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
        img, _, (h0, w0), (h, w) = load_image(self, index)  # load_image returns (resized, original, hw_original, hw_resized)
# place img in img4
if i == 0: # top left
if (isinstance(s, int)):
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
else:
img4 = np.full((s[1] * 2, s[0] * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
if (isinstance(s, int)):
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
else:
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s[0] * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
if (isinstance(s, int)):
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
else:
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s[1] * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
if (isinstance(s, int)):
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
else:
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s[0] * 2), min(s[1] * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
# print("\n----------------------------\n")
# print(i)
# print(img4.shape)
# print(img.shape)
# print(y1a, y2a, x1a, x2a, y1b, y2b, x1b, x2b)
# print(img4[y1a:y2a, x1a:x2a].shape)
# print(img[y1b:y2b, x1b:x2b].shape)
# print("----------------------------\n")
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# cv2.imwrite("/home/chenpengfei/temp/" + str(i) + ".jpg", img4)
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels = []
ann = self.get_img_annotation(index).copy()
for i,box in enumerate(ann["bboxes"]):
label = np.append(ann["labels"][i], box)
labels.append(label)
labels = np.array(labels)
if labels.size > 0:
labels[:, 1:] = map_newsize(labels[:, 1:], h0, w0, w, h, padw, padh) # normalized xywh to pixel xyxy format
else:
            continue  # skip images without annotations, otherwise np.concatenate below raises
labels4.append(labels)
# Concat/clip labels
# print(labels4)
labels4 = np.concatenate(labels4, 0)
if (isinstance(s, int)):
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
else:
x_index = [1, 3]
y_index = [2, 4]
for x in (labels4[:, x_index], *segments4):
labels4[:, x_index] = np.clip(x, 0, 2 * s[0], out=x) # clip when using random_perspective()
for y in (labels4[:, y_index], *segments4):
labels4[:, y_index] = np.clip(y, 0, 2 * s[1], out=y) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
img4_draw = img4.copy()
saveDir = "/home/chenpengfei/temp/nanodet_mosaic/"
if not os.path.exists(saveDir):
os.makedirs(saveDir)
for i in range(labels4.shape[0]):
if (int(labels4[i][0]) == 0):
cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])),
(0, 0, 255), 2)
else:
cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])),
(255, 0, 0), 2)
imagename = "img4_draw_" + str(random.randint(0, 10000)) + ".jpg"
cv2.imwrite(saveDir + imagename, img4_draw)
# Augment
# img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
#
# img4, labels4 = random_perspective(img4, labels4, segments4,
# degrees=self.hyp['degrees'],
# translate=self.hyp['translate'],
# scale=self.hyp['scale'],
# shear=self.hyp['shear'],
# perspective=self.hyp['perspective'],
# border=self.mosaic_border) # border to remove
# img4_draw = img4.copy()
# for i in range(labels4.shape[0]):
# cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])), (0, 0, 255), 2)
# cv2.imwrite("/home/chenpengfei/temp/img4_draw.jpg", img4_draw)
bbox4 = labels4[:, 1:].astype(np.float32)
labels4 = labels4[:, 0].astype(np.int64)
return img4, labels4, bbox4
# Check whether a box is large enough (both sides >= pixels)
def smallBox(x, pixels):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
w = y[2] - y[0]
h = y[3] - y[1]
if ((w < pixels) or (h < pixels)):
return False
else:
return True
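# e.g. smallBox([0, 0, 5, 20], 10) -> False: the width (5 px) is below the
# 10 px minimum even though the height passes (illustrative values).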
# A mosaic variant: crop a quarter-sized region from each of four images and
# stitch them; unlike plain mosaic, target sizes are not shrunk.
def cut_mosaic(self, idx):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.input_size
if (isinstance(s, int)):
yc, xc = [int(random.uniform(0, s / 2)), int(random.uniform(0, s / 2))] # mosaic center x, y
else:
yc, xc = [int(random.uniform(0, s[1] / 2)), int(random.uniform(0, s[0] / 2))]
# xc = int(random.uniform(-self.mosaic_border[0], 2 * s[0] + self.mosaic_border[0]))
# yc = int(random.uniform(-self.mosaic_border[1], 2 * s[1] + self.mosaic_border[1]))
# print(yc, xc)
indices = [idx] + random.choices(self.indices, k=3) # 3 additional image indices
for i, index in enumerate(indices):
# Load image
        img, _, (h0, w0), (h, w) = load_image(self, index)  # unpack all four return values
part_w = int(w / 2)
part_h = int(h / 2)
# place img in img4
if i == 0: # top left
if (isinstance(s, int)):
img4 = np.full((s, s, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
else:
img4 = np.full((s[1], s[0], img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = 0, 0, part_w, part_h # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = xc, yc, xc + part_w, yc + part_h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = part_w, 0, w, part_h
x1b, y1b, x2b, y2b = xc, yc, xc + part_w, yc + part_h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = 0, part_h, part_w, h
x1b, y1b, x2b, y2b = xc, yc, xc + part_w, yc + part_h
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = part_w, part_h, w, h
x1b, y1b, x2b, y2b = xc, yc, xc + part_w, yc + part_h
# print("\n----------------------------\n")
# print(i)
# print(img4.shape)
# print(img.shape)
# print(y1a, y2a, x1a, x2a, y1b, y2b, x1b, x2b)
# print(img4[y1a:y2a, x1a:x2a].shape)
# print(img[y1b:y2b, x1b:x2b].shape)
# print("----------------------------\n")
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
# cv2.imwrite("/home/chenpengfei/temp/" + str(i) + ".jpg", img4)
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels = []
ann = self.get_img_annotation(index).copy()
for i,box in enumerate(ann["bboxes"]):
label = np.append(ann["labels"][i], box)
labels.append(label)
labels = np.array(labels)
if labels.size > 0:
labels[:, 1:] = map_newsize(labels[:, 1:], h0, w0, w, h, padw, padh) # normalized xywh to pixel xyxy format
                # Clip coordinates so boxes stay inside their tile
x_index = [1, 3]
y_index = [2, 4]
for index,x in enumerate(labels[:, x_index]):
labels[index, x_index] = np.clip(x, x1a, x2a, out=x) # clip when using random_perspective()
for index,y in enumerate(labels[:, y_index]):
labels[index, y_index] = np.clip(y, y1a, y2a, out=y) # clip when using random_perspective()
else:
            continue  # skip images without annotations, otherwise np.concatenate below raises
        # After clipping, drop boxes that became too small
pixels_hand = 10
pixels_cigarette = 2
delete_id = []
for row in range(labels.shape[0]):
if (labels[row][0] == 0):
if not smallBox(labels[row][1:], pixels_hand):
delete_id.append(row)
else:
if not smallBox(labels[row][1:], pixels_cigarette):
delete_id.append(row)
labels = np.delete(labels, delete_id, axis=0)
labels4.append(labels)
# Concat/clip labels
# print(labels4)
labels4 = np.concatenate(labels4, 0)
# img4, labels4 = replicate(img4, labels4) # replicate
# img4_draw = img4.copy()
# saveDir = "/home/chenpengfei/temp/nanodet_cut_mosaic/"
# if not os.path.exists(saveDir):
# os.makedirs(saveDir)
# for i in range(labels4.shape[0]):
# if (int(labels4[i][0]) == 0):
# cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])), (0, 0, 255), 2)
# else:
# cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])),
# (255, 0, 0), 2)
# imagename = "img4_draw_" + str(random.randint(0, 10000)) + ".jpg"
# cv2.imwrite(saveDir + imagename, img4_draw)
# Augment
# img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
#
# img4, labels4 = random_perspective(img4, labels4, segments4,
# degrees=self.hyp['degrees'],
# translate=self.hyp['translate'],
# scale=self.hyp['scale'],
# shear=self.hyp['shear'],
# perspective=self.hyp['perspective'],
# border=self.mosaic_border) # border to remove
# img4_draw = img4.copy()
# for i in range(labels4.shape[0]):
# cv2.rectangle(img4_draw, (int(labels4[i][1]), int(labels4[i][2])), (int(labels4[i][3]), int(labels4[i][4])), (0, 0, 255), 2)
# cv2.imwrite("/home/chenpengfei/temp/img4_draw.jpg", img4_draw)
bbox4 = labels4[:, 1:].astype(np.float32)
labels4 = labels4[:, 0].astype(np.int64)
return img4, labels4, bbox4
# Compute IoU (intersection over union) of two boxes
def bb_intersection_over_union(boxA, boxB):
boxA = [int(x) for x in boxA]
boxB = [int(x) for x in boxB]
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
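# Worked example (illustrative boxes): boxA = [0, 0, 10, 10] and
# boxB = [5, 5, 15, 15] intersect in a 6x6 region (the +1 terms make the
# pixel coordinates inclusive), so iou = 36 / (121 + 121 - 36) ~= 0.175.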
# Crop a region around the cigarette at a fixed ratio of the image size
def cut_cigarette(self, idx):
labels, segments = [], []
# Load image
img, img_orgin, (h0, w0), (h, w) = load_image(self, idx)
# Labels
labels = []
ann = self.get_img_annotation(idx).copy()
iou_boxes = []
iou_labels = []
min_box = [0.0, 0.0, 0.0, 0.0]
for i, box in enumerate(ann["bboxes"]):
if ann["labels"][i] == 2:
iou_labels.append(ann["labels"][i].copy())
iou_boxes.append(ann["bboxes"][i].copy())
min_box = ann["bboxes"][i]
no_iou_boxes = []
no_iou_labels = []
for i, box in enumerate(ann["bboxes"]):
if ann["labels"][i] != 2:
iou = bb_intersection_over_union(min_box, box)
if iou > 0:
iou_labels.append(ann["labels"][i].copy())
iou_boxes.append(ann["bboxes"][i].copy())
min_box[0] = min(min_box[0], box[0])
min_box[1] = min(min_box[1], box[1])
min_box[2] = max(min_box[2], box[2])
min_box[3] = max(min_box[3], box[3])
else:
no_iou_labels.append(ann["labels"][i].copy())
no_iou_boxes.append(ann["bboxes"][i].copy())
crop_w = w0 / 2
crop_h = h0 / 2
center = [int((min_box[0] + min_box[2]) / 2), int((min_box[1] + min_box[3]) / 2)]
crop_box = [0.0, 0.0, 0.0, 0.0]
crop_box[0] = int(center[0] - crop_w / 2)
crop_box[1] = int(center[1] - crop_h / 2)
crop_box[2] = int(center[0] + crop_w / 2)
crop_box[3] = int(center[1] + crop_h / 2)
    # Expand crop_box so it fully covers the annotations' minimal enclosing box
if crop_box[0] > min_box[0]:
crop_box[0] = min_box[0]
if crop_box[1] > min_box[1]:
crop_box[1] = min_box[1]
if crop_box[2] < min_box[2]:
crop_box[2] = min_box[2]
if crop_box[3] < min_box[3]:
crop_box[3] = min_box[3]
    # Clamp crop_box to the original image bounds
if crop_box[0] < 0:
crop_box[0] = 0
if crop_box[1] < 0:
crop_box[1] = 0
if crop_box[2] > w0:
crop_box[2] = w0
if crop_box[3] > h0:
crop_box[3] = h0
crop_box = [int(i) for i in crop_box]
img_crop = img_orgin[crop_box[1]:crop_box[3], crop_box[0]:crop_box[2]]
for i, box in enumerate(iou_boxes):
box1 = [0.0, 0.0, 0.0, 0.0]
box1[0] = box[0] - crop_box[0]
box1[1] = box[1] - crop_box[1]
box1[2] = box[2] - crop_box[0]
box1[3] = box[3] - crop_box[1]
label = np.append(iou_labels[i], box1)
labels.append(label)
for i, box in enumerate(no_iou_boxes):
iou = bb_intersection_over_union(crop_box, box)
if iou > 0:
if box[0] < crop_box[0]:
box[0] = crop_box[0]
if box[1] < crop_box[1]:
box[1] = crop_box[1]
if box[2] > crop_box[2]:
box[2] = crop_box[2]
if box[3] > crop_box[3]:
box[3] = crop_box[3]
                # Drop boxes that became too small after clipping
pixels_hand = 15
if not smallBox(box, pixels_hand):
continue
box1 = [0.0, 0.0, 0.0, 0.0]
box1[0] = box[0] - crop_box[0]
box1[1] = box[1] - crop_box[1]
box1[2] = box[2] - crop_box[0]
box1[3] = box[3] - crop_box[1]
label = np.append(no_iou_labels[i], box1)
labels.append(label)
labels = np.array(labels)
bboxs_crop = labels[:, 1:].astype(np.float32)
labels_crop = labels[:, 0].astype(np.int64)
return img_crop, labels_crop, bboxs_crop
| 40.857851
| 138
| 0.53825
|
4a138d0d59998bf2a759c9c88abaa9b09b15b3ba
| 1,838
|
py
|
Python
|
plot_MSE.py
|
hpphappy/reconstruct_ptychography
|
a002cdff01e6dbfe9417fd3d764b22a9bbad2d94
|
[
"MIT"
] | null | null | null |
plot_MSE.py
|
hpphappy/reconstruct_ptychography
|
a002cdff01e6dbfe9417fd3d764b22a9bbad2d94
|
[
"MIT"
] | null | null | null |
plot_MSE.py
|
hpphappy/reconstruct_ptychography
|
a002cdff01e6dbfe9417fd3d764b22a9bbad2d94
|
[
"MIT"
] | 1
|
2021-08-12T23:07:27.000Z
|
2021-08-12T23:07:27.000Z
|
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('pdf')
import h5py
import os
from util import *
import dxchange
import numpy as np
path = 'cell/NMF_ptycho_recon'
save_path = os.path.join(path, 'comparison')
if not os.path.exists(save_path):
os.makedirs(save_path)
grid_delta = np.load('cell/phantom/grid_delta.npy')
print('dimension of the sample = ' +', '.join(map(str,grid_delta.shape)))
grid_delta = np.squeeze(grid_delta)
n_sample_pixel = grid_delta.shape[0]*grid_delta.shape[1]
energy_kev = 5
k = (2*np.pi)/(1.24/energy_kev)  # k in units of nm^-1
print(k)
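# Worked arithmetic: at 5 keV the wavelength is 1.24/5 = 0.248 nm, so
# k = 2*pi/0.248 ~= 25.3 nm^-1, which is the value printed above.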
matplotlib.rcParams['pdf.fonttype'] = 'truetype'
fontProperties = {'family': 'serif', 'serif': ['Helvetica'], 'weight': 'normal', 'size': 12}
plt.rc('font', **fontProperties)
n_s_ls = [1, 10, 30, 50, 100, 300, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
# spec = gridspec.GridSpec(1, 2 , width_ratios=[7, 0])
smse_normal_ls = []
fig, ax = plt.subplots(figsize=(8,5))
for i, n_s in enumerate(n_s_ls):
obj_dir = os.path.join(path, 'n2e7_nei' + str(n_s))
obj = dxchange.read_tiff(os.path.join(obj_dir, 'delta_ds_1.tiff'))
obj = obj[:,:,0]
smse_normal = (k**2)*np.sum((grid_delta - obj)**2)/n_sample_pixel
smse_normal_ls.append(smse_normal)
np.savez(os.path.join(save_path, "mse"), n_s_ls, smse_normal_ls)
ax.plot(n_s_ls, smse_normal_ls, '-s', color='#C000FF', linewidth=2, markerfacecolor='none', markeredgecolor='#C000FF', markeredgewidth=2, alpha=0.5, label = 'PCA')
ax.set_xticks(np.arange(0,4000,500))
Fontsize = 12
ax.set_xlabel('S', fontsize=Fontsize)
ax.set_ylabel('MSE', fontsize=Fontsize)
ax.legend(loc=1,fontsize=Fontsize,ncol=1)
# ax.set_xscale('log')
ax.set_yscale('log')
plt.savefig(os.path.join(save_path, 'MSE.pdf'), format='pdf')
#plt.savefig(os.path.join(save_path, 'SMSE.png'))
fig.clear()
plt.close(fig)
| 32.245614
| 163
| 0.708379
|
4a138e47f388418ef7df8b6c8f66a672b111f944
| 3,333
|
py
|
Python
|
test.py
|
sdtaylor/NIST-DSE
|
0b14407aa9301338d42a7802d02e91739a4f8012
|
[
"MIT"
] | null | null | null |
test.py
|
sdtaylor/NIST-DSE
|
0b14407aa9301338d42a7802d02e91739a4f8012
|
[
"MIT"
] | null | null | null |
test.py
|
sdtaylor/NIST-DSE
|
0b14407aa9301338d42a7802d02e91739a4f8012
|
[
"MIT"
] | null | null | null |
from skimage.external import tifffile
from skimage import io
from skimage import measure
from skimage.morphology import watershed
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.feature import peak_local_max
import numpy as np
def ndvi_from_hs(i):
red_bands = np.array([51,52,53,54,55,56,57,58,59])
nir_bands = np.array([95,96,97,98,99,100])
i_red = i[:,:,red_bands].mean(axis=2)
i_nir = i[:,:,nir_bands].mean(axis=2)
ndvi = (i_nir - i_red) / (i_nir + i_red)
return ndvi
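# e.g. with mean NIR reflectance 0.4 and mean red reflectance 0.1,
# NDVI = (0.4 - 0.1) / (0.4 + 0.1) = 0.6 (illustrative values only).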
# Returns a labeled np array and optionally coordinates of local maxima
def labels_from_watershed(height_image, ndvi_image, min_distance, ndvi_threshold,
return_coordinates=False):
    # Only compute local maxima and watershed where NDVI or canopy height indicates vegetation:
height_mask = np.logical_or(ndvi_image>=ndvi_threshold, height_image>0)
local_maxi = peak_local_max(height_image,
min_distance=min_distance,
labels=height_mask,
threshold_abs=2, indices= False)
coordinates = peak_local_max(height_image,
min_distance=min_distance,
labels=height_mask,
threshold_abs=2, indices= True)
markers = ndi.label(local_maxi)[0]
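    # Watershed floods basins from the markers; negating the height image turns
    # treetops (local maxima) into basin minima, so each detected top grows
    # into one labeled crown.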
    labels = watershed(-height_image, markers, mask=height_mask)  # use the parameter, not the global chm_image
if return_coordinates:
return labels, coordinates
else:
return labels
# Draw a circle in an array of size (array_x, array_y)
# with circle center (center_x, center_y) and radius
def draw_circle(array_shape, circle_center ,radius):
rows, cols = np.indices((array_shape[0], array_shape[1]))
circle_center_row, circle_center_col = circle_center[0], circle_center[1]
circle_mask = (rows - circle_center_row)**2 + (cols - circle_center_col)**2 < radius**2
return circle_mask
# Get a single image with masks for many (potentially overlapping)
# circles. points are the center points for all circles
def get_circles_from_points(array_shape,points, radius):
a = np.zeros(array_shape).astype(bool)
for i in range(points.shape[0]):
circle = draw_circle(array_shape, points[i], radius=radius)
a = np.logical_or(a, circle)
return a
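# Minimal usage sketch (shape, points and radius are illustrative):
#   mask = get_circles_from_points((100, 100), np.array([[50, 50], [10, 90]]), radius=8)
# returns a boolean (100, 100) array that is True inside either circle.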
#######################################################
ndvi_cutoff = 2
local_maxima_min_distance = 5
apply_max_crown_radius = True
max_crown_radius = 10
chm_image = tifffile.imread('OSBS_039_chm.tif')
hs_image = tifffile.imread('OSBS_039_hyper.tif')
ndvi = ndvi_from_hs(hs_image)
labels, coordinates = labels_from_watershed(height_image=chm_image,
ndvi_image=ndvi,
ndvi_threshold=ndvi_cutoff,
min_distance=local_maxima_min_distance,
return_coordinates=True)
if apply_max_crown_radius:
circles = get_circles_from_points(array_shape=chm_image.shape,
points=coordinates,
radius=max_crown_radius)
labels[~circles] = 0
plt.imshow(labels, cmap=plt.cm.spectral)
plt.plot(coordinates[:, 1], coordinates[:, 0], 'r.')
#fig.tight_layout()
plt.show()
| 37.033333
| 91
| 0.631863
|
4a138f4c73402c45da2cbb276b5123f7eaea78c6
| 4,448
|
py
|
Python
|
safedelete/tests/test_admin.py
|
yuekui/django-safedelete
|
aa16098b31155f44a680a30980559cd468383e4a
|
[
"BSD-3-Clause"
] | null | null | null |
safedelete/tests/test_admin.py
|
yuekui/django-safedelete
|
aa16098b31155f44a680a30980559cd468383e4a
|
[
"BSD-3-Clause"
] | null | null | null |
safedelete/tests/test_admin.py
|
yuekui/django-safedelete
|
aa16098b31155f44a680a30980559cd468383e4a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import User
from django.db import models
from django.test import RequestFactory, TestCase
from ..admin import SafeDeleteAdmin, highlight_deleted
from ..config import FIELD_NAME
from ..models import SafeDeleteModel
from .models import Article, Author, Category
class Order(SafeDeleteModel):
articles = models.ManyToManyField(Article)
class CategoryAdmin(SafeDeleteAdmin):
list_display = (highlight_deleted,) + SafeDeleteAdmin.list_display
admin.site.register(Category, CategoryAdmin)
class AdminTestCase(TestCase):
urls = 'safedelete.tests.urls'
def setUp(self):
self.author = Author.objects.create()
self.categories = (
Category.objects.create(name='é'),
Category.objects.create(),
Category.objects.create(),
)
self.articles = (
Article(
author=self.author
),
Article(
author=self.author,
category=self.categories[1]
),
Article(
author=self.author,
category=self.categories[2]
),
)
self.categories[1].delete()
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/', {})
self.modeladmin_default = admin.ModelAdmin(Category, AdminSite())
self.modeladmin = CategoryAdmin(Category, AdminSite())
user = User.objects.create_superuser('super', 'email@domain.com', 'secret')
self.client.login(username='super', password='secret')
self.request.user = user
def tearDown(self):
self.client.logout()
def get_changelist(self, request, model, modeladmin):
args = [
request, model, modeladmin.list_display,
modeladmin.list_display_links, modeladmin.list_filter,
modeladmin.date_hierarchy, modeladmin.search_fields,
modeladmin.list_select_related, modeladmin.list_per_page,
modeladmin.list_max_show_all, modeladmin.list_editable,
modeladmin
]
# New parameter in Django 2.1
if hasattr(modeladmin, 'sortable_by'):
args.append(modeladmin.sortable_by)
return ChangeList(*args)
def test_admin_model(self):
changelist_default = self.get_changelist(self.request, Category, self.modeladmin_default)
changelist = self.get_changelist(self.request, Category, self.modeladmin)
self.assertEqual(changelist.get_filters(self.request)[0][0].title, FIELD_NAME.replace('_', ' '))
self.assertEqual(changelist.queryset.count(), 3)
self.assertEqual(changelist_default.queryset.count(), 2)
def test_admin_listing(self):
"""Test deleted objects are in red in admin listing."""
resp = self.client.get('/admin/safedelete/category/')
line = '<span class="deleted">{0}</span>'.format(self.categories[1])
self.assertContains(resp, line)
def test_admin_xss(self):
"""Test whether admin XSS is blocked."""
Category.objects.create(name='<script>alert(42)</script>'),
resp = self.client.get('/admin/safedelete/category/')
# It should be escaped
self.assertNotContains(resp, '<script>alert(42)</script>')
def test_admin_undelete_action(self):
"""Test objects are undeleted and action is logged."""
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'_selected_action': [self.categories[1].pk],
})
self.assertTemplateUsed(resp, 'safedelete/undelete_selected_confirmation.html')
category = Category.all_objects.get(
pk=self.categories[1].pk
)
        self.assertTrue(getattr(category, FIELD_NAME))
resp = self.client.post('/admin/safedelete/category/', data={
'index': 0,
'action': ['undelete_selected'],
'post': True,
'_selected_action': [self.categories[1].pk],
})
category = Category.objects.get(
pk=self.categories[1].pk
)
self.assertFalse(getattr(category, FIELD_NAME))
| 35.301587
| 104
| 0.641637
|
4a138fac8cfe9a3698492c156e5e2ac37788d4c3
| 621
|
py
|
Python
|
cogs/auto_tempunban.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
cogs/auto_tempunban.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
cogs/auto_tempunban.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
from discord.ext import commands, tasks
import discord
from resources import support, GLOBAL_DATABASE
class loop(commands.Cog, name="loop"):
def __init__(self, client):
self.client = client
self.autotempunban.start()
@tasks.loop(minutes=2)
async def autotempunban(self):
for i in await GLOBAL_DATABASE.GET_BANNED():
user = await self.client.fetch_user(i)
await GLOBAL_DATABASE.CHECK_TEMPBAN(user)
@autotempunban.before_loop
async def before_presence(self):
await self.client.wait_until_ready()
def setup(bot):
bot.add_cog(loop(bot))
| 31.05
| 53
| 0.690821
|
4a1390a2b0cf7bc0391a2699c3c3f0d5655c7a53
| 23,878
|
py
|
Python
|
esmvaltool/diag_scripts/shared/_diag.py
|
jvegasbsc/ESMValTool
|
792dd98a07b227779dc5733270b01c6502a784ed
|
[
"Apache-2.0"
] | 1
|
2022-01-27T22:55:19.000Z
|
2022-01-27T22:55:19.000Z
|
esmvaltool/diag_scripts/shared/_diag.py
|
jvegasbsc/ESMValTool
|
792dd98a07b227779dc5733270b01c6502a784ed
|
[
"Apache-2.0"
] | null | null | null |
esmvaltool/diag_scripts/shared/_diag.py
|
jvegasbsc/ESMValTool
|
792dd98a07b227779dc5733270b01c6502a784ed
|
[
"Apache-2.0"
] | null | null | null |
"""Convenience classes and functions to implement python diagnostics.
Example
-------
Import and use these basic classes by e.g.::
import esmvaltool.diag_scripts.shared as e
datasets = e.Datasets(cfg)
variables = e.Variables(cfg)
"""
import collections
import logging
import warnings
from esmvaltool import ESMValToolDeprecationWarning
from . import names as n
logger = logging.getLogger(__name__)
# Global variables
DEFAULT_INFO = 'not_specified'
# Variable class containing all relevant information
BaseVariable = collections.namedtuple('Variable', [n.SHORT_NAME,
n.STANDARD_NAME,
n.LONG_NAME,
n.UNITS])
class Variable(BaseVariable):
"""Variable class containing all relevant information.
Note
----
This class has been deprecated in version 2.2 and will be removed two minor
releases later in version 2.4.
"""
def __new__(cls, short_name, standard_name, long_name, units):
"""Deprecate this class."""
warnings.warn(
"'Variable' has been deprecated in version 2.2 and will be "
"removed two minor releases later in version 2.4",
ESMValToolDeprecationWarning)
self = super().__new__(cls, short_name, standard_name, long_name,
units)
return self
class Variables(object):
"""Class to easily access a recipe's variables in a diagnostic.
Note
----
This class has been deprecated in version 2.2 and will be removed two minor
releases later in version 2.4.
Examples
--------
Get all variables of a recipe configuration `cfg`::
variables = Variables(cfg)
Access information of a variable `tas`::
variables.short_name('tas')
variables.standard_name('tas')
variables.long_name('tas')
variables.units('tas')
Access :mod:`iris`-suitable dictionary of a variable `tas`::
variables.iris_dict('tas')
Check if variables `tas` and `pr` are available::
variables.vars_available('tas', 'pr')
"""
def __init__(self, cfg=None, **names):
"""Load variables.
Parameters
----------
cfg : dict, optional
            Configuration dictionary of the recipe.
**names : dict or Variable, optional
Keyword arguments of the form `short_name=Variable_object` where
`Variable_object` can be given as :obj:`dict` or :class:`Variable`.
"""
warnings.warn(
"'Variables' has been deprecated in version 2.2 and will be "
"removed two minor releases later in version 2.4",
ESMValToolDeprecationWarning)
self._dict = {}
# Add variables from cfg file
if cfg is not None:
success = True
if isinstance(cfg, dict):
data = cfg.get(n.INPUT_DATA)
if isinstance(data, dict):
for info in data.values():
name = info.get(n.SHORT_NAME, DEFAULT_INFO)
attr = Variable(
name,
info.get(n.STANDARD_NAME, DEFAULT_INFO),
info.get(n.LONG_NAME, DEFAULT_INFO),
info.get(n.UNITS, DEFAULT_INFO))
self._add_to_dict(name, attr)
else:
success = False
else:
success = False
if not success:
logger.warning("%s is not a valid configuration file!", cfg)
if not self._dict:
logger.warning("Empty recipe configuration: the automatic "
"import of variables does not work for chained "
"scripts (using 'ancestors' key)")
        # Add custom variables
self.add_vars(**names)
if not self._dict:
logger.warning("No variables found!")
def __repr__(self):
"""Representation of the class."""
output = ''
for (name, attr) in self._dict.items():
output += '{}: {}\n'.format(name, attr)
return output
def _add_to_dict(self, name, attr):
"""Add variable to class dictionary.
Parameters
----------
name : str
`short_name` of the variable.
attr : Variable
All other information of the variable.
"""
if name not in self._dict:
logger.debug("Added variable '%s' to collection", name)
self._dict[name] = attr
def add_vars(self, **names):
"""Add costum variables to the class.
Parameters
----------
**names : dict or Variable, optional
Keyword arguments of the form `short_name=Variable_object` where
`Variable_object` can be given as :obj:`dict` or :class:`Variable`.
"""
for (name, attr) in names.items():
if isinstance(attr, Variable):
attr_var = attr
else:
attr_var = Variable(
name,
attr.get(n.STANDARD_NAME, DEFAULT_INFO),
attr.get(n.LONG_NAME, DEFAULT_INFO),
attr.get(n.UNITS, DEFAULT_INFO))
self._add_to_dict(name, attr_var)
def iris_dict(self, var):
"""Access :mod:`iris` dictionary of the variable.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
dict
Dictionary containing all attributes of the variable which can be
used directly in :mod:`iris` (`short_name` replaced by `var_name`).
"""
iris_dict = self._dict[var]._asdict()
iris_dict[n.VAR_NAME] = iris_dict.pop(n.SHORT_NAME)
return iris_dict
def long_name(self, var):
"""Access long name.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
str
Long name of the variable.
"""
return getattr(self._dict[var], n.LONG_NAME)
def modify_var(self, var, **names):
"""Modify an already existing variable of the class.
Parameters
----------
var : str
(Short) name of the existing variable.
**names
Keyword arguments of the form `short_name=tas`.
Raises
------
ValueError
If `var` is not an existing variable.
TypeError
If a non-valid keyword argument is given.
"""
if var not in self._dict:
raise ValueError("Variable '{}' does not exist yet and cannot be "
"modified".format(var))
old_var = self._dict.pop(var)
new_var = {}
for name in Variable._fields:
new_var[name] = names.pop(name, getattr(old_var, name))
# Check if names is not empty (=non-valid keyword argument given)
if names:
raise TypeError("Non-valid keyword arguments "
"given: {}".format(names))
new_var = Variable(**new_var)
self._add_to_dict(var, new_var)
def short_name(self, var):
"""Access short name.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
str
Short name of the variable.
"""
return getattr(self._dict[var], n.SHORT_NAME)
def short_names(self):
"""Get list of all `short_names`.
Returns
-------
list
List of all `short_names`.
"""
return list(self._dict)
def standard_name(self, var):
"""Access standard name.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
str
Standard name of the variable.
"""
return getattr(self._dict[var], n.STANDARD_NAME)
def standard_names(self):
"""Get list of all `standard_names`.
Returns
-------
list
List of all `standard_names`.
"""
return [self.standard_name(name) for name in self._dict]
def units(self, var):
"""Access units.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
str
Units of the variable.
"""
return getattr(self._dict[var], n.UNITS)
def var_name(self, var):
"""Access var name.
Parameters
----------
var : str
(Short) name of the variable.
Returns
-------
str
Var name (=short name) of the variable.
"""
return getattr(self._dict[var], n.SHORT_NAME)
def vars_available(self, *args):
"""Check if given variables are available.
Parameters
----------
*args
Short names of the variables to be tested.
Returns
-------
bool
`True` if variables are available, `False` if not.
"""
for var in args:
if var not in self._dict:
return False
return True
class Datasets(object):
"""Class to easily access a recipe's datasets in a diagnostic script.
Note
----
This class has been deprecated in version 2.2 and will be removed two minor
releases later in version 2.4.
Examples
--------
    Get all variables of a recipe configuration `cfg`::

        datasets = Datasets(cfg)

    Access data of a dataset with path `dataset_path`::

        datasets.get_data(path=dataset_path)

    Access dataset information of the dataset::

        datasets.get_dataset_info(path=dataset_path)

    Access the data of all datasets with ``exp=piControl``::

        datasets.get_data_list(exp='piControl')
"""
def __init__(self, cfg):
"""Load datasets.
Load all datasets of the recipe and store them in three internal
:obj:`dict`/:obj:`list` containers: `self._paths`, `self._data` and
`self._datasets`.
Parameters
----------
        cfg : dict
            Configuration dictionary of the recipe.
Raises
------
TypeError
If recipe configuration dictionary is not valid.
"""
warnings.warn(
"'Datasets' has been deprecated in version 2.2 and will be "
"removed two minor releases later in version 2.4",
ESMValToolDeprecationWarning)
self._iter_counter = 0
self._paths = []
self._data = {}
success = True
if isinstance(cfg, dict):
input_data = cfg.get(n.INPUT_DATA)
if isinstance(input_data, dict):
for path in input_data:
dataset_info = input_data[path]
if not isinstance(dataset_info, dict):
success = False
break
self._paths.append(path)
self._data[path] = None
self._datasets = input_data
else:
success = False
else:
success = False
if not success:
raise TypeError("{} is not a valid configuration "
"file".format(repr(cfg)))
self._n_datasets = len(self._paths)
if not self._paths:
logger.warning("No datasets found!")
logger.warning("Note: the automatic import of datasets does not "
"work for chained scripts (using 'ancestors' key)")
def __repr__(self):
"""Representation of the class."""
output = ''
for path in self._datasets:
output += repr(self._datasets[path]) + '\n'
return output
def __iter__(self):
"""Allow iteration through class."""
self._iter_counter = 0
return self
def __next__(self):
"""Allow iteration through class."""
if self._iter_counter >= self._n_datasets:
raise StopIteration()
next_element = self._paths[self._iter_counter]
self._iter_counter += 1
return next_element
def _is_valid_path(self, path):
"""Check if path is in class.
Parameters
----------
path : str
Path to be tested.
Returns
-------
bool
`True` if valid path, `False` if not.
"""
if path in self._paths:
return True
logger.warning("%s is not a valid dataset path", path)
return False
def _extract_paths(self, dataset_info, fail_when_ambiguous=False):
"""Get all paths matching a given `dataset_info`.
Parameters
----------
dataset_info : dict
Description of the desired datasets.
fail_when_ambiguous : bool, optional
Raise an exception when retrieved paths are ambiguous.
Returns
-------
list
All matching paths.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous and
`fail_when_ambiguous` is set to `True`.
"""
paths = list(self._datasets)
for info in dataset_info:
paths = [path for path in paths if
self._datasets[path].get(info) == dataset_info[info]]
if not paths:
logger.warning("%s does not match any dataset", dataset_info)
return paths
if not fail_when_ambiguous:
return sorted(paths)
if len(paths) > 1:
msg = 'Given dataset information is ambiguous'
logger.error(msg)
raise RuntimeError(msg)
return sorted(paths)
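    # Editor's note (illustration, not part of the original class): the
    # matching above is a dict-subset filter; a standalone equivalent under
    # hypothetical names would be:
    #
    #     def matching_paths(datasets, **criteria):
    #         return sorted(path for path, info in datasets.items()
    #                       if all(info.get(k) == v for k, v in criteria.items()))
    #
    #     matching_paths({'a.nc': {'exp': 'piControl'}}, exp='piControl')
    #     # -> ['a.nc']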
def add_dataset(self, path, data=None, **dataset_info):
"""Add dataset to class.
Parameters
----------
path : str
(Unique) path to the dataset.
data: optional
Arbitrary object to be saved as data for the dataset.
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
"""
if path in self._paths:
logger.warning("%s already exists! Overwriting old data", path)
self._paths.remove(path)
self._paths.append(path)
self._data[path] = data
self._datasets[path] = dataset_info
def add_to_data(self, data, path=None, **dataset_info):
"""Add element to a dataset's data.
Notes
-----
        Either `path` or a unique `dataset_info` description has to be given.
Fails when given information is ambiguous.
Parameters
----------
data
Element to be added to the dataset's data.
path : str, optional
Path to the dataset
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
if path is not None:
if self._is_valid_path(path):
self._data[path] += data
return None
return None
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if paths:
self._data[paths[0]] += data
return None
def get_data(self, path=None, **dataset_info):
"""Access a dataset's data.
Notes
-----
        Either `path` or a unique `dataset_info` description has to be
given. Fails when given information is ambiguous.
Parameters
----------
path : str, optional
Path to the dataset
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
`data_object`
Data of the selected dataset.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
if path is not None:
if self._is_valid_path(path):
return self._data.get(path)
return None
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if not paths:
return None
return self._data[paths[0]]
def get_data_list(self, **dataset_info):
"""Access the datasets' data in a list.
Notes
-----
        The returned data is sorted alphabetically with respect to the `paths`.
Parameters
----------
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
list
Data of the selected datasets.
"""
paths = self._extract_paths(dataset_info)
return [self._data[path] for path in paths]
def get_dataset_info(self, path=None, **dataset_info):
"""Access a dataset's information.
Notes
-----
        Either `path` or a unique `dataset_info` description has to be
given. Fails when given information is ambiguous.
Parameters
----------
path : str, optional
Path to the dataset.
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
dict
All dataset information.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
if path is not None:
if self._is_valid_path(path):
return self._datasets.get(path)
return None
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if not paths:
return None
return self._datasets[paths[0]]
def get_dataset_info_list(self, **dataset_info):
"""Access dataset's information in a list.
Notes
-----
        The returned data is sorted alphabetically with respect to the `paths`.
Parameters
----------
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
list
Information dictionaries of the selected datasets.
"""
paths = self._extract_paths(dataset_info)
return [self._datasets[path] for path in paths]
def get_info(self, key, path=None, **dataset_info):
"""Access a 'dataset_info`'s `key`.
Notes
-----
        Either `path` or a unique `dataset_info` description has to be
given. Fails when given information is ambiguous. If the `dataset_info`
does not contain the `key`, returns None.
Parameters
----------
key : str
Desired dictionary key.
        path : str, optional
Path to the dataset.
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
str
`key` information of the given dataset.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
if path is not None:
if self._is_valid_path(path):
output = self._datasets[path].get(key)
if output is None:
logger.warning("Dataset %s does not contain '%s' "
"information", path, key)
return output
return None
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if not paths:
return None
output = self._datasets[paths[0]].get(key)
if output is None:
logger.warning("Dataset %s does not contain '%s' information",
path, key)
return output
def get_info_list(self, key, **dataset_info):
"""Access `dataset_info`'s `key` values.
Notes
-----
        The returned data is sorted alphabetically with respect to the `paths`.
Parameters
----------
        key : str
            Desired dictionary key.
        **dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
list
`key` information of the selected datasets.
"""
paths = self._extract_paths(dataset_info)
output = [self._datasets[path].get(key) for path in paths]
if None in output:
logger.warning("One or more datasets do not contain '%s' "
"information", key)
return output
def get_path(self, **dataset_info):
"""Access a dataset's path.
Notes
-----
A unique `dataset_info` description has to be given. Fails when given
information is ambiguous.
Parameters
----------
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
str
Path of the selected dataset.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if not paths:
return None
return paths[0]
def get_path_list(self, **dataset_info):
"""Access dataset's paths in a list.
Notes
-----
        The returned data is sorted alphabetically with respect to the `paths`.
Parameters
----------
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Returns
-------
list
Paths of the selected datasets.
"""
return self._extract_paths(dataset_info)
def set_data(self, data, path=None, **dataset_info):
"""Set element as a dataset's data.
Notes
-----
        Either `path` or a unique `dataset_info` description has to be
        given. Fails if given information is ambiguous.
Parameters
----------
data
Element to be set as the dataset's data.
path : str, optional
Path to the dataset.
**dataset_info: optional
Keyword arguments describing the dataset, e.g. `dataset=CanESM2`,
`exp=piControl` or `short_name=tas`.
Raises
------
RuntimeError
If data given by `dataset_info` is ambiguous.
"""
if path is not None:
if self._is_valid_path(path):
self._data[path] = data
return None
return None
paths = self._extract_paths(dataset_info, fail_when_ambiguous=True)
if paths:
self._data[paths[0]] = data
return None
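# Usage sketch (illustrative, not part of the original module): the typical
# flow for this (deprecated) helper, assuming `cfg` is the configuration dict
# a diagnostic receives; the 'exp' facet value is just an example.
def _datasets_usage_sketch(cfg):
    datasets = Datasets(cfg)
    for path in datasets:                            # iterates over dataset paths
        info = datasets.get_dataset_info(path=path)  # full facet dict for this path
        datasets.set_data(data=info, path=path)      # placeholder payload; real code would load a cube
    return datasets.get_data_list(exp='piControl')   # data of all piControl datasets, path-sorted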
| avg_line_length: 28.596407 | max_line_length: 79 | alphanum_fraction: 0.54033 |

| hexsha: 4a1391719bbce360831028e7784bf5b170c514cb | size: 4,302 | ext: py | lang: Python |
| path: dbbact_server/dbidval.py | repo: amnona/dbBact @ 4cae61c26352a7f2d032ef74aee860c66583c616 | licenses: [MIT] |
| stars: null | issues: 3 (2021-02-14 .. 2022-01-21) | forks: null (fork repo: amnona/dbbact-server @ 54dc776444b061373d928aab63c4284d8837671a) |
import psycopg2
from .utils import debug
def GetIdFromDescription(con, cur, table, description, noneok=False, addifnone=False, commit=True):
"""
    Get the id matching a description in a given table (and add it if it doesn't exist when addifnone=True)
input:
con,cur
table : str
Name of the table to search
    description : str
        The description to search for
    noneok : bool (optional)
        False (default) fails if description is None, True returns 0 if None is encountered
addifnone : bool (optional)
False (default) to return without adding if item does not exist. True to add if item does not exist
commit : bool (optional)
True (default) to commit if adding new item, False to skip commit
output:
cid : int
the id of the value, -1 if not found, -2 if error
"""
try:
if description is None:
if noneok:
return 0
else:
return -1
description = description.lower()
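        # Table names cannot be bound parameters in psycopg2, so the table is
        # interpolated first; the literal '%s' survives as the placeholder for
        # the description value, which is then bound safely below.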
cur.execute('SELECT id from %s WHERE description=%s LIMIT 1' % (table, '%s'), [description])
if cur.rowcount == 0:
if not addifnone:
debug(2, "value %s not found in table %s" % (description, table))
return -1
err, cid = AddItem(con, cur, table, description, allowreplicate=True, commit=commit)
if err:
debug(7, 'error when adding term: %s' % err)
return -2
debug(2, 'new term added to table. reported as found')
return cid
cid = cur.fetchone()[0]
debug(2, "value %s found in table %s id %d" % (description, table, cid))
return cid
except psycopg2.DatabaseError as e:
debug(8, "error %s in GetIdFromValue" % e)
return -2
def AddItem(con, cur, table, description, allowreplicate=False, commit=True):
"""
Add a id,description to table and return the id.
If item already exists, behavior depends on allowreplicate:
False (default) - just return the id of the existing item
True - add a new item
input:
con,cur
table : str
Name of the table to search
description : str
the description to add
allowreplicate : bool (optional)
False (default) - just return the id of the existing item
True - add a new item
commit : bool (optional)
True (default) to commit, False to skip the commit
output:
err : str
Error message or empty string if ok
sid : int
the id of the added item
"""
try:
description = description.lower()
if not allowreplicate:
# search if exists
sid = GetIdFromDescription(con, cur, table, description)
if sid >= 0:
debug(2, 'AddItem - item %s already exists. id is %d' % (description, sid))
return '', sid
# should create new item
cur.execute('INSERT INTO %s (description) VALUES (%s) RETURNING id' % (table, '%s'), [description])
sid = cur.fetchone()[0]
debug(2, 'AddItem - added new item %s. id is %d' % (description, sid))
if commit:
con.commit()
return '', sid
except psycopg2.DatabaseError as e:
debug(8, "error %s in AddItem" % e)
return 'Error %s in AddItem' % e, -2
def GetDescriptionFromId(con, cur, table, cid):
"""
Get the description for id cid in table
input:
con,cur
    table : str
Name of the table to search
cid : int
the id to get the description for
output:
err : str
Error message or empty string if ok
description : str
the description of the id
"""
try:
cur.execute('SELECT description FROM %s WHERE id=%s LIMIT 1' % (table, '%s'), [cid])
if cur.rowcount == 0:
debug(2, 'id not found in table %s' % table)
return 'id not found in table %s' % table, ''
description = cur.fetchone()[0]
debug(1, 'found description %s for id %d in table %s' % (description, cid, table))
return '', description
except psycopg2.DatabaseError as e:
debug(8, "error %s in GetDescriptionFromId" % e)
return 'Error %s in GetDescriptionFromId' % e, ''
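# Usage sketch (illustrative, not part of the original module): the err/value
# return convention, assuming an open psycopg2 connection/cursor and a table
# 'ontologytable' with (id, description) columns; all names are hypothetical.
def _dbidval_usage_sketch(con, cur):
    err, cid = AddItem(con, cur, 'ontologytable', 'Small Village')
    if err:
        return err
    # descriptions are lower-cased on insert and on lookup
    assert GetIdFromDescription(con, cur, 'ontologytable', 'small village') == cid
    err, description = GetDescriptionFromId(con, cur, 'ontologytable', cid)
    return description  # 'small village'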
| avg_line_length: 34.142857 | max_line_length: 107 | alphanum_fraction: 0.584612 |

| hexsha: 4a1391bce9290943996595c59cc21fb73b6b249f | size: 801 | ext: py | lang: Python |
| path: darling_ansible/python_venv/lib/python3.7/site-packages/oci/streaming/__init__.py | repo: revnav/sandbox @ f9c8422233d093b76821686b6c249417502cf61d | licenses: [Apache-2.0] |
| stars: null | issues: null | forks: 1 (2020-06-25 .. 2020-06-25) |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from .stream_client import StreamClient
from .stream_client_composite_operations import StreamClientCompositeOperations
from .stream_admin_client import StreamAdminClient
from .stream_admin_client_composite_operations import StreamAdminClientCompositeOperations
from . import models
__all__ = ["StreamClient", "StreamClientCompositeOperations", "StreamAdminClient", "StreamAdminClientCompositeOperations", "models"]
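# Usage sketch (illustrative, not part of the original module): constructing
# an admin client, assuming the standard SDK config file at ~/.oci/config.
if __name__ == "__main__":
    from oci.config import from_file
    admin_client = StreamAdminClient(from_file())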
| avg_line_length: 53.4 | max_line_length: 245 | alphanum_fraction: 0.826467 |

| hexsha: 4a1391ef9418c9e1b053d66fb428891890dab26e | size: 322 | ext: py | lang: Python |
| path: config/api_router.py | repo: luiscberrocal/pty_shopping_list @ b6ef499c001c1b2c1d338f94b130c1b3e8c7a796 | licenses: [MIT] |
| stars: 1 (2022-01-19 .. 2022-01-19) | issues: 15 (2022-01-15 .. 2022-03-31) | forks: 1 (2022-01-21 .. 2022-01-21) |
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from pty_shopping_list.users.api.views import UserViewSet
if settings.DEBUG:
router = DefaultRouter()
else:
router = SimpleRouter()
router.register("users", UserViewSet)
app_name = "api"
urlpatterns = router.urls
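# Usage sketch (illustrative, not part of the original module): how this
# router is typically mounted in the root URLconf; the "api/" prefix is an
# assumption based on the common cookiecutter-django layout:
#
#     # config/urls.py
#     from django.urls import include, path
#     urlpatterns = [
#         path("api/", include("config.api_router")),
#     ]
#
# The registered UserViewSet is then served at /api/users/ (detail routes
# depend on the viewset's lookup field).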
| avg_line_length: 20.125 | max_line_length: 62 | alphanum_fraction: 0.785714 |

| hexsha: 4a13937cfed2fd7686f6bbb97041be075f4564b4 | size: 348 | ext: py | lang: Python |
| path: mirari/INV/migrations/0002_auto_20190608_2204.py | repo: gcastellan0s/mirariapp @ 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | licenses: [MIT] |
| stars: null | issues: 18 (2019-12-27 .. 2022-02-27) | forks: null |
# Generated by Django 2.0.5 on 2019-06-09 03:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('INV', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='fiscalmx',
old_name='nocer',
new_name='noCer',
),
]
| avg_line_length: 18.315789 | max_line_length: 47 | alphanum_fraction: 0.566092 |

| hexsha: 4a1395b63f80c98d09e69ff42527e3dbeab29ab4 | size: 1,449 | ext: py | lang: Python |
| path: tests/test_linked_list.py | repo: nyakaz73/datastructure_collection @ 19922d8d9f95b3588daaafb780c02fa0196d9790 | licenses: [MIT] |
| stars: 1 (2020-07-27 .. 2020-07-27) | issues: null | forks: null |
import unittest
from datastructure_collection import LinkedList
class TestLinkedList(unittest.TestCase):
def test_empty_linked(self):
linked = LinkedList()
self.assertEqual(len(linked), 0)
self.assertTrue(linked.isEmpty())
def test_add(self):
linked = LinkedList()
        # add() is O(1) run time; append() is O(N) run time.
        for _ in range(10):
            linked.add(_)
linked.remove(5)
linked.append(55)
linked.append(56)
linked.append(7)
linked.add(59)
linked.append(79)
with self.assertRaises(AssertionError):
linked.remove(5)
def test_append(self):
linked = LinkedList()
linked.append(55)
linked.append(56)
linked.append(7)
linked.remove(56)
with self.assertRaises(AssertionError):
linked.remove(56)
def test_remove(self):
linked = LinkedList()
linked.append(55)
linked.append(56)
linked.append(7)
linked.remove(56)
with self.assertRaises(AssertionError):
linked.remove(56)
def test_contains(self):
linked = LinkedList()
linked.append(55)
linked.append(56)
linked.append(7)
linked.remove(56)
self.assertFalse(56 in linked)
if __name__ == '__main__':
unittest.main()
| avg_line_length: 25.875 | max_line_length: 47 | alphanum_fraction: 0.571429 |

| hexsha: 4a139715b1f3411f17514223c63b946aa9847cbc | size: 3,848 | ext: py | lang: Python |
| path: zeus/common/ipc/comm_by_zmq.py | repo: TianQi-777/xingtian @ 9b1678ad6ff12f00c2826a7ec7f42d5350b83b31 | licenses: [MIT] |
| stars: 240 (2020-08-15 .. 2022-03-28) | issues: 20 (2020-08-29 .. 2022-03-21) | forks: 69 (2020-08-15 .. 2022-03-16) |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Communication by zmq."""
import pyarrow
import zmq
from absl import logging
from zeus.common.util.register import Registers
ZMQ_MIN_PORT = 20000
ZMQ_MAX_PORT = 40000
@Registers.comm
class CommByZmq(object):
"""Communication by zmq."""
def __init__(self, comm_info):
"""Initialize."""
super(CommByZmq, self).__init__()
# For Controller, there is no 'addr' parameter given.
logging.debug("zmq start with comm_info: {}".format(comm_info))
addr = comm_info.get("addr", "*")
port = comm_info.get("port")
zmq_type = comm_info.get("type", "PUB")
comm_type = {
"PUB": zmq.PUB,
"SUB": zmq.SUB,
"PUSH": zmq.PUSH,
"PULL": zmq.PULL,
"REP": zmq.REP,
"REQ": zmq.REQ,
}.get(zmq_type)
context = zmq.Context()
socket = context.socket(comm_type)
self._type = zmq_type
self.bound_port = None
if "*" in addr:
# socket.bind("tcp://*:" + str(port))
bound_port = socket.bind_to_random_port("tcp://*",
min_port=ZMQ_MIN_PORT,
max_port=ZMQ_MAX_PORT,
max_tries=100)
self.bound_port = bound_port
else:
socket.connect("tcp://" + str(addr) + ":" + str(port))
self.socket = socket
def send(self, ctr_info, data, name=None, block=True):
"""Send message."""
# msg = pickle.dumps(data)
msg = []
msg.append(pyarrow.serialize(ctr_info).to_buffer())
msg.append(pyarrow.serialize(data).to_buffer())
self.socket.send_multipart(msg)
def recv(self, name=None, block=True):
"""Receive message."""
msg = self.socket.recv_multipart()
ctr_info = pyarrow.deserialize(msg[0])
data = pyarrow.deserialize(msg[1])
# data = pickle.loads(msg)
return ctr_info, data
def send_bytes(self, ctr_info, data):
"""Send bytes."""
msg = list()
msg.append(ctr_info)
msg.append(data)
self.socket.send_multipart(msg, copy=False)
def recv_bytes(self, block):
"""Receive bytes."""
recv_data = self.socket.recv_multipart()
ctr_info, data = recv_data[0], recv_data[1]
return ctr_info, data
def __str__(self):
"""Rewrite the ste func, to return the class info."""
return str({
"port": self.bound_port,
"type": self._type
})
def close(self):
"""Close."""
if self.socket:
self.socket.close()
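# Usage sketch (illustrative, not part of the original module): pairing a
# PULL server with a PUSH client on one host; the payload is arbitrary.
def _comm_by_zmq_sketch():
    server = CommByZmq({"addr": "*", "type": "PULL"})   # binds to a random port
    client = CommByZmq({"addr": "127.0.0.1",
                        "port": server.bound_port,
                        "type": "PUSH"})
    client.send(ctr_info={"cmd": "train"}, data=[1, 2, 3])
    ctr_info, data = server.recv()
    return ctr_info, data  # ({'cmd': 'train'}, [1, 2, 3])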
| avg_line_length: 34.981818 | max_line_length: 79 | alphanum_fraction: 0.606809 |

| hexsha: 4a139772c4ea1ea54c50f18f91e161b419c68e07 | size: 4,344 | ext: py | lang: Python |
| path: hanlp/pretrained/mtl.py | repo: yatwql/HanLP @ 584ce7e5ed1b8f2209e14f32c44a55bb5a822e31 | licenses: [Apache-2.0] |
| stars: null | issues: null | forks: null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-22 13:16
from hanlp_common.constant import HANLP_URL
OPEN_TOK_POS_NER_SRL_DEP_SDP_CON_ELECTRA_SMALL_ZH = HANLP_URL + 'mtl/open_tok_pos_ner_srl_dep_sdp_con_electra_small_20201223_035557.zip'
"Electra (:cite:`clark2020electra`) small version of joint tok, pos, ner, srl, dep, sdp and con model trained on open-source Chinese corpus."
OPEN_TOK_POS_NER_SRL_DEP_SDP_CON_ELECTRA_BASE_ZH = HANLP_URL + 'mtl/open_tok_pos_ner_srl_dep_sdp_con_electra_base_20201223_201906.zip'
"Electra (:cite:`clark2020electra`) base version of joint tok, pos, ner, srl, dep, sdp and con model trained on open-source Chinese corpus."
CLOSE_TOK_POS_NER_SRL_DEP_SDP_CON_ELECTRA_SMALL_ZH = HANLP_URL + 'mtl/close_tok_pos_ner_srl_dep_sdp_con_electra_small_20210111_124159.zip'
"Electra (:cite:`clark2020electra`) small version of joint tok, pos, ner, srl, dep, sdp and con model trained on close-source Chinese corpus."
CLOSE_TOK_POS_NER_SRL_DEP_SDP_CON_ELECTRA_BASE_ZH = HANLP_URL + 'mtl/close_tok_pos_ner_srl_dep_sdp_con_electra_base_20210111_124519.zip'
"Electra (:cite:`clark2020electra`) base version of joint tok, pos, ner, srl, dep, sdp and con model trained on close-source Chinese corpus."
CLOSE_TOK_POS_NER_SRL_DEP_SDP_CON_ERNIE_GRAM_ZH = HANLP_URL + 'mtl/close_tok_pos_ner_srl_dep_sdp_con_ernie_gram_base_aug_20210904_145403.zip'
"ERNIE (:cite:`xiao-etal-2021-ernie`) base version of joint tok, pos, ner, srl, dep, sdp and con model trained on close-source Chinese corpus."
UD_ONTONOTES_TOK_POS_LEM_FEA_NER_SRL_DEP_SDP_CON_MT5_SMALL = HANLP_URL + 'mtl/ud_ontonotes_tok_pos_lem_fea_ner_srl_dep_sdp_con_mt5_small_20210228_123458.zip'
'mT5 (:cite:`xue-etal-2021-mt5`) small version of joint tok, pos, lem, fea, ner, srl, dep, sdp and con model trained on UD and OntoNotes5 corpus.'
UD_ONTONOTES_TOK_POS_LEM_FEA_NER_SRL_DEP_SDP_CON_XLMR_BASE = HANLP_URL + 'mtl/ud_ontonotes_tok_pos_lem_fea_ner_srl_dep_sdp_con_xlm_base_20220608_003435.zip'
'''
XLM-R (:cite:`conneau-etal-2020-unsupervised`) base version of joint tok, pos, lem, fea, ner, srl, dep, sdp and con model trained on UD 2.10 and OntoNotes5 corpus.
The following 130 languages are supported: ``Afrikaans, Akkadian, Akuntsu, Albanian, Amharic, AncientGreek (to 1453), Ancient Hebrew, Apurinã, Arabic, Armenian, AssyrianNeo-Aramaic, Bambara, Basque, Beja, Belarusian, Bengali, Bhojpuri, Breton, Bulgarian, Catalan, Cebuano, Central Siberian Yupik, Chinese, Chukot, ChurchSlavic, Coptic, Croatian, Czech, Danish, Dutch, Emerillon, English, Erzya, Estonian, Faroese, Finnish, French, Galician, German, Gothic, Guajajára, Guarani, Hebrew, Hindi, Hittite, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, K\'iche\', Kangri, Karelian, Karo(Brazil), Kazakh, Khunsari, Komi-Permyak, Komi-Zyrian, Korean, Latin, Latvian, Ligurian, LiteraryChinese, Lithuanian, Livvi, LowGerman, Madi, Makuráp, Maltese, Manx, Marathi, MbyáGuaraní, Modern Greek (1453-), Moksha, Mundurukú, Nayini, Neapolitan, Nigerian Pidgin, NorthernKurdish, Northern Sami, Norwegian, OldFrench (842-ca. 1400), OldRussian, Old Turkish, Persian, Polish, Portuguese, Romanian, Russia Buriat, Russian, Sanskrit, ScottishGaelic, Serbian, SkoltSami, Slovak, Slovenian, Soi, South Levantine Arabic, Spanish, Swedish, SwedishSign Language, SwissGerman, Tagalog, Tamil, Tatar, Telugu, Thai, Tupinambá, Turkish, Uighur, Ukrainian, Umbrian, UpperSorbian, Urdu, Urubú-Kaapor, Vietnamese, Warlpiri, Welsh, Western Armenian, WesternFrisian, Wolof, Xibe, Yakut, Yoruba, YueChinese``.
Performance: ``{con UCM: 20.31% LCM: 16.82% UP: 77.50% UR: 76.63% UF: 77.06% LP: 71.25% LR: 70.46% LF: 70.85%}{ner P: 79.93% R: 80.76% F1: 80.34%}{sdp/dm UF: 93.71% LF: 93.00%}{sdp/pas UF: 97.63% LF: 96.37%}{sdp/psd UF: 93.08% LF: 80.95%}{srl [predicate P: 90.95% R: 84.25% F1: 87.47%][e2e P: 78.89% R: 67.32% F1: 72.65%]}{tok P: 98.50% R: 98.70% F1: 98.60%}{ud [lemmas Accuracy:85.95%][upos Accuracy:89.95%][deps UAS: 85.78% LAS: 78.51%][feats Accuracy:82.18%]}``.
'''
NPCMJ_UD_KYOTO_TOK_POS_CON_BERT_BASE_CHAR_JA = HANLP_URL + 'mtl/npcmj_ud_kyoto_tok_pos_ner_dep_con_srl_bert_base_char_ja_20210914_133742.zip'
'BERT (:cite:`devlin-etal-2019-bert`) base char encoder trained on NPCMJ/UD/Kyoto corpora with decoders including tok, pos, ner, dep, con, srl.'
# Will be filled up during runtime
ALL = {}
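# Usage sketch (illustrative, not part of the original module): loading one of
# the identifiers above via HanLP's public loader; the sentence is arbitrary
# and the first call downloads the model.
if __name__ == "__main__":
    import hanlp
    mtl_pipeline = hanlp.load(CLOSE_TOK_POS_NER_SRL_DEP_SDP_CON_ELECTRA_SMALL_ZH)
    print(mtl_pipeline('晓美焰来到北京立方庭参观自然语义科技公司。'))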
| avg_line_length: 140.129032 | max_line_length: 1,397 | alphanum_fraction: 0.78361 |

| hexsha: 4a1398e7503321d4486174e451aa4cb5de874d6f | size: 7,406 | ext: py | lang: Python |
| path: src/datadog_api_client/v1/model/organization_create_body.py | repo: MichaelTROEHLER/datadog-api-client-python @ 12c46626622fb1277bb1e172753b342c671348bd | licenses: [Apache-2.0] |
| stars: null | issues: null | forks: null |
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from datadog_api_client.v1.model.organization_billing import OrganizationBilling
from datadog_api_client.v1.model.organization_subscription import OrganizationSubscription
globals()['OrganizationBilling'] = OrganizationBilling
globals()['OrganizationSubscription'] = OrganizationSubscription
class OrganizationCreateBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'billing': (OrganizationBilling,), # noqa: E501
'name': (str,), # noqa: E501
'subscription': (OrganizationSubscription,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'billing': 'billing', # noqa: E501
'name': 'name', # noqa: E501
'subscription': 'subscription', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, billing, name, subscription, *args, **kwargs): # noqa: E501
"""OrganizationCreateBody - a model defined in OpenAPI
Args:
billing (OrganizationBilling):
name (str): The name of the new child-organization, limited to 32 characters.
subscription (OrganizationSubscription):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.billing = billing
self.name = name
self.subscription = subscription
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
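# Usage sketch (illustrative, not part of the generated module): constructing
# the model directly; the billing/subscription type values are assumptions and
# should be checked against the Datadog API docs.
if __name__ == "__main__":
    from datadog_api_client.v1.model.organization_billing import OrganizationBilling
    from datadog_api_client.v1.model.organization_subscription import OrganizationSubscription
    body = OrganizationCreateBody(
        billing=OrganizationBilling(type="parent_billing"),
        name="New child org",  # limited to 32 characters
        subscription=OrganizationSubscription(type="pro"),
    )
    print(body.name)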
| avg_line_length: 40.25 | max_line_length: 110 | alphanum_fraction: 0.600459 |