id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8178150 | <gh_stars>0
import math
from copy import deepcopy
import pandas as pd
import pytest
from sfa_api.conftest import (
BASE_URL, copy_update, variables, agg_types,
VALID_OBS_JSON, demo_forecasts, demo_group_cdf,
VALID_AGG_JSON, demo_aggregates)
def test_get_all_aggregates(api):
    """Listing aggregates succeeds and every entry exposes its observations."""
    response = api.get('/aggregates/', base_url=BASE_URL)
    assert response.status_code == 200
    for aggregate in response.get_json():
        assert 'observations' in aggregate
def test_post_aggregate_success(api):
    """A valid aggregate payload is created (201) with a Location header."""
    response = api.post('/aggregates/', base_url=BASE_URL,
                        json=VALID_AGG_JSON)
    assert response.status_code == 201
    assert 'Location' in response.headers
@pytest.mark.parametrize('payload,message', [
    (copy_update(VALID_AGG_JSON, 'variable', 'other'),
     f'{{"variable":["Must be one of: {variables}."]}}'),
    (copy_update(VALID_AGG_JSON, 'aggregate_type', 'cov'),
     f'{{"aggregate_type":["Must be one of: {agg_types}."]}}'),
    (copy_update(VALID_AGG_JSON, 'interval_label', 'instant'),
     '{"interval_label":["Must be one of: beginning, ending."]}'),
    ({}, '{"aggregate_type":["Missing data for required field."],"description":["Missing data for required field."],"interval_label":["Missing data for required field."],"interval_length":["Missing data for required field."],"name":["Missing data for required field."],"timezone":["Missing data for required field."],"variable":["Missing data for required field."]}'),  # NOQA
    # Plain literal: this message has no interpolation, so no f-string
    # (and no doubled braces) is needed.
    (copy_update(VALID_AGG_JSON, 'interval_length', '61'),
     '{"interval_length":["Must be a divisor of one day."]}'),
])
def test_post_aggregate_bad_request(api, payload, message):
    """Invalid aggregate payloads are rejected with 400 and a JSON error body."""
    res = api.post('/aggregates/',
                   base_url=BASE_URL,
                   json=payload)
    assert res.status_code == 400
    # The API wraps field errors in an {"errors": ...} envelope, newline-terminated.
    assert res.get_data(as_text=True) == f'{{"errors":{message}}}\n'
def test_get_aggregate_links(api, aggregate_id):
    """Fetching a single aggregate includes its id and HATEOAS links."""
    body = api.get(f'/aggregates/{aggregate_id}',
                   base_url=BASE_URL).get_json()
    assert 'aggregate_id' in body
    assert '_links' in body
def test_get_aggregate_links_404(api, missing_id):
    """Requesting an unknown aggregate id yields 404."""
    resp = api.get(f'/aggregates/{missing_id}', base_url=BASE_URL)
    assert resp.status_code == 404
def test_delete_aggregate(api):
    """An aggregate can be created, deleted, and is then unreachable."""
    created = api.post('/aggregates/', base_url=BASE_URL,
                       json=VALID_AGG_JSON)
    assert created.status_code == 201
    agg_id = created.get_data(as_text=True)
    # It exists after creation...
    assert api.get(f'/aggregates/{agg_id}',
                   base_url=BASE_URL).status_code == 200
    # ...deleting it succeeds...
    assert api.delete(f'/aggregates/{agg_id}',
                      base_url=BASE_URL).status_code == 204
    # ...and it is gone afterwards.
    assert api.get(f'/aggregates/{agg_id}',
                   base_url=BASE_URL).status_code == 404
def test_delete_aggregate_missing(api, missing_id):
    """Deleting a nonexistent aggregate yields 404."""
    resp = api.delete(f'/aggregates/{missing_id}', base_url=BASE_URL)
    assert resp.status_code == 404
def test_get_aggregate_metadata(api, aggregate_id):
    """Metadata contains the expected fields with UTC-aware timestamps."""
    res = api.get(f'/aggregates/{aggregate_id}/metadata',
                  base_url=BASE_URL)
    assert res.status_code == 200
    meta = res.get_json()
    for key in ('aggregate_id', 'variable', 'observations',
                'aggregate_type'):
        assert key in meta
    assert meta['interval_value_type'] == 'interval_mean'
    # Timestamps are serialized as timezone-aware UTC ISO strings.
    assert meta['created_at'].endswith('+00:00')
    assert meta['modified_at'].endswith('+00:00')
    assert meta['observations'][0]['created_at'].endswith('+00:00')
def test_get_aggregate_metadata_404(api, missing_id):
    """Metadata for an unknown aggregate id yields 404."""
    resp = api.get(f'/aggregates/{missing_id}/metadata', base_url=BASE_URL)
    assert resp.status_code == 404
@pytest.mark.parametrize('payload', [
    {},
    {'name': 'new aggregate name'},
    {'description': 'is here', 'timezone': 'UTC', 'extra_parameters': 'new'}
])
def test_update_aggregate_add_obs(api, aggregate_id, payload):
    """Posting an observations list attaches a new observation."""
    posted = api.post('/observations/', base_url=BASE_URL,
                      json=VALID_OBS_JSON)
    obs_id = posted.get_data(as_text=True)
    payload.update({'observations': [{
        'observation_id': obs_id,
        'effective_from': '2029-01-01 01:23:00Z'}]})
    update = api.post(f'/aggregates/{aggregate_id}/metadata',
                      json=payload, base_url=BASE_URL)
    assert update.status_code == 200
    readback = api.get(f'/aggregates/{aggregate_id}/metadata',
                       base_url=BASE_URL)
    assert readback.status_code == 200
    # The newly attached observation must be present with its
    # effective_from echoed back as a UTC ISO timestamp.
    matches = [obs for obs in readback.json['observations']
               if obs['observation_id'] == obs_id]
    assert matches
    for obs in matches:
        assert obs['effective_from'] == '2029-01-01T01:23:00+00:00'
@pytest.mark.parametrize('payload', [
    {},
    {'name': 'new aggregate name'},
    {'description': 'is here', 'timezone': 'UTC', 'extra_parameters': 'new'},
    pytest.param({'name': 0}, marks=pytest.mark.xfail(strict=True))
])
def test_update_aggregate_no_obs(api, aggregate_id, payload):
    """Metadata-only updates are applied and visible on the next read."""
    update = api.post(f'/aggregates/{aggregate_id}/metadata',
                      json=payload, base_url=BASE_URL)
    assert update.status_code == 200
    readback = api.get(f'/aggregates/{aggregate_id}/metadata',
                       base_url=BASE_URL)
    assert readback.status_code == 200
    body = readback.json
    for key, value in payload.items():
        assert body[key] == value
def test_update_aggregate_add_obs_404_agg(api, missing_id):
    """Adding an observation to a missing aggregate yields 404."""
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_from': '2019-01-01 01:23:00Z'}]}
    resp = api.post(f'/aggregates/{missing_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 404
def test_update_aggregate_add_obs_404_obs(api, missing_id, aggregate_id):
    """Adding a nonexistent observation to an aggregate yields 404."""
    payload = {'observations': [{
        'observation_id': missing_id,
        'effective_from': '2019-01-01 01:23:00Z'}]}
    resp = api.post(f'/aggregates/{aggregate_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 404
@pytest.mark.parametrize('payload,intext', [
    # Re-adding an observation that is already effective is rejected.
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_from': '2019-01-01 01:23:00Z'}]},
     'present and valid'),
])
def test_update_aggregate_add_obs_bad_req(api, payload, aggregate_id, intext):
    """Invalid add-observation requests are a 400 with an explanatory body."""
    res = api.post(f'/aggregates/{aggregate_id}/metadata',
                   json=payload,
                   base_url=BASE_URL)
    assert res.status_code == 400
    assert intext in res.get_data(as_text=True)
@pytest.mark.parametrize('field,val,intext', [
    ('interval_length', 300, 'interval length is not less'),
    ('variable', 'dni', 'same variable'),
    ('interval_value_type', 'interval_min', 'interval_value_type'),
])
def test_update_aggregate_add_obs_bad_obs(api, aggregate_id, intext,
                                          field, val):
    """Observations incompatible with the aggregate are rejected with 400."""
    created = api.post('/observations/', base_url=BASE_URL,
                       json=copy_update(VALID_OBS_JSON, field, val))
    new_obs_id = created.get_data(as_text=True)
    payload = {'observations': [
        {'observation_id': new_obs_id,
         'effective_from': '2019-01-01 01:23:00Z'}]}
    resp = api.post(f'/aggregates/{aggregate_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 400
    assert intext in resp.get_data(as_text=True)
def test_update_aggregate_add_obs_bad_many(api, aggregate_id):
    """Each bad observation in a batch contributes its own error message."""
    created = api.post('/observations/', base_url=BASE_URL,
                       json=copy_update(VALID_OBS_JSON,
                                        'interval_length', 300))
    new_obs_id = created.get_data(as_text=True)
    payload = {'observations': [
        {'observation_id': new_obs_id,
         'effective_from': '2019-01-01 01:23:00Z'},
        {'observation_id': '123e4567-e89b-12d3-a456-426655440000',
         'effective_from': '2019-01-01 01:23:00Z'}
    ]}
    resp = api.post(f'/aggregates/{aggregate_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 400
    body = resp.get_data(as_text=True)
    assert 'present and valid' in body
    assert 'interval length is not less' in body
def test_update_aggregate_remove_obs(api, aggregate_id):
    """Setting effective_until detaches an observation going forward."""
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2019-01-01 01:23:00Z'}]}
    update = api.post(f'/aggregates/{aggregate_id}/metadata',
                      json=payload, base_url=BASE_URL)
    assert update.status_code == 200
    readback = api.get(f'/aggregates/{aggregate_id}/metadata',
                       base_url=BASE_URL)
    first_obs = readback.json['observations'][0]
    assert first_obs['effective_until'] == '2019-01-01T01:23:00+00:00'
def test_update_aggregate_remove_obs_404_agg(api, missing_id):
    """Expiring an observation on a missing aggregate yields 404."""
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2019-01-01 01:23:00Z'}]}
    resp = api.post(f'/aggregates/{missing_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 404
def test_update_aggregate_remove_obs_no_obs(api, missing_id, aggregate_id):
    """Expiring an observation the aggregate never had is a harmless 200."""
    payload = {'observations': [{
        'observation_id': missing_id,
        'effective_until': '2019-01-01 01:23:00Z'}]}
    resp = api.post(f'/aggregates/{aggregate_id}/metadata',
                    json=payload, base_url=BASE_URL)
    assert resp.status_code == 200  # no effect
@pytest.mark.parametrize('payload,intext', [
    # Neither effective_from nor effective_until supplied.
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000'}]},
     'Specify one of'),
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_from': 'notatime'}]},
     'Not a valid datetime'),
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': 'notatime'}]},
     'Not a valid datetime'),
    # Supplying both bounds in a single entry is not allowed.
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_from': '2019-01-01 00:00:00Z',
        'effective_until': '2019-01-01 00:00:00Z'}]},
     'Only specify one of'),
    # effective_until beyond the maximum supported timestamp.
    ({'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2050-01-01 03:14:07Z'}]},
     'Exceeds maximum'),
    # Errors are keyed by list position; the bad entry here is index 1.
    ({'observations': [
        {'observation_id': '123e4567-e89b-12d3-a456-426655440000',
         'effective_from': '2019-01-01 00:00:00Z'},
        {'observation_id': '123e4567-e89b-12d3-a456-426655440000',
         'effective_until': 'notatime'}
    ]},
     '1'),
])
def test_update_aggregate_bad_req(api, aggregate_id, payload, intext):
    """Malformed observation updates are rejected with 400 and a message."""
    res = api.post(f'/aggregates/{aggregate_id}/metadata',
                   json=payload,
                   base_url=BASE_URL)
    assert res.status_code == 400
    assert intext in res.get_data(as_text=True)
def test_get_aggregate_values(api, aggregate_id, startend):
    """JSON values carry timestamp, value, and quality_flag per point."""
    resp = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                   headers={'Accept': 'application/json'},
                   base_url=BASE_URL)
    assert resp.status_code == 200
    body = resp.json
    assert body['aggregate_id'] == aggregate_id
    assert 'values' in body
    first = body['values'][0]
    for key in ('timestamp', 'value', 'quality_flag'):
        assert key in first
def test_get_aggregate_values_startendtz(api, aggregate_id, startend):
    """Mixed aware/naive start and end query parameters are accepted."""
    resp = api.get(f'/aggregates/{aggregate_id}/values'
                   '?start=20190101T0000Z&end=2020-01-01T00:00:00',
                   headers={'Accept': 'application/json'},
                   base_url=BASE_URL)
    assert resp.status_code == 200
    body = resp.json
    assert body['aggregate_id'] == aggregate_id
    assert 'values' in body
    first = body['values'][0]
    for key in ('timestamp', 'value', 'quality_flag'):
        assert key in first
def test_get_aggregate_values_csv(api, aggregate_id, startend):
    """CSV output names the aggregate and carries the expected header."""
    resp = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                   headers={'Accept': 'text/csv'},
                   base_url=BASE_URL)
    assert resp.status_code == 200
    body = resp.get_data(as_text=True)
    assert aggregate_id in body
    assert 'timestamp,value,quality_flag' in body
def test_get_aggregate_values_outside_range(api, aggregate_id):
    """Requesting a window with no effective observations is a 422."""
    window = {'start': '2018-01-01T00:00:00Z',
              'end': '2018-01-02T00:00:00Z'}
    resp = api.get(f'/aggregates/{aggregate_id}/values',
                   headers={'Accept': 'application/json'},
                   base_url=BASE_URL,
                   query_string=window)
    assert resp.status_code == 422
    assert resp.json['errors']['values'] == [
        'No effective observations in data']
def test_get_aggregate_values_422(api, aggregate_id, startend):
    """Values cannot be computed once a member observation has expired."""
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2019-01-01 01:23:00Z'}]}
    res = api.post(f'/aggregates/{aggregate_id}/metadata',
                   json=payload,
                   base_url=BASE_URL)
    assert res.status_code == 200
    res = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                  headers={'Accept': 'application/json'},
                  base_url=BASE_URL)
    assert res.status_code == 422
    # NOTE(review): '<KEY>' looks like a redaction placeholder, not real
    # expected error text -- verify against the original source.
    assert 'missing keys <KEY>' in res.get_data(
        as_text=True)
def test_get_aggregate_values_obs_deleted(api, aggregate_id, missing_id,
                                          startend):
    """Deleting a member observation makes value computation fail (422)."""
    deleted = api.delete(
        '/observations/123e4567-e89b-12d3-a456-426655440000',
        base_url=BASE_URL)
    assert deleted.status_code == 204
    resp = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                   headers={'Accept': 'application/json'},
                   base_url=BASE_URL)
    assert resp.status_code == 422
def test_get_aggregate_values_limited_effective(api, aggregate_id, startend):
    """Values outside an observation's effective window come back as null."""
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2019-04-17 01:23:00Z'}]}
    res = api.post(f'/aggregates/{aggregate_id}/metadata',
                   json=payload,
                   base_url=BASE_URL)
    assert res.status_code == 200
    res = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                  headers={'Accept': 'application/json'},
                  base_url=BASE_URL)
    assert res.status_code == 200
    assert res.json['values'][0]['value'] is None
    # NOTE(review): the index arithmetic below (offsets of 24*103 / 24*106
    # points) presumably maps to the fixtures' effective_from/until
    # boundaries assuming hourly values -- confirm against the demo data.
    # datapoint just before effective_from is None
    assert res.json['values'][6 + 24*103]['value'] is None
    # datapoint just inside/equal to effective_from is not None
    assert not math.isnan(res.json['values'][7 + 24*103]['value'])
    # datapoint just before effective_until is not null
    assert not math.isnan(res.json['values'][7 + 24*106]['value'])
    # datapoint after effective_until is null
    assert res.json['values'][8 + 24*106]['value'] is None
    assert res.json['values'][-1]['value'] is None
def test_get_aggregate_values_no_data_after_effective(api, aggregate_id):
    # https://github.com/SolarArbiter/solarforecastarbiter-api/issues/219
    # Regression test for #219: previously, requests for values after any of
    # the included observations' effective window was closed resulted in an
    # error instead of a normal response.
    payload = {'observations': [{
        'observation_id': '123e4567-e89b-12d3-a456-426655440000',
        'effective_until': '2019-04-14 07:00:00Z'}, {
        'observation_id': 'b1dfe2cb-9c8e-43cd-afcf-c5a6feaf81e2',
        'effective_until': '2019-04-14 07:00:00Z'}]}
    res = api.post(f'/aggregates/{aggregate_id}/metadata',
                   json=payload,
                   base_url=BASE_URL)
    assert res.status_code == 200
    # Query a window entirely after both effective_until cutoffs above.
    startend = '?start=2019-04-14T08:00Z&end=2019-04-17T07:00Z'
    res = api.get(f'/aggregates/{aggregate_id}/values{startend}',
                  headers={'Accept': 'application/json'},
                  base_url=BASE_URL)
    assert res.status_code == 200
    values = res.json['values']
    assert not math.isnan(values[0]['value'])
    assert not math.isnan(values[-1]['value'])
def test_get_aggregate_values_404(api, missing_id, startend):
    """Values for an unknown aggregate id yield 404."""
    resp = api.get(f'/aggregates/{missing_id}/values{startend}',
                   headers={'Accept': 'application/json'},
                   base_url=BASE_URL)
    assert resp.status_code == 404
def test_get_aggregate_forecasts(api, aggregate_id):
    """Single-value forecasts referencing the aggregate are listed."""
    resp = api.get(f'/aggregates/{aggregate_id}/forecasts/single',
                   base_url=BASE_URL)
    assert resp.status_code == 200
    forecasts = resp.get_json()
    assert isinstance(forecasts, list)
    assert len(forecasts) == 2
    first = forecasts[0]
    assert first['forecast_id'] in demo_forecasts
    assert first['aggregate_id'] == aggregate_id
def test_get_aggregate_forecasts_404(api, missing_id):
    """Forecast listing for an unknown aggregate id yields 404."""
    resp = api.get(f'/aggregates/{missing_id}/forecasts/single',
                   base_url=BASE_URL)
    assert resp.status_code == 404
def test_get_aggregate_cdf_forecast_groups(api, aggregate_id):
    """The single CDF forecast group attached to the aggregate is returned."""
    resp = api.get(f'/aggregates/{aggregate_id}/forecasts/cdf',
                   base_url=BASE_URL)
    assert resp.status_code == 200
    cdf_groups = resp.get_json()
    assert isinstance(cdf_groups, list)
    assert len(cdf_groups) == 1
    expected = list(demo_group_cdf.values())[-1]
    assert cdf_groups[0]['forecast_id'] == expected['forecast_id']
    assert cdf_groups[0]['aggregate_id'] == aggregate_id
def test_get_aggregate_cdf_forecasts_404(api, missing_id):
    """CDF forecast listing for an unknown aggregate id yields 404."""
    resp = api.get(f'/aggregates/{missing_id}/forecasts/cdf',
                   base_url=BASE_URL)
    assert resp.status_code == 404
def test_aggregate_values_deleted_observation(api, observation_id, startend):
    """A deleted observation's data cannot be pulled into aggregate values."""
    created = api.post('/aggregates/', base_url=BASE_URL,
                       json=VALID_AGG_JSON)
    assert created.status_code == 201
    assert 'Location' in created.headers
    new_agg_id = created.get_data(as_text=True)
    payload = {'observations': [{
        'observation_id': observation_id,
        'effective_from': '2029-01-01 01:23:00Z'}]}
    attached = api.post(f'/aggregates/{new_agg_id}/metadata',
                        json=payload, base_url=BASE_URL)
    assert attached.status_code == 200
    removed = api.delete(f'/observations/{observation_id}',
                         base_url=BASE_URL)
    assert removed.status_code == 204
    values = api.get(f'/aggregates/{new_agg_id}/values{startend}',
                     base_url=BASE_URL)
    assert values.status_code == 422
    errors = values.json['errors']
    assert errors['values'] == ['Deleted Observation data cannot be retrieved '
                                'to include in Aggregate']
@pytest.mark.parametrize('queryparams,label,exp1,exp2', [
    ('?start=2019-04-14T13:00Z&end=2019-04-14T14:00Z', 'beginning',
     {'timestamp': '2019-04-14T13:00:00Z', 'value': 78.30793},
     {'timestamp': '2019-04-14T14:00:00Z', 'value': 303.016}),
    # A request starting mid-interval snaps back to the containing interval.
    ('?start=2019-04-14T13:30Z&end=2019-04-14T14:30Z', 'beginning',
     {'timestamp': '2019-04-14T13:00:00Z', 'value': 78.30793},
     {'timestamp': '2019-04-14T14:00:00Z', 'value': 303.016}),
    ('?start=2019-04-14T13:00Z&end=2019-04-14T14:00Z', 'ending',
     {'timestamp': '2019-04-14T13:00:00Z', 'value': -0.77138242},
     {'timestamp': '2019-04-14T14:00:00Z', 'value': 93.286025}),
    ('?start=2019-04-14T13:30Z&end=2019-04-14T14:30Z', 'ending',
     {'timestamp': '2019-04-14T14:00:00Z', 'value': 93.286025},
     {'timestamp': '2019-04-14T15:00:00Z', 'value': 323.98858333}),
])
def test_aggregate_values_interval_label(
        api, observation_id, label, exp1, exp2, queryparams):
    """Aggregated values respect the aggregate's interval_label.

    NOTE(review): the expected float values above presumably come from the
    demo data fixtures -- confirm against those fixtures if they change.
    """
    agg = deepcopy(VALID_AGG_JSON)
    agg['interval_label'] = label
    r1 = api.post('/aggregates/',
                  base_url=BASE_URL,
                  json=agg)
    assert r1.status_code == 201
    assert 'Location' in r1.headers
    aggregate_id = r1.get_data(as_text=True)
    payload = {'observations': [{
        'observation_id': observation_id,
        'effective_from': '2019-04-14 06:00:00Z'}]}
    api.post(f'/aggregates/{aggregate_id}/metadata',
             json=payload,
             base_url=BASE_URL)
    r3 = api.get(
        f'/aggregates/{aggregate_id}/values{queryparams}',
        headers={'Accept': 'application/json'},
        base_url=BASE_URL)
    values = r3.json['values']
    assert len(values) == 2
    # Expected points carry a clean quality flag.
    exp1.update({'quality_flag': 0})
    exp2.update({'quality_flag': 0})
    assert values[0] == exp1
    assert values[1] == exp2
@pytest.mark.parametrize('label,expected', [
    ('beginning', {'timestamp': '2019-04-14T13:00:00Z', 'value': 78.30793}),
    ('ending', {'timestamp': '2019-04-14T14:00:00Z', 'value': 93.286025}),
])
def test_aggregate_values_inside_interval(
        api, observation_id, label, expected):
    # Ensure that a request for data inside an interval returns the whole
    # interval that contains that data.
    agg = deepcopy(VALID_AGG_JSON)
    agg['interval_label'] = label
    r1 = api.post('/aggregates/',
                  base_url=BASE_URL,
                  json=agg)
    assert r1.status_code == 201
    assert 'Location' in r1.headers
    aggregate_id = r1.get_data(as_text=True)
    payload = {'observations': [{
        'observation_id': observation_id,
        'effective_from': '2019-04-14 06:00:00Z'}]}
    api.post(f'/aggregates/{aggregate_id}/metadata',
             json=payload,
             base_url=BASE_URL)
    # The 13:30-13:59 window lies strictly inside one interval, so exactly
    # one aggregated point should come back.
    r3 = api.get(
        f'/aggregates/{aggregate_id}/values'
        '?start=2019-04-14T13:30Z&end=2019-04-14T13:59Z',
        headers={'Accept': 'application/json'},
        base_url=BASE_URL)
    values = r3.json['values']
    expected.update({'quality_flag': 0})
    assert len(values) == 1
    assert values[0] == expected
def test_aggregate_delete_obs(api, aggregate_id):
    """DELETE on the observation sub-resource removes it from the aggregate."""
    obsid = '123e4567-e89b-12d3-a456-426655440000'
    before = api.get(f'/aggregates/{aggregate_id}/metadata',
                     base_url=BASE_URL)
    assert obsid in [o['observation_id']
                     for o in before.json['observations']]
    removal = api.delete(f'/aggregates/{aggregate_id}/observations/{obsid}',
                         base_url=BASE_URL)
    assert removal.status_code == 204
    after = api.get(f'/aggregates/{aggregate_id}/metadata',
                    base_url=BASE_URL)
    assert obsid not in [o['observation_id']
                         for o in after.json['observations']]
def test_aggregate_delete_obs_404_agg(api, missing_id):
    """Deleting an observation from a missing aggregate yields 404."""
    obsid = '123e4567-e89b-12d3-a456-426655440000'
    resp = api.delete(f'/aggregates/{missing_id}/observations/{obsid}',
                      base_url=BASE_URL)
    assert resp.status_code == 404
def test_aggregate_delete_obs_no_obs(api, missing_id, aggregate_id):
    """Deleting an observation not on the aggregate is a harmless 204."""
    resp = api.delete(
        f'/aggregates/{aggregate_id}/observations/{missing_id}',
        base_url=BASE_URL)
    assert resp.status_code == 204  # no effect
def test_get_aggregate_values_all_none(api, aggregate_id):
    """Ensure that Null values are computed correctly. Regression test for
    GH 296
    """
    start = '20190101T0000Z'
    end = '20190201T0000Z'
    agg = demo_aggregates[aggregate_id]
    # Upload all-null data for every member observation of the aggregate.
    index = pd.date_range(start, end, freq='5T')
    obs_data = pd.DataFrame({'value': None, 'quality_flag': 0}, index=index)
    obs_data['timestamp'] = index.strftime('%Y%m%dT%H%MZ')
    value_payload = {'values': obs_data.to_dict(orient='records')}
    for obs in agg['observations']:
        obs_id = obs['observation_id']
        api.post(f'/observations/{obs_id}/values?donotvalidate=true',
                 base_url=BASE_URL,
                 json=value_payload)
    resp = api.get(
        f'/aggregates/{aggregate_id}/values?start={start}&end={end}',
        headers={'Accept': 'application/json'},
        base_url=BASE_URL)
    assert resp.status_code == 200
    assert resp.json['aggregate_id'] == aggregate_id
    # Every aggregated point must be null, never NaN or a number.
    assert all(point['value'] is None for point in resp.json['values'])
| StarcoderdataPython |
1688316 | #!/usr/bin/env python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenType-related data."""
__author__ = '<EMAIL> (<NAME>)'
from nototools import unicode_data
# Mapping of mirrored character -> bidi mirroring glyph, populated lazily
# by _set_ompl() below.
OMPL = {}


def _set_ompl():
    """Set up OMPL.

    OMPL is defined to be the list of mirrored pairs in Unicode 5.1:
    http://www.microsoft.com/typography/otspec/ttochap1.htm#ltrrtl
    """
    global OMPL
    unicode_data.load_data()
    # Reaches into a private table of nototools.unicode_data; pairs whose
    # character was introduced after Unicode 5.1 are filtered out by age.
    bmg_data = unicode_data._bidi_mirroring_glyph_data
    OMPL = {char: bmg for (char, bmg) in bmg_data.items()
            if float(unicode_data.age(char)) <= 5.1}
# Characters shaping engines commonly require beyond a script's own
# repertoire (used by the per-script table below).
ZWSP = [0x200B]                 # ZERO WIDTH SPACE
JOINERS = [0x200C, 0x200D]      # ZERO WIDTH NON-JOINER / ZERO WIDTH JOINER
BIDI_MARKS = [0x200E, 0x200F]   # LEFT-TO-RIGHT MARK / RIGHT-TO-LEFT MARK
DOTTED_CIRCLE = [0x25CC]        # placeholder base for isolated marks

# From the various script-specific specs at
# http://www.microsoft.com/typography/SpecificationsOverview.mspx
SPECIAL_CHARACTERS_NEEDED = {
    'Arab': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
    'Beng': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Bugi': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Deva': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Gujr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Guru': ZWSP + JOINERS + DOTTED_CIRCLE,
    # Hangul may not need the special characters:
    # https://code.google.com/p/noto/issues/detail?id=147#c2
    # 'Hang': ZWSP + JOINERS,
    'Hebr': BIDI_MARKS + DOTTED_CIRCLE,
    'Java': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Khmr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Knda': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Laoo': ZWSP + DOTTED_CIRCLE,
    'Mlym': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Mymr': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Orya': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Sinh': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Syrc': JOINERS + BIDI_MARKS + DOTTED_CIRCLE,
    'Taml': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Telu': ZWSP + JOINERS + DOTTED_CIRCLE,
    'Thaa': BIDI_MARKS + DOTTED_CIRCLE,
    'Thai': ZWSP + DOTTED_CIRCLE,
    'Tibt': ZWSP + JOINERS + DOTTED_CIRCLE,
}
# www.microsoft.com/typography/otspec/os2.html#ur
# bit, block name, block range
_unicoderange_data = """0\tBasic Latin\t0000-007F
1\tLatin-1 Supplement\t0080-00FF
2\tLatin Extended-A\t0100-017F
3\tLatin Extended-B\t0180-024F
4\tIPA Extensions\t0250-02AF
\tPhonetic Extensions\t1D00-1D7F
\tPhonetic Extensions Supplement\t1D80-1DBF
5\tSpacing Modifier Letters\t02B0-02FF
\tModifier Tone Letters\tA700-A71F
6\tCombining Diacritical Marks\t0300-036F
\tCombining Diacritical Marks Supplement\t1DC0-1DFF
7\tGreek and Coptic\t0370-03FF
8\tCoptic\t2C80-2CFF
9\tCyrillic\t0400-04FF
\tCyrillic Supplement\t0500-052F
\tCyrillic Extended-A\t2DE0-2DFF
\tCyrillic Extended-B\tA640-A69F
10\tArmenian\t0530-058F
11\tHebrew\t0590-05FF
12\tVai\tA500-A63F
13\tArabic\t0600-06FF
\tArabic Supplement\t0750-077F
14\tNKo\t07C0-07FF
15\tDevanagari\t0900-097F
16\tBengali\t0980-09FF
17\tGurmukhi\t0A00-0A7F
18\tGujarati\t0A80-0AFF
19\tOriya\t0B00-0B7F
20\tTamil\t0B80-0BFF
21\tTelugu\t0C00-0C7F
22\tKannada\t0C80-0CFF
23\tMalayalam\t0D00-0D7F
24\tThai\t0E00-0E7F
25\tLao\t0E80-0EFF
26\tGeorgian\t10A0-10FF
\tGeorgian Supplement\t2D00-2D2F
27\tBalinese\t1B00-1B7F
28\tHangul Jamo\t1100-11FF
29\tLatin Extended Additional\t1E00-1EFF
\tLatin Extended-C\t2C60-2C7F
\tLatin Extended-D\tA720-A7FF
30\tGreek Extended\t1F00-1FFF
31\tGeneral Punctuation\t2000-206F
\tSupplemental Punctuation\t2E00-2E7F
32\tSuperscripts And Subscripts\t2070-209F
33\tCurrency Symbols\t20A0-20CF
34\tCombining Diacritical Marks For Symbols\t20D0-20FF
35\tLetterlike Symbols\t2100-214F
36\tNumber Forms\t2150-218F
37\tArrows\t2190-21FF
\tSupplemental Arrows-A\t27F0-27FF
\tSupplemental Arrows-B\t2900-297F
\tMiscellaneous Symbols and Arrows\t2B00-2BFF
38\tMathematical Operators\t2200-22FF
\tSupplemental Mathematical Operators\t2A00-2AFF
\tMiscellaneous Mathematical Symbols-A\t27C0-27EF
\tMiscellaneous Mathematical Symbols-B\t2980-29FF
39\tMiscellaneous Technical\t2300-23FF
40\tControl Pictures\t2400-243F
41\tOptical Character Recognition\t2440-245F
42\tEnclosed Alphanumerics\t2460-24FF
43\tBox Drawing\t2500-257F
44\tBlock Elements\t2580-259F
45\tGeometric Shapes\t25A0-25FF
46\tMiscellaneous Symbols\t2600-26FF
47\tDingbats\t2700-27BF
48\tCJK Symbols And Punctuation\t3000-303F
49\tHiragana\t3040-309F
50\tKatakana\t30A0-30FF
\tKatakana Phonetic Extensions\t31F0-31FF
51\tBopomofo\t3100-312F
\tBopomofo Extended\t31A0-31BF
52\tHangul Compatibility Jamo\t3130-318F
53\tPhags-pa\tA840-A87F
54\tEnclosed CJK Letters And Months\t3200-32FF
55\tCJK Compatibility\t3300-33FF
56\tHangul Syllables\tAC00-D7AF
57\tNon-Plane 0 *\tD800-DFFF
58\tPhoenician\t10900-1091F
59\tCJK Unified Ideographs\t4E00-9FFF
\tCJK Radicals Supplement\t2E80-2EFF
\tKangxi Radicals\t2F00-2FDF
\tIdeographic Description Characters\t2FF0-2FFF
\tCJK Unified Ideographs Extension A\t3400-4DBF
\tCJK Unified Ideographs Extension B\t20000-2A6DF
\tKanbun\t3190-319F
60\tPrivate Use Area (plane 0)\tE000-F8FF
61\tCJK Strokes\t31C0-31EF
\tCJK Compatibility Ideographs\tF900-FAFF
\tCJK Compatibility Ideographs Supplement\t2F800-2FA1F
62\tAlphabetic Presentation Forms\tFB00-FB4F
63\tArabic Presentation Forms-A\tFB50-FDFF
64\tCombining Half Marks\tFE20-FE2F
65\tVertical Forms\tFE10-FE1F
\tCJK Compatibility Forms\tFE30-FE4F
66\tSmall Form Variants\tFE50-FE6F
67\tArabic Presentation Forms-B\tFE70-FEFF
68\tHalfwidth And Fullwidth Forms\tFF00-FFEF
69\tSpecials\tFFF0-FFFF
70\tTibetan\t0F00-0FFF
71\tSyriac\t0700-074F
72\tThaana\t0780-07BF
73\tSinhala\t0D80-0DFF
74\tMyanmar\t1000-109F
75\tEthiopic\t1200-137F
\tEthiopic Supplement\t1380-139F
\tEthiopic Extended\t2D80-2DDF
76\tCherokee\t13A0-13FF
77\tUnified Canadian Aboriginal Syllabics\t1400-167F
78\tOgham\t1680-169F
79\tRunic\t16A0-16FF
80\tKhmer\t1780-17FF
\tKhmer Symbols\t19E0-19FF
81\tMongolian\t1800-18AF
82\tBraille Patterns\t2800-28FF
83\tYi Syllables\tA000-A48F
\tYi Radicals\tA490-A4CF
84\tTagalog\t1700-171F
\tHanunoo\t1720-173F
\tBuhid\t1740-175F
\tTagbanwa\t1760-177F
85\tOld Italic\t10300-1032F
86\tGothic\t10330-1034F
87\tDeseret\t10400-1044F
88\tByzantine Musical Symbols\t1D000-1D0FF
\tMusical Symbols\t1D100-1D1FF
\tAncient Greek Musical Notation\t1D200-1D24F
89\tMathematical Alphanumeric Symbols\t1D400-1D7FF
90\tPrivate Use (plane 15)\tFF000-FFFFD
\tPrivate Use (plane 16)\t100000-10FFFD
91\tVariation Selectors\tFE00-FE0F
\tVariation Selectors Supplement\tE0100-E01EF
92\tTags\tE0000-E007F
93\tLimbu\t1900-194F
94\tTai Le\t1950-197F
95\tNew Tai Lue\t1980-19DF
96\tBuginese\t1A00-1A1F
97\tGlagolitic\t2C00-2C5F
98\tTifinagh\t2D30-2D7F
99\tYijing Hexagram Symbols\t4DC0-4DFF
100\tSyloti Nagri\tA800-A82F
101\tLinear B Syllabary\t10000-1007F
\tLinear B Ideograms\t10080-100FF
\tAegean Numbers\t10100-1013F
102\tAncient Greek Numbers\t10140-1018F
103\tUgaritic\t10380-1039F
104\tOld Persian\t103A0-103DF
105\tShavian\t10450-1047F
106\tOsmanya\t10480-104AF
107\tCypriot Syllabary\t10800-1083F
108\tKharoshthi\t10A00-10A5F
109\tTai Xuan Jing Symbols\t1D300-1D35F
110\tCuneiform\t12000-123FF
\tCuneiform Numbers and Punctuation\t12400-1247F
111\tCounting Rod Numerals\t1D360-1D37F
112\tSundanese\t1B80-1BBF
113\tLepcha\t1C00-1C4F
114\tOl Chiki\t1C50-1C7F
115\tSaurashtra\tA880-A8DF
116\tKayah Li\tA900-A92F
117\tRejang\tA930-A95F
118\tCham\tAA00-AA5F
119\tAncient Symbols\t10190-101CF
120\tPhaistos Disc\t101D0-101FF
121\tCarian\t102A0-102DF
\tLycian\t10280-1029F
\tLydian\t10920-1093F
122\tDomino Tiles\t1F030-1F09F
\tMahjong Tiles\t1F000-1F02F
"""
# Populated lazily by _setup_unicoderange_data(): ur_data is the flat,
# sorted list of (start, end, bucket, name) range tuples parsed from
# _unicoderange_data; ur_bucket_info groups those same tuples by their
# os/2 unicoderange bucket index (0-127).
ur_data = []
ur_bucket_info = [[] for i in range(128)]
def _setup_unicoderange_data():
    """The unicoderange data used in the os/2 table consists of slightly under
    128 'buckets', each of which consists of one or more 'ranges' of codepoints.
    Each range has a name, start, and end. Bucket 57 is special, it consists of
    all non-BMP codepoints and overlaps the other ranges, though in the data it
    corresponds to the high and low UTF-16 surrogate code units. The other ranges
    are all disjoint.

    We build two tables. ur_data is a list of the ranges, consisting of the
    start, end, bucket index, and name. It is sorted by range start. ur_bucket_info
    is a list of buckets in bucket index order; each entry is a list of the tuples
    in ur_data that belong to that bucket.

    This is called by functions that require these tables. On first use it builds
    ur_data and ur_bucket_info, which should remain unchanged thereafter."""
    if ur_data:
        # Already built on a previous call.
        return
    index = 0
    for line in _unicoderange_data.splitlines():
        index_str, name, urange = line.split('\t')
        range_start_str, range_end_str = urange.split('-')
        # Range bounds are hexadecimal codepoints.
        range_start = int(range_start_str, 16)
        range_end = int(range_end_str, 16)
        # Continuation lines have an empty bucket-index column and reuse
        # the most recently seen index.
        if index_str:
            index = int(index_str)
        tup = (range_start, range_end, index, name)
        ur_data.append(tup)
        ur_bucket_info[index].append(tup)
    ur_data.sort()
def collect_unicoderange_info(cmap):
    """Return a list of 2-tuples, the first element a count of the characters in a
    range, the second element the 4-tuple of information about that range: start,
    end, bucket number, and name. Only ranges for which the cmap has a character
    are included."""
    _setup_unicoderange_data()
    range_count = 0
    index = 0
    limit = len(ur_data)
    result = []
    # Walk the sorted codepoints and the sorted ranges in lockstep,
    # emitting a (count, range) pair whenever we advance past a range
    # that matched at least one codepoint.
    for cp in sorted(cmap):
        while index < limit:
            tup = ur_data[index]
            if cp <= tup[1]:
                # the ranges are disjoint and some characters fall into no
                # range, e.g. Javanese.
                if cp >= tup[0]:
                    range_count += 1
                break
            # cp lies beyond the current range; flush its count (if any)
            # and move on to the next range.
            if range_count:
                result.append((range_count, ur_data[index]))
                range_count = 0
            index += 1
    # Flush the final range if it collected any characters.
    if range_count:
        result.append((range_count, ur_data[index]))
    return result
def unicoderange_bucket_info_name(bucket_info):
    """Comma-join the names (4th element) of the range tuples in bucket_info."""
    names = (info[3] for info in bucket_info)
    return ', '.join(names)
def unicoderange_bucket_info_size(bucket_info):
    """Total number of codepoints covered by the ranges in bucket_info."""
    total = 0
    for start, end, _bucket, _name in bucket_info:
        # Range bounds are inclusive on both ends.
        total += end - start + 1
    return total
def unicoderange_bucket_index_to_info(bucket_index):
    """Return the list of range tuples for an os/2 unicoderange bucket.

    Raises ValueError for indices outside 0..127.
    """
    if not 0 <= bucket_index < 128:
        raise ValueError('bucket_index %s out of range' % bucket_index)
    _setup_unicoderange_data()
    return ur_bucket_info[bucket_index]
def unicoderange_bucket_index_to_name(bucket_index):
    """Return the display name for an os/2 unicoderange bucket index."""
    bucket_info = unicoderange_bucket_index_to_info(bucket_index)
    return unicoderange_bucket_info_name(bucket_info)
# Build OMPL eagerly at import time so module clients can use it directly.
if not OMPL:
    _set_ompl()
| StarcoderdataPython |
22119 | import testtools
from oslo_log import log
from tempest.api.compute import base
import tempest.api.compute.flavors.test_flavors as FlavorsV2Test
import tempest.api.compute.flavors.test_flavors_negative as FlavorsListWithDetailsNegativeTest
import tempest.api.compute.flavors.test_flavors_negative as FlavorDetailsNegativeTest
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest.lib import decorators
from tempest import test
from tempest import config
# Module-level handles to the Tempest configuration and a namespaced logger.
CONF = config.CONF
LOG = log.getLogger(__name__)
class HybridFlavorsV2TestJSON(FlavorsV2Test.FlavorsV2TestJSON):
    """Run the upstream Tempest v2 flavors tests unchanged in this suite."""
# Skipped: the generated negative scenarios need testscenarios support,
# which is not active in this environment.
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorsListWithDetailsNegativeTestJSON(FlavorsListWithDetailsNegativeTest.FlavorsListWithDetailsNegativeTestJSON):
    """Negative list-flavors-with-details tests (auto-generated, skipped)."""
# Skipped for the same reason as the list-with-details negative tests above.
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorDetailsNegativeTestJSON(FlavorDetailsNegativeTest.FlavorDetailsNegativeTestJSON):
    """Negative flavor-details tests (auto-generated, skipped)."""
| StarcoderdataPython |
9702349 | # Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2016-2020 German Aerospace Center (DLR) and others.
# SUMOPy module
# Copyright (C) 2012-2017 University of Bologna - DICAM
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file publictransportservices.py
# @author <NAME>
# @date
import os
import sys
import string
from xml.sax import saxutils, parse, handler
import numpy as np
from collections import OrderedDict
from coremodules.modules_common import *
from coremodules.network.routing import get_mincostroute_edge2edge
import agilepy.lib_base.classman as cm
import agilepy.lib_base.arrayman as am
import agilepy.lib_base.xmlman as xm
from agilepy.lib_base.geometry import *
from agilepy.lib_base.processes import Process, CmlMixin
from agilepy.lib_base.misc import get_inversemap
from coremodules.simulation import results as res
from coremodules.demand.demandbase import DemandobjMixin
#from coremodules.network.network import SumoIdsConf, MODES
# These are all possible linktypes
# example
# https://github.com/planetsumo/sumo/blob/master/tests/sumo/extended/busses/three_busses/input_additional.add.xml
# Array-managed table of public transport line services.  Each row is one
# line service: name, service time window, headway (period), dwell time,
# stop sequence, route edges and vehicle type.  Parent is the demand object.
class PtLines(DemandobjMixin, am.ArrayObjman):
def __init__(self, ident, demand, **kwargs):
# Register this table with the array-object manager, then build columns.
self._init_objman(ident=ident,
parent=demand,
name='Public transport lines',
info='Object, containing information on public transport line services.',
#xmltag = ('flows','flow','id'),
version=0.2,
**kwargs)
self._init_attributes()
# def set_version(self, verion = 0.1):
# self._version = 0.1
def _init_attributes(self):
# Declare all columns/configs of the line table.  Called from __init__
# and again on load, so version-migration code at the bottom can adapt
# tables saved with an older schema.
# attrsman = self # = self.get_attrsman()
demand = self.parent
net = self.get_net()
# child table holding the stop-to-stop link graph derived from the lines
self.add(cm.ObjConf(PtLinks('ptlinks', self)))
self.add_col(am.ArrayConf('linenames', default='',
dtype='object',
perm='rw',
name='Line name',
info='This is the official name or number of the line. Note that the same line may have several line services for different service periods.',
xmltag='line',
))
# service time window and headway, all in seconds
self.add_col(am.ArrayConf('times_begin', 0,
name='Begin time',
unit='s',
perm='rw',
info='Time when service begins.',
xmltag='begin',
))
self.add_col(am.ArrayConf('times_end', 0,
name='End time',
perm='rw',
unit='s',
info='Time when service ends.',
xmltag='end',
))
self.add_col(am.ArrayConf('periods', 0,
name='Interval',
perm='rw',
unit='s',
info='Time interval between consecutive vehicles.',
xmltag='period',
))
# NOTE(review): 'untit' below looks like a typo for 'unit' -- verify
# against the ArrayConf keyword list before changing.
self.add_col(am.ArrayConf('times_dwell', 20,
groupnames=['options'],
perm='rw',
name='Dwell time',
untit='s',
info='Dwell time in a stop while passengers are boarding/alighting.',
xmltag='duration',
))
# per-line stop sequence and routed edge sequence
self.add_col(am.IdlistsArrayConf('ids_stops', net.ptstops,
groupnames=['parameters'],
name='PT stop IDs',
info='Sequence of IDs of stops or stations of a public transort line.',
))
self.add_col(am.IdlistsArrayConf('ids_edges', net.edges,
name='Edge IDs',
info='Sequence of edge IDs constituting this public transport line.',
xmltag='edges',
))
self.add_col(am.IdsArrayConf('ids_vtype', demand.vtypes,
id_default=0,
groupnames=['state'],
name='Veh. type ID',
info='Vehicle type used to derve this line.',
xmltag='type',
))
# row function shown in the GUI to delete a single line service
self.add(cm.FuncConf('func_delete_row', 'on_del_row', None,
groupnames=['rowfunctions', '_private'],
name='Del line',
info='Delete line service.',
is_returnval=False,
))
# schema migration from versions before 0.2: drop the old scalar
# 'period' attribute and (re)attach xml tags to the columns
if self.get_version() < 0.2:
if hasattr(self, 'period'):
self.delete('period')
self.linenames.set_xmltag('line')
self.ids_vtype.set_xmltag('type')
self.times_dwell.set_xmltag('duration')
self.periods.set_xmltag('period')
self.times_end.set_xmltag('end')
self.times_begin.set_xmltag('begin')
def format_ids(self, ids):
# Human-readable representation: comma-joined line names of the rows.
return ','.join(self.linenames[ids])
def get_id_from_formatted(self, idstr):
# Reverse lookup: row ID of the line with the given (indexed) name.
return self.linenames.get_id_from_index(idstr)
def get_ids_from_formatted(self, idstrs):
# Reverse lookup for a comma-separated list of line names; the "save"
# index lookup skips names that are not present.
return self.linenames.get_ids_from_indices_save(idstrs.split(','))
def get_net(self):
# The network object is reached through the parent demand object.
return self.parent.get_net()
def get_ptstops(self):
# Public transport stops table of the network.
return self.parent.get_net().ptstops
def get_ptlinks(self):
# The PtLinks child table stored as an object config on this table.
return self.ptlinks.get_value()
def make(self, **kwargs):
    """Create one public transport line service row and return its row ID.

    Keyword arguments (all optional; column defaults apply when omitted):
        linename, time_begin, time_end, period, time_dwell,
        ids_stop, ids_edge, id_vtype
    """
    # Bug fix: the new row ID used to be bound to a misnamed local
    # ('ids_stop') and then discarded, so callers could never obtain the
    # ID of the line they just created.  Return it instead (previously
    # the method returned None, so this is backward compatible).
    id_line = self.add_row(linenames=kwargs.get('linename', None),
                           times_begin=kwargs.get('time_begin', None),
                           times_end=kwargs.get('time_end', None),
                           periods=kwargs.get('period', None),
                           times_dwell=kwargs.get('time_dwell', None),
                           ids_stops=kwargs.get('ids_stop', None),
                           ids_edges=kwargs.get('ids_edge', None),
                           ids_vtype=kwargs.get('id_vtype', None),
                           )
    return id_line

def on_del_row(self, id_row=None):
    """Row-function callback from the GUI: delete line service *id_row*."""
    if id_row is not None:
        # print 'on_del_row', id_row
        self.del_row(id_row)
def get_time_depart_first(self):
    """Earliest service begin time over all lines; +inf for an empty table."""
    if len(self) == 0:
        return np.inf
    return float(np.min(self.times_begin.get_value()))

def get_time_depart_last(self):
    """Latest service end time over all lines; 0.0 for an empty table."""
    if len(self) == 0:
        return 0.0
    return float(np.max(self.times_end.get_value()))
def guess_routes(self, is_keep_existing=False):
    """
    Guess the sequence of edges between successive stops of each line
    using shortest-path routing on mode-specific edge travel times.

    If is_keep_existing is True, lines that already have a non-empty
    edge sequence are left untouched.
    """
    ids_line = self.get_ids()
    vtypes = self.ids_vtype.get_linktab()
    ptstops = self.get_ptstops()
    net = self.get_net()
    ids_laneedge = net.lanes.ids_edge
    ids_stoplane = ptstops.ids_lane

    # forward star of the edge graph, shared by all routings
    fstar = net.edges.get_fstar()

    # pre-compute edge travel times once per transport mode in use
    get_times = net.edges.get_times
    map_mode_to_times = {}
    ids_mode = vtypes.ids_mode[self.ids_vtype[ids_line]]
    for id_mode in set(ids_mode):
        map_mode_to_times[id_mode] = get_times(id_mode=id_mode,
                                               is_check_lanes=True)

    # complete routes between all pairs of successive stops of all lines
    for id_line, ids_stop, id_mode in zip(ids_line,
                                          self.ids_stops[ids_line],
                                          ids_mode
                                          ):
        # edges on which the line's stops are located
        ids_stopedge = ids_laneedge[ids_stoplane[ids_stop]]
        ids_edge = self.ids_edges[id_line]
        if (ids_edge in [None, []]) or (not is_keep_existing):
            # rebuild route leg by leg between successive stops
            ids_edge = []
            duration = 0  # accumulated travel time (informational only)
            for i in xrange(1, len(ids_stop)):
                time, ids_edges_current = get_mincostroute_edge2edge(
                    ids_stopedge[i-1],
                    ids_stopedge[i],
                    weights=map_mode_to_times[id_mode],
                    fstar=fstar)
                if len(ids_edges_current) == 0:
                    # No connection found between these two stops.
                    # Bug fix: the original cleared an unused variable
                    # 'ids_edges' here, so a truncated partial route was
                    # silently stored for the line; clear the real route.
                    ids_edge = []
                    break
                else:
                    duration += time
                    if i == 1:
                        ids_edge += ids_edges_current
                    else:
                        # avoid duplicating the edge shared by two legs
                        ids_edge += ids_edges_current[1:]
            self.ids_edges[id_line] = ids_edge
def get_vtypes(self):
# Set of all vehicle type IDs used by any line service.
return set(self.ids_vtype.get_value())
def get_writexmlinfo(self, is_route=False):
"""
Returns three arrays: the begin time of the first vehicle of each
line, the write function to be called for the respective vehicle,
and the vehicle (line) IDs.
Method used to sort trips when exporting to route or trip xml file.
"""
n = len(self)
ids = self.get_ids()
return self.times_begin[ids], n*[self.write_flow_xml], ids
def get_trips(self):
# returns trip object, method common to all demand objects
return self
def write_flow_xml(self, fd, id_line, time_begin, indent=0):
# Write one SUMO <flow> element for the given line to file object fd:
# flow attributes, then the <route> edge list, then one <stop> per
# served PT stop.  NOTE(review): time_begin is accepted for interface
# compatibility with other trip writers but is not used here; the
# begin time is written from the times_begin column -- verify callers.
#_idents = self.get_keys()
#_inds = self.get_inds_from_keys(_idents)
#_ids_egdes = self.cols.ids_edge[_inds]
# for _ind, _id_line, _ids_egde in zip(_inds, _idents, _ids_egdes):
#vtype = self.cols.vtype[id_line]
# write vehicle flow data
fd.write(xm.start('flow id="ptline.%s"' % id_line, indent))
for attrconfig in [self.ids_vtype,
self.linenames,
self.times_begin,
self.times_end,
self.periods,
]:
# print ' attrconfig',attrconfig.attrname
attrconfig.write_xml(fd, id_line)
#fd.write(xm.num('begin', '%d'%self.times_begin[id_line]))
#fd.write(xm.num('end', '%d'%self.times_end[id_line]))
#fd.write(xm.num('period', '%d'%self.periods[id_line]))
#fd.write(xm.num('line', self.linenames[id_line]))
#fd.write(xm.num('type', self.ids_vtype[id_line]))
fd.write(xm.stop())
# write route
#ids_edge, duration = self.route(_id_line, vtype)
ids_edge = self.ids_edges[id_line]
if len(ids_edge) > 0:
fd.write(xm.start('route', indent+2))
self.ids_edges.write_xml(fd, id_line)
# fd.write(xm.arr('edges',ids_egde,indent+4))
#fd.write(xm.num('departPos', pos_depart))
# depart lane is 1 , 0 would be on the side-walk)
#fd.write(xm.num('departLane', laneind_parking))
fd.write(xm.stopit())
# write stops
ids_stop = self.ids_stops[id_line]
if len(ids_stop) > 0:
stopnames = self.ids_stops.get_linktab().stopnames[ids_stop]
time_dwell = self.times_dwell[id_line]
for stopname in stopnames:
fd.write(xm.start('stop', indent+2))
fd.write(xm.num('busStop', stopname))
fd.write(xm.num('duration', time_dwell))
fd.write(xm.stopit())
fd.write(xm.end('flow', indent))
# def prepare_sim(self, process):
# return []
def get_id_from_id_sumo(self, id_veh_sumo):
    """Extract the line row ID from a SUMO vehicle ID.

    PT flow vehicles are named 'ptline.<id_line>.<run_index>'; any other
    format or prefix yields -1.
    """
    parts = id_veh_sumo.split('.')
    if len(parts) == 3 and parts[0] == 'ptline':
        return int(parts[1])
    return -1
def config_results(self, results):
# Register a per-line trip-results table with the simulation results
# object, so SUMO output can be attributed to PT lines.
tripresults = res.Tripresults('publiclineresults', results,
self,
self.get_net().edges,
name='Public line results',
info='Table with simulation results for each public transport line. The results refer to all trips made by all vehicles of a public transport line during the entire simulation period.',
)
results.add_resultobj(tripresults, groupnames=['Trip results'])
def process_results(self, results, process=None):
# Nothing to post-process for PT lines; hook kept for interface
# compatibility with other demand objects.
pass
# Stop-to-stop link graph derived from the PT lines: transit, board, alight,
# transfer, walk, enter and exit links used for PT routing of persons.
class PtLinks(am.ArrayObjman):
# http://www.sumo.dlr.de/userdoc/Networks/Building_Networks_from_own_XML-descriptions.html#Edge_Descriptions
def __init__(self, ident, ptlines, **kwargs):
# Register this table (parent is the PtLines table), then build columns.
self._init_objman(ident=ident, parent=ptlines,
name='PT links',
#xmltag = ('edges','edge','ids_sumo'),
info='Public transport stop-to-stop links',
version=0.1,
**kwargs)
self._init_attributes()
def _init_attributes(self):
# Declare all columns of the PT link table.  LINKTYPES enumerates the
# kinds of links in the person-routing graph.
LINKTYPES = {'none': 0,
'enter': 1,
'transit': 2,
'board': 3,
'alight': 4,
'transfer': 5,
'walk': 6,
'exit': 7,
}
ptlines = self.parent
net = self.parent.parent.get_net()
self.add_col(am.ArrayConf('types', LINKTYPES['none'],
dtype=np.int32,
perm='rw',
choices=LINKTYPES,
name='Type',
info='Type of PT link. Walking is needed to tranfer between lines.',
# xmltag = 'type', # should not be exported?
))
# endpoint stops of the link
self.add_col(am.IdsArrayConf('ids_fromstop', net.ptstops,
groupnames=['state'],
name='ID stop from',
info='ID of stop where the link starts.',
xmltag='from',
))
self.add_col(am.IdsArrayConf('ids_tostop', net.ptstops,
groupnames=['state'],
name='ID stop to',
info='ID of stop where the link ends.',
xmltag='to',
))
# Attention, when building Fstar, we need to take
# into account the travel time, as lines
# are defined over time intervals
# NOTE(review): xmltag 'to' below duplicates the one of ids_tostop --
# looks like a copy/paste slip; verify before exporting.
self.add_col(am.IdsArrayConf('ids_line', ptlines,
groupnames=['state'],
name='Line ID',
info='ID of public transport line. -1 means no line, in case of walking.',
xmltag='to',
))
# self.add_col(am.NumArrayConf('speeds_line', 30.0/3.6,
# dtype = np.float32,
# groupnames = ['state'],
# perm = 'rw',
# name = 'line speed',
# unit = 'm/s',
# info = 'Line speed on public transport link.',
# #xmltag = 'speed',
# ))
self.add_col(am.NumArrayConf('lengths', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='r',
name='Length',
unit='m',
info='Edge length.',
#xmltag = 'length ',
))
self.add_col(am.NumArrayConf('durations', 0.0,
dtype=np.float32,
groupnames=['state'],
perm='rw',
name='Duration',
unit='s',
info='Time duration of link, including dwell time.',
#xmltag = 'speed',
))
# adjacency: per-link list of successor links (the forward star)
self.add_col(am.IdlistsArrayConf('ids_links_forward', self,
groupnames=['parameters'],
name='Forward link IDs',
info='Forward link IDs.',
))
# migration from versions before 0.1: attach choices and drop the
# removed 'speeds_line' column
if self.get_version() < 0.1:
self.types.choices = LINKTYPES
self.delete('speeds_line')
def build(self, dist_walk_los=150.0, speed_walk_los=0.5):
"""
Build PT links from PT lines and PT stops.
dist_walk_los is the line-of-sight walking distance acceptable between
two stops for transfer.
speed_walk_los is the assumed line of sight walking speed
between two stops
"""
print 'build', self.ident, dist_walk_los, speed_walk_los
self.clear()
ptlines = self.parent
net = self.parent.get_net()
# shortcut names for the link type codes
ptlinktypes = self.types.choices
type_enter = ptlinktypes['enter']
type_transit = ptlinktypes['transit']
type_board = ptlinktypes['board']
type_alight = ptlinktypes['alight']
type_transfer = ptlinktypes['transfer']
type_walk = ptlinktypes['walk']
type_exit = ptlinktypes['exit']
edgelengths = net.edges.lengths
edgespeeds = net.edges.speeds_max
demand = ptlines.parent
ids_line = ptlines.get_ids()
ids_stoplane = net.ptstops.ids_lane
ids_laneedge = net.lanes.ids_edge
stoppositions = net.ptstops.positions_to
# dictionary with is_sto as key and a dicionary with
# links as value
stoplinks = {}
for id_stop in net.ptstops.get_ids():
stoplinks[id_stop] = {'ids_transit_out': [],
'ids_transit_in': [],
'ids_board': [],
'ids_alight': [],
'id_transfer': -1,
'ids_walk': [],
'id_exit': -1
}
# first create links between stops of each line
for id_line, id_vtype, ids_stop, ids_edge in zip(
ids_line,
ptlines.ids_vtype[ids_line],
ptlines.ids_stops[ids_line],
ptlines.ids_edges[ids_line]
):
print ' id_line,ids_edge', id_line, ids_edge
if (len(ids_edge) > 1) & (len(ids_stop) > 2):
# length/duration accumulate along the route edges, starting
# from the position of the first stop on its edge
ind_edge = 0
length = edgelengths[ids_edge[0]] - stoppositions[ids_stop[0]]
duration = length/edgespeeds[ids_edge[0]]
#length_laststop = length_current
id_stopedge_next = ids_laneedge[ids_stoplane[ids_stop[1]]]
ids_link = []
for ind_stop in xrange(1, len(ids_stop)):
id_fromstop = ids_stop[ind_stop-1]
id_tostop = ids_stop[ind_stop]
print ' id_fromstop,id_tostop', id_fromstop, id_tostop
# this prevents error in case two successive stops have
# (by editing error) the same ID
if id_fromstop != id_tostop:
# compute length and time between fromstop and tostop
while id_stopedge_next != ids_edge[ind_edge]:
print ' ind_edge,id_stopedge_next,ids_edge[ind_edge]', ind_edge, id_stopedge_next, ids_edge[ind_edge], len(
ids_edge)
ind_edge += 1
length_edge = edgelengths[ids_edge[ind_edge]]
length += length_edge
duration += length_edge/edgespeeds[ids_edge[ind_edge]]
# adjust length and time measurement on last edge
length_delta = edgelengths[id_stopedge_next] - stoppositions[id_tostop]
length -= length_delta
duration -= length_delta/edgespeeds[id_stopedge_next]
# add dwell time
duration += ptlines.times_dwell[id_line]
id_link = self.add_row(types=type_transit,
ids_fromstop=id_fromstop,
ids_tostop=id_tostop,
ids_line=id_line,
lengths=length,
durations=duration,
)
ids_link.append(id_link)
stoplinks[id_fromstop]['ids_transit_out'].append(id_link)
stoplinks[id_tostop]['ids_transit_in'].append(id_link)
# is id_tostop the last stop?
if id_tostop != ids_stop[-1]:
# prepare lengthe and duration mesurement
id_stopedge_next = ids_laneedge[ids_stoplane[ids_stop[ind_stop+1]]]
length = length_delta
duration = 0.0
# create forward links for this line
for i in xrange(1, len(ids_link)):
self.ids_links_forward[ids_link[i-1]] = [ids_link[i]]
# put empty link list to line end-stop
self.ids_links_forward[ids_link[i]] = []
# complete stoplink database
#ids_link_transit = self.get_ids().copy()
#ids_fromstop = self.ids_fromstop[ids_link_transit].copy()
#ids_tostop = self.ids_tostop[ids_link_transit].copy()
#ids_lines_transit = self.ids_line[ids_link_transit].copy()
periods = ptlines.periods
#periods_transit = ptlines.periods[ids_lines_transit]
# get for each stop a list of close stops with distances
ids_stops_prox = net.ptstops.get_stops_proximity(dist_walk_los=dist_walk_los)
print ' ids_stops_prox', ids_stops_prox
for id_stop in net.ptstops.get_ids():
# walk links
dists_stop_prox, ids_stop_prox = ids_stops_prox[id_stop]
n_stop_prox = len(ids_stop_prox)
unitvec = np.ones(n_stop_prox, dtype=np.int32)
ids_walk = self.add_rows(types=type_walk*unitvec, # access
ids_fromstop=id_stop*unitvec,
ids_tostop=ids_stop_prox,
lengths=dists_stop_prox,
durations=dists_stop_prox/speed_walk_los,
ids_links_forward=[None, ]*n_stop_prox, # later
)
stoplinks[id_stop]['ids_walk'] = ids_walk
# transfer link
# NOTE(review): 'speeds_line' was removed from the schema in the
# version-0.1 migration; this kwarg and the missing 'durations'
# look stale -- verify add_row tolerates/ignores it.
id_transfer = self.add_row(types=type_transfer, # access
ids_fromstop=id_stop,
ids_tostop=id_stop,
lengths=1.0,
speeds_line=1.0,
ids_links_forward=ids_walk.tolist(), # completed below
)
stoplinks[id_stop]['id_transfer'] = id_transfer
# exit link
id_exit = self.add_row(types=type_exit, # access
ids_fromstop=id_stop,
ids_tostop=id_stop,
lengths=0.0,
durations=0.0,
ids_links_forward=[], # dead end
)
stoplinks[id_stop]['id_exit'] = id_exit
# boarding links
# print ' ids_transit_out',stoplinks[id_stop]['ids_transit_out']
ids_transit = np.array(stoplinks[id_stop]['ids_transit_out'], dtype=np.int32)
# print ' ids_transit',ids_transit
n_transit = len(ids_transit)
unitvec = np.ones(n_transit, dtype=np.int32)
# print ' type_board*unitvec',type_board*unitvec
# board duration is half the headway = expected waiting time
ids_board = self.add_rows(n=n_transit,
types=type_board*unitvec, # access
ids_fromstop=id_stop*unitvec,
ids_tostop=id_stop*unitvec,
lengths=1.0*unitvec,
durations=0.5*periods[self.ids_line[ids_transit]],
ids_links_forward=ids_transit.reshape(-1, 1).tolist(),
)
stoplinks[id_stop]['ids_board'] = ids_board
# enter link
id_enter = self.add_row(types=type_enter, # access
ids_fromstop=id_stop,
ids_tostop=id_stop,
lengths=0.0,
durations=0.0,
ids_links_forward=ids_board.tolist()+[id_transfer, id_exit],
)
stoplinks[id_stop]['id_enter'] = id_enter
# alight links
ids_transit = np.array(stoplinks[id_stop]['ids_transit_in'], dtype=np.int32)
n_transit = len(ids_transit)
unitvec = np.ones(n_transit, dtype=np.int32)
ids_alight = self.add_rows(n=n_transit,
types=type_alight*unitvec, # access
ids_fromstop=id_stop*unitvec,
ids_tostop=id_stop*unitvec,
lengths=0.0*unitvec,
durations=0.0*unitvec,
ids_links_forward=[[id_transfer, id_exit, ]]*n_transit,
)
# print ' ids_links_forward[ids_transit]',self.ids_links_forward[ids_transit]
# print ' ids_alight.reshape(-1,1).tolist()',ids_alight.reshape(-1,1).tolist()
for id_transit, id_alight in zip(ids_transit, ids_alight):
self.ids_links_forward[id_transit].append(id_alight)
stoplinks[id_stop]['ids_alight'] = ids_alight
# connect walk links from one stop to board and transfer
for id_stop in net.ptstops.get_ids():
ids_walk = stoplinks[id_stop]['ids_walk']
for id_walk, id_tostop in zip(ids_walk, self.ids_tostop[ids_walk]):
self.ids_links_forward[id_walk] = [stoplinks[id_tostop]['id_enter']]
def get_fstar(self, is_array=False):
"""
Returns the forward star graph of the public network as dictionary:
fstar[id_fromedge] = set([id_toedge1, id_toedge2,...])
If is_array is True, an object array indexed by link ID is
returned instead of a dictionary.
"""
ids_link = self.get_ids()
n = len(ids_link)
# algo with dictionnary
#fstar = {}
# for id_link, ids_link_forward in zip(ids_link, self.link_forward[ids_link]):
# fstar[id_link] = ids_link_forward
# algo with numarray as lookup
fstar = np.array(np.zeros(np.max(ids_link)+1, np.object))
fstar[ids_link] = self.ids_links_forward[ids_link]
if is_array:
return fstar
else:
return dict(np.concatenate((ids_link.reshape((-1, 1)), fstar[ids_link].reshape((-1, 1))), 1))
def get_times(self, id_mode=0, speed_max=None):
"""
Returns traversal durations for all PT links as an array indexed
by link ID (unused index positions are zero).
Note: id_mode and speed_max are accepted for interface
compatibility with the edge-based get_times, but are currently
unused -- all durations come from the 'durations' column.
"""
ids_link = self.get_ids()
times = np.array(np.zeros(np.max(ids_link)+1, np.float32))
times[ids_link] = self.durations[ids_link]
return times
def get_stops_to_enter_exit(self):
# Returns two lookup arrays indexed by stop ID: the enter-link ID and
# the exit-link ID of each stop (0 where a stop has none).
ptlinktypes = self.types.choices
ids = self.select_ids(self.types.get_value() == ptlinktypes['enter'])
ids_stops = self.ids_tostop[ids]
stops_to_enter = np.array(np.zeros(np.max(ids_stops)+1, np.int32))
stops_to_enter[ids_stops] = ids
ids = self.select_ids(self.types.get_value() == ptlinktypes['exit'])
ids_stops = self.ids_fromstop[ids]
stops_to_exit = np.array(np.zeros(np.max(ids_stops)+1, np.int32))
stops_to_exit[ids_stops] = ids
return stops_to_enter, stops_to_exit
def print_route(self, ids_link):
# Debug helper: print one line per link of a route with line name,
# endpoint stops and link type ('X' marks walking, i.e. no line).
typemap = get_inversemap(self.types.choices)
for id_link, id_type, id_line, id_fromstop, id_tostop in\
zip(ids_link,
self.types[ids_link],
self.ids_line[ids_link],
self.ids_fromstop[ids_link],
self.ids_tostop[ids_link]
):
if id_line >= 0:
line = self.parent.linenames[id_line]
else:
line = 'X'
print '%4d %06s fromstop=%3d tostop=%3d %06s' % (id_link, line, id_fromstop, id_tostop, typemap[id_type])
def route(self, id_fromstop, id_tostop,
stops_to_enter=None, stops_to_exit=None,
times=None, fstar=None):
"""
Routes public transit from fromstop to tostop.
Returned is the tuple (durations, linktypes, ids_line,
ids_fromstop, ids_tostop), with one entry per stage:
ids_line : line IDs, with negative id for walking
linktypes : type of link
ids_fromstop : IDs of stops where each stage starts
ids_tostop : IDs of stops where each stage ends
durations : Duration of each stage in secs
Empty lists are returned when no route exists.
"""
# print 'route id_fromstop, id_tostop',id_fromstop,id_tostop
# lazily compute the routing inputs when not supplied by the caller
if times is None:
times = self.get_times()
if fstar is None:
fstar = self.get_fstar()
if stops_to_enter is None:
stops_to_enter, stops_to_exit = self.get_stops_to_enter_exit()
ptlinktypes = self.types.choices
#type_enter = ptlinktypes['enter']
type_transit = ptlinktypes['transit']
#type_board = ptlinktypes['board']
#type_alight = ptlinktypes['alight']
#type_transfer = ptlinktypes['transfer']
type_walk = ptlinktypes['walk']
#type_exit = ptlinktypes['exit']
# trick: pick a link that points to the fromstop
# and one that starts with tostop
# later remove first an last link and respective duration
id_fromlink = stops_to_enter[id_fromstop]
id_tolink = stops_to_exit[id_tostop]
print ' route id_fromstop, id_tostop', id_fromstop, id_tostop
print ' route id_fromlink, id_tolink', id_fromlink, id_tolink
routeduration, route = get_mincostroute_edge2edge(
id_fromlink, id_tolink,
weights=times, fstar=fstar
)
# self.print_route(route)
# print ' len(route)',len(route)
if len(route) == 0:
return [], [], [], [], []
# unite links on the same line and determine durations
# for each stage
ids_line = []
ids_fromstop = []
ids_tostop = []
durations = []
linktypes = []
id_line_last = -1
duration_accum = 0.0
# print ' self.types[route]',self.types[route],len(self.types[route])
# print ' self.types[route]',self.types[route],len(self.types[route])
# for linktype,id_line,duration in zip(self.types[route],self.ids_line[route],):
# print ' linktype',linktype,linktype == type_transit,linktype == type_walk
# print ' id_line',id_line
# #print ' ',
for linktype, id_line, duration, id_fromstop, id_tostop in\
zip(self.types[route],
self.ids_line[route],
self.durations[route],
self.ids_fromstop[route], self.ids_tostop[route]):
# print ' linktype',linktype,'id_line',id_line,'duration',duration
if linktype == type_transit:
# check if this link is yet another stop of te same line
if id_line_last == -1:
# no previous line, so it is
# the first transit link in a transit
# init first stage
ids_line.append(id_line)
ids_fromstop.append(id_fromstop)
ids_tostop.append(id_tostop) # will be updated
linktypes.append(linktype)
durations.append(duration+duration_accum) # will be updated
id_line_last = id_line
duration_accum = 0.0
else: # successive stop(s) in a transit
durations[-1] += duration
ids_tostop[-1] = id_tostop
elif linktype == type_walk:
# each walk link is its own stage
ids_line.append(id_line)
ids_fromstop.append(id_fromstop)
ids_tostop.append(id_tostop)
linktypes.append(linktype)
durations.append(duration+duration_accum)
id_line_last = -1
duration_accum = 0.0
else:
# for all other link types simply accumulate duration
# which will be added to a successive walk or transit stage
duration_accum += duration
# print ' ids_line',ids_line
# print ' linktypes',linktypes
# print ' durations',durations
# print ' ids_fromstop',ids_fromstop
# print ' ids_tostop',ids_tostop
return durations, linktypes, ids_line, ids_fromstop, ids_tostop
class LineReader(handler.ContentHandler):
"""SAX handler that reads pt lines from an xml file into a PtLines table."""
def __init__(self, ptlines):
# target PtLines table that receives one row per <line> element
self._ptlines = ptlines
def startElement(self, name, attrs):
# print 'startElement',name
# forward each <line> element's attributes to the table importer
if name == 'line':
self._ptlines.add_xml(**attrs)
| StarcoderdataPython |
3344789 | <filename>examples/smart_thing_quick_start.py<gh_stars>0
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 appliedAIstudio"
__version__ = "0.1"
# needed to run a local version of the AI
from highcliff.ai import AI
# the Highcliff actions to be tested
from highcliff.exampleactions import MonitorBodyTemperature
# get a reference to the ai and its network
# (AI.instance() -- presumably a singleton accessor; confirm in highcliff.ai)
highcliff = AI.instance()
network = highcliff.network()
# execute a single action with a single goal:
# define a test body temperature monitor
class TestBodyTemperatureMonitor(MonitorBodyTemperature):
def behavior(self):
# custom behavior hook invoked by the AI when the action runs
print("We are now monitoring body temperature")
# instantiate the test body temperature monitor
# (registration with the AI happens via the constructor argument)
TestBodyTemperatureMonitor(highcliff)
# define the test world state and goals
network.update_the_world({})
# run a local version of Highcliff for a single iteration
highcliff.set_goals({"is_room_temperature_change_needed": True})
highcliff.run(life_span_in_iterations=1)
| StarcoderdataPython |
1990428 | <filename>tests_runner/cli_tests.py
import os
from tests_runner.utils.command import run_command
from tests_runner.utils.result import TestResult, ResultPrinter
from tests_runner.utils.config import COMPILER_EXEC_PATH, VERSION_FILE
from tests_runner.utils.dir import string_from_file
# Expected stdout of the compiler when invoked with missing or invalid
# arguments; compared verbatim (including the trailing newline) below.
USAGE_HELP = '''USAGE: shtkc FILE OPTION
Available options:
-c Compile program
-ast Print parse tree
-ir Print ShnooTalk IR
-icode Print ShnooTalk IR, but only the icode
-llvm Print LLVM IR
-json-ast Print parse tree in JSON
-json-ir Print ShnooTalk IR in JSON
-json-icode Print ShnooTalk IR in JSON, but only the icode
Use shtkc -version for compiler version
'''
# Expected compiler output when the input file cannot be opened.
FILE_IO_ERROR = "File I/O error\n"
def run_version_test() -> TestResult:
    """Run 'shtkc -version' and check its output against the version file."""
    cmd = [COMPILER_EXEC_PATH, "-version"]
    timedout, output, exit_code = run_command(cmd)

    if exit_code != 0 or timedout:
        return TestResult.failed(output)

    # Bug fix: the comparison was inverted -- the test used to FAIL when
    # the printed version matched the version file and pass otherwise.
    expected = string_from_file(VERSION_FILE)
    if expected != output:
        return TestResult.failed(output, expected)

    return TestResult.passed(output)
def run_invalid_args(empty: bool) -> TestResult:
    """Invoke the compiler with no args (or invalid args); expect usage help."""
    cmd = [COMPILER_EXEC_PATH]
    if not empty:
        cmd += ["TestModules/Math.shtk", "-invalid"]

    timedout, output, exit_code = run_command(cmd)

    # An invalid invocation must fail (non-zero exit, no timeout) ...
    if timedout or exit_code == 0:
        return TestResult.failed(output)

    # ... and print exactly the usage help text.
    if output == USAGE_HELP:
        return TestResult.passed(output)
    return TestResult.failed(output, USAGE_HELP)
def run_file_no_exists() -> TestResult:
    """Compile a non-existent file; expect the file I/O error message."""
    cmd = [COMPILER_EXEC_PATH, "NoExist.shtk", "-c"]
    timedout, output, exit_code = run_command(cmd)

    # The compiler must fail (non-zero exit, no timeout) ...
    if timedout or exit_code == 0:
        return TestResult.failed(output)

    # ... and report exactly the file I/O error string.
    if output == FILE_IO_ERROR:
        return TestResult.passed(output)
    return TestResult.failed(output, FILE_IO_ERROR)
def run() -> None:
    """Execute all CLI-argument tests from inside tests/compiler."""
    os.chdir("tests/compiler")
    printer = ResultPrinter('CLI args')
    # (label, thunk) pairs; each check is run and printed in order so the
    # console output interleaves exactly as before.
    checks = (
        ('No args', lambda: run_invalid_args(empty=True)),
        ('Invalid args', lambda: run_invalid_args(empty=False)),
        ('File not found', run_file_no_exists),
        ('-version', run_version_test),
    )
    for label, check in checks:
        printer.print_result(label, check())
    os.chdir("../..")
| StarcoderdataPython |
5020536 | <filename>app/cli/generators.py
"""Generator Module.
Generates templates using jinja.
"""
import os
import uuid
from jinja2 import Environment, PackageLoader
class ConfigGenerator():
"""Configuration generator rendering Jinja templates from app.cli/templates.

Writes docker-compose and nginx configuration files into the current
working directory.
"""
def __init__(self):
# template environment rooted at the package's bundled templates dir
self.env = Environment(
loader=PackageLoader('app.cli', 'templates')
)
def generate_prod_config(self, environments):
"""Render docker-compose.prod.yml for the given list of environments.

Reads CIRCLE_SHA1 (image tag, defaults to 'latest') and DD_API_KEY
from the process environment (set by CircleCI).
"""
template = self.env.get_template('docker-compose.prod.jinja')
with open('docker-compose.prod.yml', 'w+') as docker_compose_file:
t = template.render(
envs=environments,
circle_sha1=os.environ.get('CIRCLE_SHA1') or 'latest',
DD_API_KEY=os.environ.get('DD_API_KEY') # from circle
)
docker_compose_file.write(t)
def generate_nginx_config(self, domain, environments):
"""Render nginx.conf for *domain* and the given environments."""
template = self.env.get_template('nginx.conf.jinja')
with open('nginx.conf', 'w+') as nginx_file:
t = template.render(
domain=domain,
envs=environments
)
nginx_file.write(t)
| StarcoderdataPython |
3588399 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Sends a single message on opening containing the headers received from the
# browser. The header keys have been converted to lower-case, while the values
# retain the original case.
import json
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
"""mod_pywebsocket hook: accept every handshake without extra checks."""
pass
def web_socket_transfer_data(request):
"""mod_pywebsocket hook: send one JSON message with the request headers.

Header keys are lower-cased (values keep their original case) so that
clients see identical output under Python 2 and 3.
"""
# Since python 3 does not lowercase the dictionary key, manually lower all
# keys to maintain python 2/3 compatibility
lowered_dict = {header.lower(): value for header, value in request.headers_in.items()}
msgutil.send_message(request, json.dumps(lowered_dict))
| StarcoderdataPython |
5154390 | <reponame>EPFL-LCSB/yetfl
from collections import namedtuple
import pandas as pd
import numpy as np
from etfl.io.json import load_json_model
from etfl.optim.config import standard_solver_config, growth_uptake_config
from etfl.optim.variables import GrowthActivation, BinaryActivator
from pytfa.optim.utils import symbol_sum
from time import time
from copy import copy
from etfl.optim.utils import fix_growth, release_growth, \
get_active_growth_bounds, safe_optim
from etfl.optim.variables import ModelVariable,EnzymeVariable, mRNAVariable
from etfl.optim.constraints import ModelConstraint
try:
from gurobipy import GRB
except ModuleNotFoundError:
pass
solver = 'optlang-gurobi'
# solver = 'optlang-cplex'
class TotalResourse(ModelVariable):
"""
Model variable representing the total RNA or protein content.
(The class name keeps its historical spelling to preserve the
public API; its constraints use the TotalResourseConstraint class.)
"""
# prefix used to build the variable's name in the solver model
prefix = 'TOT_'
class TotalResourseConstraint(ModelConstraint):
"""
Model constraint on the total RNA or protein content (pairs with the
TotalResourse variable).
"""
# prefix used to build the constraint's name in the solver model
prefix = 'TTC_'
def _va_sim(model):
    """Variability analysis step: optimize the model's current objective in
    both directions and return (min_solution, max_solution)."""
    solutions = {}
    for direction in ('max', 'min'):
        model.objective.direction = direction
        solutions[direction] = safe_optim(model)
    return solutions['min'], solutions['max']
def simulate(available_uptake, model, variables, warm_start=None):
    """Fix glucose uptake (r_1714) at *available_uptake*, maximise growth,
    then run min/max variability analysis on each variable name in
    *variables*.

    Returns a pd.Series with objective/growth/biomass-composition entries,
    per-variable VA bounds, and all reaction fluxes plus enzyme (EZ_) and
    mRNA (MR_) levels from the growth solution. If the model is infeasible,
    returns a Series of NaNs instead.

    *warm_start* is accepted for interface compatibility but unused here.
    """
    # model.solver.problem.reset()
    model.logger.info('available_uptake = {}'.format(available_uptake))
    # Pin the uptake reaction to exactly the supplied (negative) flux.
    model.reactions.r_1714.lower_bound = available_uptake
    model.reactions.r_1714.upper_bound = available_uptake
    model.growth_reaction.lower_bound = 0
    model.growth_reaction.upper_bound = 10
    model.objective = model.growth_reaction.id
    model.objective.direction = 'max'
    # Return value unused on purpose: feasibility is read from solver status.
    out = safe_optim(model)
    if model.solver.status == 'infeasible':
        # NOTE(review): here 'available_substrate' stores the raw (negative)
        # uptake, while the feasible branch below stores -1*available_uptake
        # — confirm which sign convention downstream consumers expect.
        ret = {'obj':np.nan,
               'mu': np.nan,
               'mu_lb':np.nan,
               'mu_ub':np.nan,
               'available_substrate':available_uptake,
               'uptake':np.nan,
               'prot_ratio':np.nan,
               'mrna_ratio':np.nan
               }
        for var in variables:
            ret[var + '_lb'] = np.nan
            ret[var + '_ub'] = np.nan
        print('INFEASIBLE SOLUTION AT q={}'.format(available_uptake))
        return pd.Series(ret)
    # Keep a snapshot of the growth solution: later VA calls overwrite
    # model.solution.
    growth_solution = copy(model.solution)
    # mu_i, mu_lb, mu_ub = get_active_growth_bounds(model)
    mu = model.growth_reaction.flux
    # release_warm_start(model)
    try:
        # Models with Neidhardt data expose interpolated biomass fractions.
        prot_ratio = model.interpolation_variable.prot_ggdw.variable.primal
        mrna_ratio = model.interpolation_variable.mrna_ggdw.variable.primal
        dna_ratio = model.interpolation_variable.dna_ggdw.variable.primal
        lipid_ratio = model.interpolation_variable.lipid_ggdw.variable.primal
        carbohydrate_ratio = model.interpolation_variable.carbohydrate_ggdw.variable.primal
        ion_ratio = model.interpolation_variable.ion_ggdw.variable.primal
    except AttributeError:
        # Model without Neidhardt data: fall back to the TOT_ variables
        # added by VA_prepare; the remaining fractions are undefined.
        prot_ratio = model.variables.TOT_prot.primal
        mrna_ratio = model.variables.TOT_RNA.primal
        dna_ratio = np.nan
        lipid_ratio = np.nan
        carbohydrate_ratio = np.nan
        ion_ratio = np.nan
    ret = {'obj':model.solution.objective_value,
           'mu': mu,
           # 'mu_lb':mu_lb,
           # 'mu_ub':mu_ub,
           'available_substrate':-1*available_uptake,
           'uptake':-1*growth_solution.fluxes['r_1714'],
           'prot_ratio':prot_ratio,
           'mrna_ratio':mrna_ratio,
           'dna_ratio':dna_ratio,
           'carbohydrate_ratio':carbohydrate_ratio,
           'lipid_ratio':lipid_ratio,
           'ion_ratio':ion_ratio,
           }
    # Fix growth at the optimum so the VA below explores only that face.
    fix_growth(model, model.solution)
    for var in variables:
        # THIS WILL DO THE VA ON ETHANOL
        model.objective = model.variables.get(var)
        lb, ub = _va_sim(model)
        ret[var + '_lb'] = lb.objective_value#np.nan
        ret[var + '_ub'] = ub.objective_value#np.nan
        print(pd.Series(ret))
    release_growth(model)
    # apply_warm_start(model, growth_solution)
    # Add values of other secretions in the ret dictionnary
    for rxn in model.reactions:
        ret[rxn.id] = model.solution.fluxes.loc[rxn.id]
    for enz in model.enzymes:
        ret['EZ_'+ enz.id] = growth_solution.raw.loc['EZ_'+ enz.id]
    for mRNA in model.mrnas:
        ret['MR_'+ mRNA.id] = growth_solution.raw.loc['MR_'+ mRNA.id]
    return pd.Series(ret)
def VA_prepare(model):
    """Prepare the model for total-mRNA / total-protein variability analysis.

    For each resource pool (enzymes -> 'prot', mRNAs -> 'RNA') adds a
    TotalResourse variable bounded in [0, 1] and an equality constraint
    sum(members) - total == 0, so VA can be run on the totals directly.
    """
    for member_kind, tag in ((EnzymeVariable, 'prot'), (mRNAVariable, 'RNA')):
        members = model.get_variables_of_type(member_kind)
        total = model.add_variable(kind=TotalResourse,
                                   hook=model,
                                   id_=tag,
                                   lb=0,
                                   ub=1)
        # Tie the new total to the sum of its member variables.
        model.add_constraint(kind=TotalResourseConstraint,
                             hook=model,
                             expr=symbol_sum([v for v in members]) - total,
                             id_=tag,
                             lb=0,
                             ub=0)
if __name__ == '__main__':
    # Do things
    # Extra variable names to run min/max VA on (besides the totals added by
    # VA_prepare); all candidates are currently commented out.
    variables = [
        # 'EZ_rib',
        # 'EZ_rnap',
        # 'r_1761', # ethanol exchange
        # 'MR_dummy_gene',
        # 'EZ_dummy_enzyme',
        ]
    # uptake_range = pd.Series(np.arange(-15,-20, -1))
    # Uptake scan: fine steps of 1/3 down to -5, then unit steps to -15
    # (negative values = substrate uptake).
    # NOTE(review): pd.Series.append is deprecated (removed in pandas 2.0);
    # confirm the pinned pandas version or switch to pd.concat.
    uptake_range = pd.Series(np.arange(-1/3,-5,-1/3)).append(pd.Series(np.arange(-5,-16,-1)))
    # uptake_range = pd.Series(np.arange(-10,-11,-1))
    # Model variants to benchmark (name -> serialized model file).
    model_files = {
        # 'cEFL':'yeast8_cEFL_2542_enz_128_bins__20200326_152417.json',
        'cETFL':'SlackModel yeast8_cETFL_2542_enz_128_bins__20200326_152515.json',
        # 'vEFL':'yeast8_vEFL_2542_enz_128_bins__20200326_191004.json',
        'vETFL':'SlackModel yeast8_vETFL_2542_enz_128_bins__20200326_190837.json',
    }
    models = {k:load_json_model('models/'+v,solver=solver) for k,v in model_files.items()}
    data = {}
    for name,model in models.items():
        # growth_uptake_config(model)
        model.warm_start = None
        model.logger.info('Simulating ...')
        start = time()
        # Add a new variable for total mRNA and total protein to do VA
        VA_prepare(model)
        # One simulate() call per uptake value; each returns a pd.Series row.
        data[name] = uptake_range.apply(simulate, args=[model,variables])
        stop = time()
        print('Elapsed time: {}'.format(stop - start))
        data[name].to_csv('outputs/benchmark_{}.csv'.format(name))
| StarcoderdataPython |
1996863 | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from mo.graph.graph import Graph
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.const import Const
from extensions.ops.elementwise import Mul, Add
class ConvToBinaryConv(MiddleReplacementPattern):
    """ Transform usual convolution with [0,+1] input and [-1,+1] weights to
    BinaryConvolution.

    Modifies output terms after the Convolution to be able to apply
    BinaryConvolution operation instead that accepts [-1,1] input and [-1,1]
    weights. It requires modification: channel-wise addition with weights
    reduced along all axes except the output channel dimension.
    """
    enabled = True
    force_clean_up = True

    def pattern(self):
        """Match a binarizing FakeQuantize (levels=2) whose output feeds
        input port 0 (the data input, not the weights) of a Convolution."""
        return dict(
            nodes=[
                # This pass is applicable for binarization only. Other intX variants are not relevant.
                ('quantize', dict(kind='op', op='FakeQuantize', levels=2)),
                ('quantized', dict()),  # input tensor, not weights
                ('operator', dict(kind='op', type='Convolution')),
            ],
            edges=[
                ('quantize', 'quantized'),
                ('quantized', 'operator', {'in':0}),
            ]
        )

    def replace_pattern(self, graph: Graph, match: dict):
        """Rewrite the matched Convolution into a BinaryConvolution,
        compensating the output when the quantized input range is [0, 1]."""
        assert match['operator'].has('multiplication_transparent_ports')
        quantize = match['quantize']

        # The quantized tensor must feed the operator through exactly one port.
        port = match['operator'].input_ports_with(match['quantized'])
        assert len(port) >= 1
        if len(port) > 1:
            log.debug('BinarizeWeightsM1P1 cannot apply transformation for data {} because it consumed more'
                      ' than once'.format(match['quantized'].name))
            return
        assert len(port) == 1
        port = port[0]
        applicable = [pair for pair in match['operator'].multiplication_transparent_ports if pair[0] == port]
        if len(applicable) == 0:
            return

        # Look at 3-rd and 4-th inputs of FakeQuantize -- they have constants that should be passed through.
        # Assume that the constant that should be passed through is a scalar.
        output_low = quantize.in_node(3)
        output_high = quantize.in_node(4)
        assert len(output_low.out_nodes()) == 1
        assert len(output_high.out_nodes()) == 1
        if not output_low.has_valid('value') and not output_high.has_valid('value'):
            return
        output_low = output_low.value
        output_high = output_high.value
        operator = match['operator']

        if np.all(np.isclose(output_low, 0)) and np.all(np.isclose(output_high, 1)):
            # Input range [0, 1]: compensate by appending
            # out' = 0.5 * (conv_out + sum(weights per output channel))
            # after the convolution, then let the op consume [-1, 1] input.
            weights = operator.in_node(1).value
            # Reduce weights over every axis except the output-channel axis.
            reduction_indices = set(range(len(weights.shape))) - set([operator.output_feature_channel])
            weights_reduced = np.add.reduce(weights, axis=tuple(reduction_indices))
            weights_reduced = weights_reduced.reshape([len(weights_reduced), 1, 1])

            add_term = Const(graph, {'value': weights_reduced}).create_node()
            add = Add(graph, {}).create_node()
            add.in_port(1).connect(add_term.out_port(0))
            mul_term = Const(graph, {'value': np.array(0.5)}).create_node()
            mul = Mul(graph, {}).create_node()
            mul.in_port(1).connect(mul_term.out_port(0))
            add.out_port(0).connect(mul.in_port(0))

            # Splice Add -> Mul between the convolution and its consumers.
            operator.out_port(0).get_connection().set_source(mul.out_port(0))
            add.in_port(0).connect(operator.out_port(0))
            operator['pad_value'] = float(-1.0)
        elif np.all(np.isclose(output_low, -1)) and np.all(np.isclose(output_high, +1)):
            # Input already in [-1, 1]: no output compensation needed.
            pass
        else:
            log.debug('ConvToBinaryConv: cannot apply transformation because input range is neither in [0, +1] nor '
                      'in [-1, +1].')
            return

        operator['type'] = 'BinaryConvolution'
        operator['mode'] = 'xnor-popcount'
        operator['input'] = operator.in_node(0).shape[1]
        # Weights are not bit-packed yet; there should be a separate transformation to do that

        assert output_low.size == 1
        assert output_high.size == 1

        # Re-fetch the constant nodes (the names above were rebound to the
        # numpy values) and force the FakeQuantize output range to exactly 0/1.
        output_low = quantize.in_node(3)
        output_high = quantize.in_node(4)

        # Make sure that low/high values are exactly 0/1
        output_low.value = np.zeros(output_low.shape)
        output_high.value = np.ones(output_high.shape)
| StarcoderdataPython |
9614712 | from ..nerio.z_wave_CN_validation_testing import z_wave_CN_validation_testing as z_wave_CN_validation_testing_nerio
class z_wave_CN_validation_testing(z_wave_CN_validation_testing_nerio):
    """CN variant of the Z-Wave validation-testing suite; inherits all
    behaviour unchanged from the nerio base implementation."""
    pass
| StarcoderdataPython |
8055852 | <reponame>davidnewman/coneventional
from setuptools import setup, find_packages

# Read package metadata from disk up front, with `with`, so the file handles
# are closed promptly (previously open(...).read() leaked the handles until
# garbage collection).
with open('README.rst', encoding='utf-8') as readme_file:
    LONG_DESCRIPTION = readme_file.read()
with open('requirements.txt') as requirements_file:
    INSTALL_REQUIRES = requirements_file.readlines()

setup(
    name="coneventional",
    version='1.0.0',
    description='Parse conventional event summaries into objects.',
    long_description=LONG_DESCRIPTION,
    keywords=['python', 'events'],
    author='<NAME>',
    url='https://github.com/davidnewman/coneventional',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ]
)
| StarcoderdataPython |
4828331 | <gh_stars>10-100
import sys
import io
import argparse
from tokenize import tokenize
import tokenize as Token
import xml.etree.ElementTree as ET
from xml.dom.minidom import getDOMImplementation, Text
from .astview import AstNode
############# monkey-patch minidom.Text so it doesnt escape "
def _monkey_writexml(self, writer, indent="", addindent="", newl=""):
    """Replacement for xml.dom.minidom.Text.writexml.

    Escapes the XML metacharacters '&', '<' and '>' but, unlike the stock
    implementation, leaves double quotes ('"') untouched in character data.

    Bug fix: the escape calls had degenerated into self-replacements
    (e.g. replace("&", "&")), making the escaping a no-op; the proper
    entity references are restored here.
    """
    data = "%s%s%s" % (indent, self.data, newl)
    if data:
        # '&' must be escaped first so the entities added for '<'/'>'
        # are not themselves re-escaped.
        data = data.replace("&", "&amp;").replace("<", "&lt;"). \
            replace(">", "&gt;")
    writer.write(data)
Text.writexml = _monkey_writexml
##############################################################
# create DOM Document
# Module-level singleton document used as a node factory by all converters.
impl = getDOMImplementation()
DOM = impl.createDocument(None, None, None)
DOM.Text = DOM.createTextNode  # shorthand: DOM.Text(s) builds a text node
def Element(tag_name, text=None):
    """Create a DOM element named *tag_name*.

    When *text* is truthy, a text-node child with that content is
    appended before the element is returned.
    """
    node = DOM.createElement(tag_name)
    if text:
        node.appendChild(DOM.Text(text))
    return node
def pos_byte2str(s):
    """Map each UTF-8 byte offset of *s* to the character index it falls in.

    Returns a list whose k-th entry is the index (in the str) of the
    character whose encoded bytes cover byte position k; multi-byte
    characters therefore repeat their index once per byte.
    """
    return [idx for idx, ch in enumerate(s) for _ in ch.encode('utf-8')]
class AstNodeX(AstNode):
"""add capability to AstNode be convert to XML"""
def to_xml(self, parent=None):
    """Convert this AST node (and its subtree) to XML.

    Dispatches to the node's `c_<ClassName>` converter method; *parent* is
    the XML element the output is appended to (created from the node's own
    class name for the root call).
    """
    # hack for root node
    if parent == None:  # NOTE(review): `parent is None` is the usual idiom
        parent = Element(self.class_)
    # apply converter based on node class_
    converter = getattr(self, 'c_' + self.class_, None)
    if converter:
        try:
            return converter(parent)
        except Exception: # pragma: no cover
            print('Error on {}'.format(self))
            raise
    else: # pragma: no cover
        raise Exception("**** unimplemented coverter %s" % self.class_)
def real_start(self):
    """Return the (line, column) where this node's source really starts.

    Because of http://bugs.python.org/issue18374
    getting the first element is not correct for deeply nested
    parens but good enough most of the time...
    If the column number provided is correct, it is more reliable,
    so use min() to get the "better" one.
    """
    # Composite nodes: recurse into the left-most sub-expression.
    if self.class_ in ('Attribute', 'Subscript'):
        first = self.fields['value'].value
        return first.real_start()
    if self.class_ == 'BinOp':
        first = self.fields['left'].value
        return min((self.line, self.column), first.real_start())
    if self.class_ == 'Call':
        first = self.fields['func'].value
        return first.real_start()
    # AST node shows column in as byte position,
    # convert to char position in unicode string
    line_uni = self.line_list[self.line-1]
    try:
        line_byte = line_uni.encode('utf-8')
    except: # pragma: no cover
        raise Exception(line_uni)
    if len(line_byte) != len(line_uni):
        # Line contains multi-byte characters: remap byte column -> char column.
        column = pos_byte2str(line_uni)[self.column]
    else:
        column = self.column
    return (self.line, column)
###########################################################
# expr
###########################################################
def expr_wrapper(func):
    """Decorator for `c_*` expression converters: deals with optional "()"
    around expressions.

    Because of http://bugs.python.org/issue18374
    the column number of nodes is not reliable.
    So we need to parse until the end of an expression
    to determine if the open parenthesis is being applied to
    the whole expression or just the first element.
    """
    def _build_expr(self, parent):
        #print('>>>>', self.class_, self.real_start())
        next_token = self.tokens.next()
        if next_token.exact_type == Token.LPAR:
            # Paren opens before this node's reported start: remember it on a
            # stack; it may close this whole node or only an inner element.
            if next_token.start < (self.line, self.column):
                lpar_str = self.pop_merge_NL()
                element1_start = self.tokens.next().start
                #print('****', element1_start)
                self.tokens.lpar.append([lpar_str, element1_start, self])
        # Build the expression into a detached fragment first, so the paren
        # decision can be made after the expression has been consumed.
        fragment = Element('frag')
        func(self, fragment)
        # detect if next significant token is RPAR
        has_rparen = False
        pos = -1
        while True:
            token = self.tokens.list[pos]
            if token.exact_type == Token.RPAR:
                has_rparen = True
                break
            elif token.exact_type in (Token.NL, Token.COMMENT):
                pos -= 1
                continue
            else:
                break
        # check if the paren is closing this node
        if has_rparen and self.tokens.lpar:
            lpar_text, start, node = self.tokens.lpar[-1]
            #print(self.class_, start, self.real_start(), node is self)
            if start == self.real_start() or node is self:
                close_paren = True
            else:
                close_paren = False
        else:
            close_paren = False
        # append paren (if any) and fragment to parent
        if close_paren:
            self.tokens.lpar.pop()
            parent.appendChild(DOM.Text(lpar_text))
            for child in fragment.childNodes:
                parent.appendChild(child)
            text = self.pop_merge_NL(lspace=True, rspace=False)
            parent.appendChild(DOM.Text(text))
        else:
            for child in fragment.childNodes:
                parent.appendChild(child)
        # print('<<<', self.class_)
    return _build_expr
def pop_merge_NL(self, lspace=False, rspace=True, exact_type=None):
    """Pop one token plus surrounding NL/COMMENT tokens.

    :lspace (bool): also include whitespace before the first popped token
    :rspace (bool): also include whitespace after the last popped token
    :exact_type (str): only match given token; on mismatch, return early
                       with whatever NL/COMMENT text was consumed so far
    :return: concatenated text of the NL's and the token
    """
    text = ''
    found_token = False
    include_left = lspace
    while True:
        next_token = self.tokens.next()
        if next_token.exact_type not in (Token.NL, Token.COMMENT):
            if found_token:
                # A second significant token: stop before consuming it.
                break
            elif exact_type and exact_type != next_token.exact_type:
                # FIXME deal with new line before figure out not a match
                return text
            found_token = True
        self.tokens.pop()
        if include_left:
            text += self.tokens.prev_space()
        text += next_token.string
        include_left = True
    if rspace:
        text += self.tokens.space_right()
    return text
def _c_delimiter(self, ele):
    """Consume optional trailing delimiters (comma / NL / comment) and
    append their text, including the space to their right, to *ele*."""
    delimiters = (Token.COMMA, Token.NL, Token.COMMENT)
    text = ''
    while self.tokens.next().exact_type in delimiters:
        token = self.tokens.pop()
        text += self.tokens.prev_space() + token.string
    text += self.tokens.space_right()
    ele.appendChild(DOM.Text(text))
def c_Module(self, parent):
    """Convert a Module node: one child element per body statement."""
    ele = Element('Module')
    for stmt in self.fields['body'].value:
        stmt.to_xml(ele)
    return ele
@expr_wrapper
def c_Num(self, parent):
    """Convert a numeric literal: consume one NUMBER token, emit <Num>."""
    token = self.tokens.pop()
    assert token.type == Token.NUMBER, self.tokens.current
    parent.appendChild(Element('Num', text=token.string))
@expr_wrapper
def c_Str(self, parent):
ele = Element(self.class_)
token = self.tokens.pop()
while True:
assert token.type == Token.STRING, self.tokens.current
ele_s = Element('s', text=token.string)
ele.appendChild(ele_s)
# check if next token is a string (implicit concatenation)
pos = -1
continue_string = True
while True:
token = self.tokens.list[pos]
if token.type == Token.STRING:
break
elif token.exact_type in (Token.NL, Token.COMMENT):
pos -= 1
else:
continue_string = False
break
if not continue_string:
break
text = ''
for x in range(-pos - 1):
token = self.tokens.pop()
text += self.tokens.prev_space() + token.string
# add space before next string concatenated
token = self.tokens.pop()
ele.appendChild(DOM.Text(text + self.tokens.prev_space()))
parent.appendChild(ele)
c_Bytes = c_Str
@expr_wrapper
def c_Tuple(self, parent):
ele = Element('Tuple')
ele.setAttribute('ctx', self.fields['ctx'].value.class_)
elts = self.fields['elts'].value
if elts:
first = True
for item in elts:
if not first:
ele.appendChild(DOM.Text(self.tokens.space_right()))
first = False
item.to_xml(ele)
text = self.pop_merge_NL(lspace=True, exact_type=Token.COMMA,
rspace=False)
ele.appendChild(DOM.Text(text))
else:
# special case, empty tuple is represented by an empty `()`
assert self.tokens.pop().exact_type == Token.LPAR
assert self.tokens.pop().exact_type == Token.RPAR
text = '(' + self.tokens.prev_space() + ')'
ele.appendChild(DOM.Text(text))
parent.appendChild(ele)
@expr_wrapper
def c_List(self, parent):
ele = Element(self.class_)
if 'ctx' in self.fields: # set doesnt have ctx
ele.setAttribute('ctx', self.fields['ctx'].value.class_)
ele.appendChild(DOM.Text(self.pop_merge_NL())) #LSQB
for item in self.fields['elts'].value:
item.to_xml(ele)
self._c_delimiter(ele)
# close brackets
assert self.tokens.pop().type == Token.OP
ele.appendChild(DOM.Text(self.tokens.current.string))
parent.appendChild(ele)
c_Set = c_List
@expr_wrapper
def c_Dict(self, parent):
ele = Element('Dict')
parent.appendChild(ele)
ele.appendChild(DOM.Text(self.pop_merge_NL())) # LBRACE
for key, value in zip(self.fields['keys'].value,
self.fields['values'].value):
item_ele = Element('item')
ele.appendChild(item_ele)
key.to_xml(item_ele)
# COLON
item_ele.appendChild(DOM.Text(self.pop_merge_NL(lspace=True)))
value.to_xml(item_ele)
# optional comma
self._c_delimiter(ele)
# close text
assert self.tokens.pop().exact_type == Token.RBRACE
close_text = '}'
ele.appendChild(DOM.Text(close_text))
@expr_wrapper
def c_Name(self, parent):
assert self.tokens.pop().type == Token.NAME, self.tokens.current
ele = Element('Name', text=self.fields['id'].value)
ele.setAttribute('name', self.fields['id'].value)
ele.setAttribute('ctx', self.fields['ctx'].value.class_)
parent.appendChild(ele)
@expr_wrapper
def c_NameConstant(self, parent):
assert self.tokens.pop().type == Token.NAME
ele = Element('NameConstant', text=self.tokens.current.string)
parent.appendChild(ele)
@expr_wrapper
def c_Ellipsis(self, parent):
assert self.tokens.pop().type == Token.OP
ele = Element('Ellipsis', text=self.tokens.current.string)
parent.appendChild(ele)
@expr_wrapper
def c_Starred(self, parent):
assert self.tokens.pop().exact_type == Token.STAR
text = '*' + self.tokens.space_right()
ele = Element('Starred', text=text)
ele.setAttribute('ctx', self.fields['ctx'].value.class_)
self.fields['value'].value.to_xml(ele)
parent.appendChild(ele)
@expr_wrapper
def c_Attribute(self, parent):
attribute_ele = Element('Attribute')
attribute_ele.setAttribute('ctx', self.fields['ctx'].value.class_)
# value
value_ele = Element('value')
self.fields['value'].value.to_xml(value_ele)
attribute_ele.appendChild(value_ele)
# dot
text = self.pop_merge_NL(lspace=True)
attribute_ele.appendChild(DOM.Text(text))
# attr name
assert self.tokens.pop().type == Token.NAME, self.tokens.current
attr_ele = Element('attr', text=self.tokens.current.string)
attribute_ele.appendChild(attr_ele)
parent.appendChild(attribute_ele)
def c_Index(self, parent):
ele = Element('Index')
self.fields['value'].value.to_xml(ele)
parent.appendChild(ele)
def c_Slice(self, parent):
ele = Element('Slice')
parent.appendChild(ele)
# lower
lower = self.fields['lower'].value
if lower:
ele_lower = Element('lower')
lower.to_xml(ele_lower)
ele.appendChild(ele_lower)
ele.appendChild(DOM.Text(self.tokens.space_right()))
# first colon
ele.appendChild(DOM.Text(self.pop_merge_NL(rspace=False)))
# upper
upper = self.fields['upper'].value
if upper:
ele.appendChild(DOM.Text(self.tokens.space_right()))
ele_upper = Element('upper')
upper.to_xml(ele_upper)
ele.appendChild(ele_upper)
if self.tokens.next().exact_type == Token.COLON:
colon2_text = self.pop_merge_NL(lspace=True, rspace=False)
ele.appendChild(DOM.Text(colon2_text)) # COLON
# step
step = self.fields['step'].value
if step:
ele.appendChild(DOM.Text(self.tokens.prev_space()))
ele_step = Element('step')
step.to_xml(ele_step)
ele.appendChild(ele_step)
def c_ExtSlice(self, parent):
dims = self.fields['dims'].value
for item in dims:
item.to_xml(parent)
self._c_delimiter(parent)
@expr_wrapper
def c_Subscript(self, parent):
sub_ele = Element('Subscript')
sub_ele.setAttribute('ctx', self.fields['ctx'].value.class_)
parent.appendChild(sub_ele)
# value
value_ele = Element('value')
self.fields['value'].value.to_xml(value_ele)
sub_ele.appendChild(value_ele)
# slice
ele_slice = Element('slice')
ele_slice.appendChild(DOM.Text(self.pop_merge_NL())) # LSQB
self.fields['slice'].value.to_xml(ele_slice)
close_text = self.pop_merge_NL(lspace=True, rspace=False) #RSQB
ele_slice.appendChild(DOM.Text(close_text))
sub_ele.appendChild(ele_slice)
@expr_wrapper
def c_Yield(self, parent):
assert self.tokens.pop().string == 'yield'
yield_text = self.tokens.current.string + self.tokens.space_right()
ele = Element(self.class_, text=yield_text)
# from (only for YieldFrom)
if self.class_ == 'YieldFrom':
assert self.tokens.pop().string == 'from'
from_text = self.tokens.current.string + self.tokens.space_right()
ele.appendChild(DOM.Text(from_text))
# value
value = self.fields['value'].value
if value:
value.to_xml(ele)
parent.appendChild(ele)
c_YieldFrom = c_Yield
@expr_wrapper
def c_BinOp(self, parent):
ele = Element(self.class_)
self.fields['left'].value.to_xml(ele)
# operator
op = self.fields['op'].value
op_text = self.pop_merge_NL(lspace=True) # OP
ele.appendChild(Element(op.class_, text=op_text))
# right value
self.fields['right'].value.to_xml(ele)
parent.appendChild(ele)
@expr_wrapper
def c_BoolOp(self, parent):
ele = Element(self.class_)
ele.setAttribute('op', self.fields['op'].value.class_)
for index, value in enumerate(self.fields['values'].value):
if index:
# prepend operator text to all values but first one
op_text = self.pop_merge_NL(lspace=True)
ele.appendChild(DOM.Text(op_text))
ele_value = Element('value')
value.to_xml(ele_value)
ele.appendChild(ele_value)
parent.appendChild(ele)
@expr_wrapper
def c_UnaryOp(self, parent):
self.tokens.pop() # operator can be an OP or NAME
op_text = self.tokens.current.string
ele = Element(self.class_, text=op_text)
self.tokens.write_non_ast_tokens(ele)
ele.setAttribute('op', self.fields['op'].value.class_)
self.fields['operand'].value.to_xml(ele)
parent.appendChild(ele)
CMP_TOKEN_COUNT = {
'Lt': 1, # <
'Eq': 1, # ==
'Gt': 1, # >
'GtE': 1, # >=
'In': 1, # in
'Is': 1, # is
'IsNot': 2, # is not
'Lt': 1, # <
'LtE': 1, # <=
'NotEq': 1, # !=
'NotIn': 2, # not in
}
@expr_wrapper
def c_Compare(self, parent):
ele = Element(self.class_)
ele_left = Element('value')
self.fields['left'].value.to_xml(ele_left)
ele.appendChild(ele_left)
for op, value in zip(self.fields['ops'].value,
self.fields['comparators'].value):
cmp_text = self.tokens.space_right()
for token in range(self.CMP_TOKEN_COUNT[op.class_]):
cmp_text += self.pop_merge_NL()
ele_op = Element('cmpop', text=cmp_text)
ele.appendChild(ele_op)
# value
ele_value = Element('value')
value.to_xml(ele_value)
ele.appendChild(ele_value)
parent.appendChild(ele)
def _c_call_keyword(self, parent, keyword):
ele_keyword = Element('keyword')
parent.appendChild(ele_keyword)
# arg
assert self.tokens.pop().type == Token.NAME, self.tokens.current
ele_arg = Element('arg', text=keyword.fields['arg'].value)
ele_keyword.appendChild(ele_arg)
# equal
ele_keyword.appendChild(DOM.Text(self.pop_merge_NL(lspace=True)))
# value
ele_val = Element('value')
keyword.fields['value'].value.to_xml(ele_val)
ele_keyword.appendChild(ele_val)
self._c_delimiter(parent)
def _c_call_star_arg(self, ele, xarg, field):
token = self.tokens.pop()
# START DOUBLESTAR
assert token.type == Token.OP, self.tokens.current
text = token.string + self.tokens.space_right()
ele_xargs = Element(field, text=text)
xarg.to_xml(ele_xargs)
ele.appendChild(ele_xargs)
# optional comma
self._c_delimiter(ele)
def _c_call_keywords_starargs(self, ele):
# keywords args can appear both before and after starargs
# so it is required to sort them by position
keywds_and_star = []
# get starargs
starargs = self.fields['starargs'].value
if starargs:
start_pos = (starargs.line, starargs.column)
keywds_and_star.append((start_pos, 'starargs', starargs))
# get keywords
keywords = self.fields['keywords'].value
for keyword in keywords:
kw_val = keyword.fields['value'].value
start_pos = (kw_val.line, kw_val.column)
keywds_and_star.append((start_pos, 'keyword', keyword))
# add keywords and starargs
for _, atype, arg in sorted(keywds_and_star):
if atype == 'starargs':
self._c_call_star_arg(ele, arg, 'starargs')
else:
self._c_call_keyword(ele, arg)
@expr_wrapper
def c_Call(self, parent):
ele = Element('Call')
# func
ele_func = Element('func')
self.fields['func'].value.to_xml(ele_func)
ele.appendChild(ele_func)
ele.appendChild(DOM.Text(self.pop_merge_NL(lspace=True))) # LPAR
# args
args = self.fields['args'].value
if args:
ele_args = Element('args')
ele.appendChild(ele_args)
for arg in args:
arg.to_xml(ele_args)
# optional comma
self._c_delimiter(ele_args)
self._c_call_keywords_starargs(ele)
kwargs = self.fields['kwargs'].value
if kwargs:
self._c_call_star_arg(ele, kwargs, 'kwargs')
assert self.tokens.pop().exact_type == Token.RPAR, self.tokens.current
ele.appendChild(DOM.Text(')'))
parent.appendChild(ele)
@expr_wrapper
def c_IfExp(self, parent):
ele = Element('IfExpr')
parent.appendChild(ele)
# body
ele_body = Element('body')
self.fields['body'].value.to_xml(ele_body)
ele.appendChild(ele_body)
# if
ele.appendChild(DOM.Text(self.pop_merge_NL(lspace=True)))
# test
ele_test = Element('test')
self.fields['test'].value.to_xml(ele_test)
ele.appendChild(ele_test)
# else
ele.appendChild(DOM.Text(self.pop_merge_NL(lspace=True)))
# orelse
ele_orelse = Element('orelse')
self.fields['orelse'].value.to_xml(ele_orelse)
ele.appendChild(ele_orelse)
@expr_wrapper
def c_GeneratorExp(self, parent):
ele = Element(self.class_)
if self.class_ != 'GeneratorExp':
ele.appendChild(DOM.Text(self.pop_merge_NL())) #LSQB
if 'elt' in self.fields: # GeneratorExp ListComp SetComp
# elt
ele_elt = Element('elt')
ele.appendChild(ele_elt)
self.fields['elt'].value.to_xml(ele_elt)
else: # DictComp
ele_key = Element('key')
ele.appendChild(ele_key)
self.fields['key'].value.to_xml(ele_key)
ele.appendChild(DOM.Text(self.pop_merge_NL(lspace=True))) # COLON
ele_value = Element('value')
ele.appendChild(ele_value)
self.fields['value'].value.to_xml(ele_value)
# generators
ele_gen = Element('generators')
ele.appendChild(ele_gen)
for gen in self.fields['generators'].value:
ele_comp = Element('comprehension')
ele_gen.appendChild(ele_comp)
# for
for_text = self.pop_merge_NL(lspace=True) # for
ele_comp.appendChild(DOM.Text(for_text))
# target
ele_target = Element('target')
gen.fields['target'].value.to_xml(ele_target)
ele_comp.appendChild(ele_target)
# in
in_text = self.pop_merge_NL(lspace=True) # in
ele_comp.appendChild(DOM.Text(in_text))
# iter
ele_iter = Element('iter')
gen.fields['iter'].value.to_xml(ele_iter)
ele_comp.appendChild(ele_iter)
# ifs
ifs = gen.fields['ifs'].value
if ifs:
ele_ifs = Element('ifs')
ele_comp.appendChild(ele_ifs)
for gif in ifs:
ele_if = Element('if')
ele_ifs.appendChild(ele_if)
# if
if_text = self.pop_merge_NL(lspace=True) # if
ele_if.appendChild(DOM.Text(if_text))
# target
gif.to_xml(ele_if)
# close brackets
if self.class_ != 'GeneratorExp':
close_text = self.pop_merge_NL(lspace=True, rspace=False)
ele.appendChild(DOM.Text(close_text))
parent.appendChild(ele)
c_ListComp = c_GeneratorExp
c_SetComp = c_GeneratorExp
c_DictComp = c_GeneratorExp
@expr_wrapper
def c_Lambda(self, parent):
assert self.tokens.pop().string == 'lambda'
ele = Element('Lambda', text='lambda' + self.tokens.space_right())
# arguments
ele_arguments = Element('arguments')
self._arguments(ele_arguments)
ele.appendChild(ele_arguments)
# COLON :
ele.appendChild(DOM.Text(self.pop_merge_NL()))
# body
ele_body = Element('body')
self.fields['body'].value.to_xml(ele_body)
ele.appendChild(ele_body)
parent.appendChild(ele)
###########################################################
# stmt
###########################################################
def _c_field_list(self, parent, field_name, text=None):
"""must a field list that contains line, number information"""
ele = Element(field_name, text=text)
for item in self.fields[field_name].value:
item.to_xml(ele)
parent.appendChild(ele)
def c_Expr(self, parent):
self.tokens.write_non_ast_tokens(parent)
ele = Element('Expr')
self.fields['value'].value.to_xml(ele)
parent.appendChild(ele)
def c_Pass(self, parent):
self.tokens.write_non_ast_tokens(parent)
assert self.tokens.pop().type == Token.NAME
parent.appendChild(Element(self.class_,
text=self.tokens.current.string))
c_Break = c_Pass
c_Continue = c_Pass
def c_Assert(self, parent):
self.tokens.write_non_ast_tokens(parent)
assert self.tokens.pop().string == 'assert'
assert_text = 'assert' + self.tokens.space_right()
assert_ele = Element('Assert', text=assert_text)
# test expr
test_ele = Element('test')
self.fields['test'].value.to_xml(test_ele)
assert_ele.appendChild(test_ele)
# msg
msg = self.fields['msg'].value
if msg:
assert self.tokens.pop().exact_type == Token.COMMA
assert_ele.appendChild(DOM.Text(self.tokens.text_prev2next()))
msg_ele = Element('msg')
msg.to_xml(msg_ele)
assert_ele.appendChild(msg_ele)
parent.appendChild(assert_ele)
def c_Assign(self, parent):
self.tokens.write_non_ast_tokens(parent)
ele = Element('Assign')
# targets
ele_targets = Element('targets')
ele.appendChild(ele_targets)
for target in self.fields['targets'].value:
target.to_xml(ele_targets)
# op `=`
assert self.tokens.pop().exact_type == Token.EQUAL
ele_targets.appendChild(DOM.Text(self.tokens.text_prev2next()))
# value
self.fields['value'].value.to_xml(ele)
parent.appendChild(ele)
def c_Delete(self, parent):
self.tokens.write_non_ast_tokens(parent)
assert self.tokens.pop().string == 'del'
ele = Element('Delete', text='del' + self.tokens.space_right())
# targets
ele_targets = Element('targets')
ele.appendChild(ele_targets)
for target in self.fields['targets'].value:
target.to_xml(ele_targets)
# optional comma
self._c_delimiter(ele_targets)
parent.appendChild(ele)
def c_Global(self, parent):
self.tokens.write_non_ast_tokens(parent)
assert self.tokens.pop().type == Token.NAME
text = self.tokens.current.string + self.tokens.space_right()
ele = Element(self.class_, text=text)
# names
ele_names = Element('names')
ele.appendChild(ele_names)
for name in self.fields['names'].value:
assert self.tokens.pop().type == Token.NAME
ele_name = Element('name', text=name.value)
ele_names.appendChild(ele_name)
# optional comma
self._c_delimiter(ele_names)
parent.appendChild(ele)
c_Nonlocal = c_Global
def c_AugAssign(self, parent):
self.tokens.write_non_ast_tokens(parent)
ele = Element('AugAssign')
parent.appendChild(ele)
# target
ele_target = Element('target')
self.fields['target'].value.to_xml(ele_target)
ele.appendChild(ele_target)
# op
ele_op = Element('op')
assert self.tokens.pop().type == Token.OP
op = self.fields['op'].value
ele_op_val = Element(op.class_, text=self.tokens.text_prev2next())
ele_op.appendChild(ele_op_val)
ele.appendChild(ele_op)
# value
ele_value = Element('value')
self.fields['value'].value.to_xml(ele_value)
ele.appendChild(ele_value)
def _c_import_names(self, ele):
for child in self.fields['names'].value:
alias = Element('alias')
# add name
self.tokens.pop_dotted_name()
name_ele = Element('name', text=child.fields['name'].value)
alias.appendChild(name_ele)
# check if optional asname is present
asname = child.fields.get('asname', None)
if asname.value:
assert self.tokens.pop().string == 'as'
alias.appendChild(DOM.Text(self.tokens.text_prev2next()))
assert self.tokens.pop().type == Token.NAME
alias.appendChild(Element('asname', text=asname.value))
ele.appendChild(alias)
self._c_delimiter(ele)
def c_Import(self, parent):
    """Convert an ``import`` statement node to XML."""
    # leading non-AST tokens (comments, layout) go straight to the parent
    self.tokens.write_non_ast_tokens(parent)
    keyword = self.tokens.pop()
    assert keyword.string == 'import'
    import_text = 'import' + self.tokens.space_right()
    import_ele = Element('Import', text=import_text)
    self._c_import_names(import_ele)
    parent.appendChild(import_ele)
def c_ImportFrom(self, parent):
    """Convert a ``from X import Y`` statement node to XML."""
    self.tokens.write_non_ast_tokens(parent)
    ele = Element('ImportFrom')
    # level: number of leading dots of a relative import
    ele.setAttribute('level', str(self.fields['level'].value))
    # from <module>
    assert self.tokens.pop().string == 'from'
    from_text = 'from' + self.tokens.space_right()
    # get level dots (relative-import prefix)
    while self.tokens.next().exact_type == Token.DOT:
        from_text += '.'
        self.tokens.pop() # dot
    ele.appendChild(DOM.Text(from_text))
    # get module name (stays empty for a purely relative `from . import x`)
    module_text = ''
    if self.tokens.next().string != 'import':
        module_text += self.tokens.pop_dotted_name()
    ele.appendChild(Element('module', text=module_text))
    # import keyword
    assert self.tokens.pop().string == 'import'
    ele.appendChild(DOM.Text(self.tokens.text_prev2next()))
    # parenthesis: names may be wrapped as `from x import (a, b)`
    token = self.tokens.next()
    has_paren = False
    if token.exact_type == Token.LPAR:
        has_paren = True
        ele.appendChild(DOM.Text(self.pop_merge_NL())) # LPAR
    # names
    names = Element('names')
    self._c_import_names(names)
    ele.appendChild(names)
    if has_paren:
        ele.appendChild(DOM.Text(self.pop_merge_NL())) # RPAR
    # append to parent
    parent.appendChild(ele)
def c_Return(self, parent):
    """Convert a ``return`` statement node to XML."""
    self.tokens.write_non_ast_tokens(parent)
    keyword = self.tokens.pop()
    assert keyword.string == 'return'
    return_ele = Element('Return', text='return')
    returned = self.fields['value'].value
    # a bare `return` has no value child
    if returned:
        spacing = self.tokens.space_right()
        return_ele.appendChild(DOM.Text(spacing))
        returned.to_xml(return_ele)
    parent.appendChild(return_ele)
def _arg_element(self, arg, default=None, kwonly=False):
    """Build the XML node for one function argument.

    Consumes the ``: annotation`` and ``= default`` tokens when present.

    :return: XML node
    """
    arg_ele = Element('arg')
    arg_ele.setAttribute('name', arg.fields['arg'].value)
    arg_ele.appendChild(DOM.Text(arg.fields['arg'].value))
    if kwonly:
        arg_ele.setAttribute('kwonly', None)
    # optional annotation: `name: expr`
    ann = arg.fields['annotation'].value
    if ann:
        assert self.tokens.pop().exact_type == Token.COLON
        ann_ele = Element('annotation')
        ann_text = self.tokens.text_prev2next()
        ann_ele.appendChild(DOM.Text(ann_text))
        ann.to_xml(ann_ele)
        arg_ele.appendChild(ann_ele)
    # keyword_only arg might not have a default None instead of an ast node
    if hasattr(default, 'fields'):
        assert self.tokens.pop().exact_type == Token.EQUAL
        default_ele = Element('default')
        equal_text = self.tokens.text_prev2next()
        default_ele.appendChild(DOM.Text(equal_text))
        default.to_xml(default_ele)
        arg_ele.appendChild(default_ele)
    return arg_ele
def _star_arg(self, ele_arguments, arguments, field):
    """handle vararg and kwarg (``*args`` / ``**kwargs``)"""
    arg = arguments.fields[field].value
    if arg:
        ele_arg = Element(field)
        token = self.tokens.pop()
        # STAR / DOUBLESTAR operator token
        assert token.type == Token.OP, self.tokens.current
        star_text = token.string
        ele_arg.appendChild(DOM.Text(star_text))
        assert self.tokens.pop().type == Token.NAME
        ele_arg.appendChild(DOM.Text(self.tokens.prev_space()))
        ele_arg.appendChild(self._arg_element(arg))
        ele_arguments.appendChild(ele_arg)
        # optional trailing comma
        self._c_delimiter(ele_arguments)
def _arguments(self, ele_arguments):
    """convert arguments for FuncDef and Lambda"""
    arguments = self.fields['args'].value
    # args: the trailing entries of `defaults` line up with the last
    # positional args, so left-pad with None
    args = arguments.fields['args'].value
    if args:
        f_defaults = arguments.fields['defaults'].value
        defaults = ([None] * (len(args) - len(f_defaults))) + f_defaults
        for arg, default in zip(args, defaults):
            assert self.tokens.pop().type == Token.NAME, self.tokens.current
            arg_ele = self._arg_element(arg, default)
            ele_arguments.appendChild(arg_ele)
            self._c_delimiter(ele_arguments)
    # vararg (*args)
    self._star_arg(ele_arguments, arguments, 'vararg')
    # kwonlyargs: kw_defaults has one entry (possibly None) per kwonly arg
    kwonlyargs = arguments.fields['kwonlyargs'].value
    kw_defaults = arguments.fields['kw_defaults'].value
    if kwonlyargs and not arguments.fields['vararg'].value:
        # if there is kwonly args but no vararg it needs an extra '*' arg
        assert self.tokens.pop().exact_type == Token.STAR
        ele_arguments.appendChild(DOM.Text('*' + self.tokens.space_right()))
        self._c_delimiter(ele_arguments)
    for arg, default in zip(kwonlyargs, kw_defaults):
        assert self.tokens.pop().type == Token.NAME, self.tokens.current
        arg_ele = self._arg_element(arg, default, kwonly=True)
        ele_arguments.appendChild(arg_ele)
        self._c_delimiter(ele_arguments)
    # kwarg (**kwargs)
    self._star_arg(ele_arguments, arguments, 'kwarg')
def _c_decorator_list(self, parent):
    """Emit one <decorator> element per decorator of a def/class."""
    decorators = self.fields['decorator_list'].value
    for deco in decorators:
        assert self.tokens.pop().exact_type == Token.AT
        deco_text = '@' + self.tokens.space_right()
        ele_deco = Element('decorator', text=deco_text)
        parent.appendChild(ele_deco)
        deco.to_xml(ele_deco)
        # newline / comments between this decorator and the next line
        self.tokens.write_non_ast_tokens(parent)
def c_FunctionDef(self, parent):
    """Convert a function definition (decorators, signature, body)."""
    self.tokens.write_non_ast_tokens(parent)
    ele = Element('FunctionDef')
    # decorator
    self._c_decorator_list(ele)
    # def
    assert self.tokens.pop().string == 'def'
    ele.appendChild(DOM.Text('def' + self.tokens.space_right()))
    # name
    assert self.tokens.pop().type == Token.NAME
    name = self.fields['name'].value
    ele.setAttribute('name', name)
    ele.appendChild(DOM.Text(name))
    # args
    start_arguments_text = self.pop_merge_NL(lspace=True) # LPAR
    ele_arguments = Element('arguments', text=start_arguments_text)
    self._arguments(ele_arguments)
    # close paren + colon
    assert self.tokens.pop().exact_type == Token.RPAR
    close_args_text = ')' + self.tokens.space_right()
    ele_arguments.appendChild(DOM.Text(close_args_text))
    returns = self.fields['returns'].value
    if returns:
        # `-> annotation` return annotation
        assert self.tokens.pop().type == Token.OP # ->
        arrow_text = '->' + self.tokens.space_right()
        ele_returns = Element('returns', text=arrow_text)
        ele_arguments.appendChild(ele_returns)
        returns.to_xml(ele_returns)
        ele_returns.appendChild(DOM.Text(self.tokens.space_right()))
    # colon
    assert self.tokens.pop().exact_type == Token.COLON
    colon_text = ':'
    ele_arguments.appendChild(DOM.Text(colon_text))
    ele.appendChild(ele_arguments)
    # body
    self._c_field_list(ele, 'body')
    parent.appendChild(ele)
def c_ClassDef(self, parent):
    """Convert a class definition (decorators, bases/keywords, body)."""
    self.tokens.write_non_ast_tokens(parent)
    ele = Element('ClassDef')
    # decorator
    self._c_decorator_list(ele)
    # class
    assert self.tokens.pop().string == 'class'
    ele.appendChild(DOM.Text('class'))
    # name
    assert self.tokens.pop().type == Token.NAME
    name = self.fields['name'].value
    ele.setAttribute('name', name)
    text = self.tokens.prev_space() + name
    ele.appendChild(DOM.Text(text))
    # arguments: base classes / keywords are optional (`class C:` has none)
    if self.tokens.next().exact_type == Token.LPAR:
        start_arguments_text = self.pop_merge_NL(lspace=True)
        ele_arguments = Element('arguments', text=start_arguments_text)
        bases = self.fields['bases'].value
        for item in bases:
            ele_base = Element('base')
            item.to_xml(ele_base)
            ele_arguments.appendChild(ele_base)
            self._c_delimiter(ele_arguments)
        self._c_call_keywords_starargs(ele_arguments)
        # NOTE(review): ClassDef 'kwargs'/'starargs' fields exist only on
        # pre-3.5 ASTs — confirm the targeted Python version
        kwargs = self.fields['kwargs'].value
        if kwargs:
            self._c_call_star_arg(ele_arguments, kwargs, 'kwargs')
        # close arguments
        assert self.tokens.pop().exact_type == Token.RPAR
        ele_arguments.appendChild(DOM.Text(')'))
        ele.appendChild(ele_arguments)
    # colon
    assert self.tokens.pop().exact_type == Token.COLON
    ele.appendChild(DOM.Text(self.tokens.prev_space() + ':'))
    # body
    self._c_field_list(ele, 'body')
    parent.appendChild(ele)
def c_While(self, parent):
    """Convert a while/if statement node (shared handler, see c_If)."""
    self.tokens.write_non_ast_tokens(parent)
    # 'while' or 'if' keyword; self.class_ picks the element name
    assert self.tokens.pop().type == Token.NAME
    while_text = self.tokens.current.string + self.tokens.space_right()
    ele = Element(self.class_, text=while_text)
    # test expr
    test_ele = Element('test')
    self.fields['test'].value.to_xml(test_ele)
    ele.appendChild(test_ele)
    # colon
    assert self.tokens.pop().exact_type == Token.COLON
    ele.appendChild(DOM.Text(self.tokens.prev_space() + ':'))
    # body
    self._c_field_list(ele, 'body')
    # orelse
    orelse = self.fields['orelse'].value
    if orelse:
        self.tokens.write_non_ast_tokens(ele, rspace=False)
        if self.tokens.next().string == 'elif':
            # `elif` is represented as a nested If inside orelse
            ele_orelse = Element('orelse')
            orelse[0].to_xml(ele_orelse)
            ele.appendChild(ele_orelse)
        else:
            assert self.tokens.pop().string == 'else', self.tokens.current
            else_text = self.tokens.text_prev2next() + ':'
            assert self.tokens.pop().exact_type == Token.COLON
            self._c_field_list(ele, 'orelse', text=else_text)
    parent.appendChild(ele)
c_If = c_While
def c_For(self, parent):
    """Convert a for statement (target, iter, body, optional else)."""
    self.tokens.write_non_ast_tokens(parent)
    assert self.tokens.pop().string == 'for'
    for_text = self.tokens.current.string + self.tokens.space_right()
    ele = Element(self.class_, text=for_text)
    # target expr
    ele_target = Element('target')
    self.fields['target'].value.to_xml(ele_target)
    ele.appendChild(ele_target)
    # 'in'
    assert self.tokens.pop().string == 'in'
    in_text = self.tokens.text_prev2next()
    ele.appendChild(DOM.Text(in_text))
    # iter
    ele_iter = Element('iter')
    self.fields['iter'].value.to_xml(ele_iter)
    ele.appendChild(ele_iter)
    # colon
    assert self.tokens.pop().exact_type == Token.COLON
    ele.appendChild(DOM.Text(self.tokens.prev_space() + ':'))
    # body
    self._c_field_list(ele, 'body')
    # ele is attached before the optional else clause is processed;
    # later appends still land inside ele
    parent.appendChild(ele)
    # else
    orelse = self.fields['orelse'].value
    if orelse:
        self.tokens.write_non_ast_tokens(ele, rspace=False)
        assert self.tokens.pop().string == 'else', self.tokens.current
        else_text = self.tokens.text_prev2next() + ':'
        assert self.tokens.pop().exact_type == Token.COLON
        self._c_field_list(ele, 'orelse', text=else_text)
def c_Raise(self, parent):
    """Convert a ``raise`` statement node to XML."""
    self.tokens.write_non_ast_tokens(parent)
    keyword = self.tokens.pop()
    assert keyword.string == 'raise'
    raise_ele = Element('Raise', text='raise')
    # exception being raised (absent for a bare `raise`)
    exception = self.fields['exc'].value
    if exception:
        raise_ele.appendChild(DOM.Text(self.tokens.space_right()))
        exc_ele = Element('exc')
        exception.to_xml(exc_ele)
        raise_ele.appendChild(exc_ele)
    # optional `from <cause>` chaining clause
    chained = self.fields['cause'].value
    if chained:
        assert self.tokens.pop().string == 'from'
        raise_ele.appendChild(DOM.Text(self.tokens.text_prev2next()))
        cause_ele = Element('cause')
        chained.to_xml(cause_ele)
        raise_ele.appendChild(cause_ele)
    parent.appendChild(raise_ele)
def c_ExceptHandler(self, parent):
    """Convert one `except` clause (type, optional name, body)."""
    ele = Element('ExceptHandler')
    parent.appendChild(ele)
    # except
    self.tokens.write_non_ast_tokens(ele)
    assert self.tokens.pop().string == 'except', self.tokens.current
    except_text = 'except' + self.tokens.space_right()
    ele.appendChild(DOM.Text(except_text))
    # type (absent on a bare `except:`)
    except_type = self.fields['type'].value
    if except_type:
        ele_type = Element('type')
        except_type.to_xml(ele_type)
        ele.appendChild(ele_type)
    # name: target of the `as <name>` part
    name = self.fields['name'].value
    if name:
        assert self.tokens.pop().string == 'as'
        ele.appendChild(DOM.Text(self.tokens.text_prev2next()))
        assert self.tokens.pop().type == Token.NAME
        ele_name = Element('name', text=name)
        ele.appendChild(ele_name)
    # :
    assert self.tokens.pop().exact_type == Token.COLON
    colon_text = self.tokens.prev_space() + ':'
    ele.appendChild(DOM.Text(colon_text))
    # body
    self._c_field_list(ele, 'body')
def c_Try(self, parent):
    """Convert a try statement (body, handlers, else, finally)."""
    self.tokens.write_non_ast_tokens(parent)
    ele = Element('Try')
    parent.appendChild(ele)
    assert self.tokens.pop().string == 'try', self.tokens.current
    try_text = 'try' + self.tokens.space_right() + ':'
    ele.appendChild(DOM.Text(try_text))
    assert self.tokens.pop().exact_type == Token.COLON
    # body
    self._c_field_list(ele, 'body')
    # handlers: one ExceptHandler element per except clause
    handlers = self.fields['handlers'].value
    if handlers:
        ele_handlers = Element('handlers')
        ele.appendChild(ele_handlers)
        for handler in handlers:
            handler.to_xml(ele_handlers)
    # optional else clause
    orelse = self.fields['orelse'].value
    if orelse:
        self.tokens.write_non_ast_tokens(ele, rspace=False)
        assert self.tokens.pop().string == 'else', self.tokens.current
        else_text = self.tokens.text_prev2next() + ':'
        assert self.tokens.pop().exact_type == Token.COLON
        self._c_field_list(ele, 'orelse', text=else_text)
    # optional finally clause
    final = self.fields['finalbody'].value
    if final:
        self.tokens.write_non_ast_tokens(ele, rspace=False)
        assert self.tokens.pop().string == 'finally', self.tokens.current
        final_text = self.tokens.text_prev2next() + ':'
        assert self.tokens.pop().exact_type == Token.COLON
        self._c_field_list(ele, 'finalbody', text=final_text)
def c_With(self, parent):
    """Convert a with statement (items, optional `as` targets, body)."""
    self.tokens.write_non_ast_tokens(parent)
    assert self.tokens.pop().string == 'with'
    with_text = 'with' + self.tokens.space_right()
    ele = Element(self.class_, text=with_text)
    ele_items = Element('items')
    ele.appendChild(ele_items)
    for item in self.fields['items'].value:
        ele_item = Element('withitem')
        ele_items.appendChild(ele_item)
        item.fields['context_expr'].value.to_xml(ele_item)
        # optional `as <target>` part of the with-item
        opt_vars = item.fields['optional_vars'].value
        if opt_vars:
            assert self.tokens.pop().string == 'as'
            ele_item.appendChild(DOM.Text(self.tokens.text_prev2next()))
            opt_vars.to_xml(ele_item)
        self._c_delimiter(ele_items)
    # colon
    assert self.tokens.pop().exact_type == Token.COLON
    ele.appendChild(DOM.Text(':'))
    # body
    self._c_field_list(ele, 'body')
    parent.appendChild(ele)
class SrcToken:
    """helper to read tokenized python source

    Token is named tuple with field names:
       type string start end line exact_type

    Tokens are stored as a reversed list so pop() consumes them in
    source order from the end of the list.
    """
    def __init__(self, fp):
        # fp must be a binary file-like object; tokenize detects encoding
        self.list = list(reversed(list(tokenize(fp.readline))))
        self.current = None   # token returned by the most recent pop()
        self.previous = None  # token popped just before `current`
        self.pop() # ignore encoding
        # helper to determine in which expression the () is being applied
        # list of tuple with 3 elements:
        #  - string containing Token.LPAR
        #  - 2-tuple with line, column position of first element in expr
        #  - first node to see the LPAR
        self.lpar = []
    def pop(self):
        # advance: current -> previous, head of the stream -> current
        self.previous = self.current
        self.current = self.list[-1]
        return self.list.pop()
    def next(self):
        # peek at the upcoming token without consuming it
        return self.list[-1]
    def pop_dotted_name(self):
        # consume NAME (DOT NAME)* and return the full dotted string
        name = self.pop().string
        while self.next().exact_type == Token.DOT:
            self.pop()
            name += '.' + self.pop().string
        return name
    @staticmethod
    def calc_space(from_token, to_token):
        """Return the whitespace text between two adjacent tokens."""
        if from_token.end[0] == to_token.start[0]:
            # same line, just add spaces
            return ' ' * (to_token.start[1] - from_token.end[1])
        elif to_token.type == Token.ENDMARKER:
            return ''
        else:
            # previous token is a previous line
            # add end of previous line more spaces leading to current token
            return from_token.line[from_token.end[1]:] + ' ' * to_token.start[1]
    def text_prev2next(self):
        # text of `current` plus the whitespace on both sides of it
        text = self.calc_space(self.previous, self.current)
        text += self.current.string
        text += self.calc_space(self.current, self.next())
        return text
    def prev_space(self):
        # whitespace between `previous` and `current`
        return self.calc_space(self.previous, self.current)
    def space_right(self):
        # whitespace between `current` and the upcoming token
        return self.calc_space(self.current, self.next())
    # token types that carry no AST information (layout / comments)
    NON_AST_TOKENS = set([
        Token.SEMI,
        Token.NEWLINE, Token.NL,
        Token.COMMENT,
        Token.INDENT, Token.DEDENT,
    ])
    def write_non_ast_tokens(self, parent_ele, rspace=True):
        """Flush consecutive non-AST tokens as text into *parent_ele*.

        rspace=False leaves the trailing whitespace for the caller.
        """
        text = ''
        while self.next().exact_type in self.NON_AST_TOKENS:
            token = self.pop()
            text += self.prev_space() + token.string
        if rspace:
            text += self.space_right()
        parent_ele.appendChild(DOM.Text(text))
def py2xml(filename=None, fromstring=None):
    """convert ast to srcML

    :param filename: path of a python module (unused when `fromstring`
                     is given; stdin is read when both are missing)
    :param fromstring: python source code as a string
    :return: XML string

    The source is consumed twice: once to build the AST and once to
    tokenize it (the tokenizer needs a bytes stream).
    """
    AstNodeX.load_map()
    if fromstring:
        filename = '<str>'
        _bytep = io.BytesIO(fromstring.encode('utf-8'))
        _strp = io.StringIO(fromstring)
        ast_root = AstNodeX.tree(_strp, filename)
        AstNodeX.tokens = SrcToken(_bytep)
    elif filename:
        with open(filename, 'r') as fp:
            filep = io.StringIO(fp.read())
            ast_root = AstNodeX.tree(filep, filename)
        with open(filename, 'rb') as bs:
            AstNodeX.tokens = SrcToken(bs)
    else:
        filename = '<stdin>'
        _bytep = io.BytesIO(sys.stdin.buffer.read())
        _strp = io.StringIO(_bytep.getvalue().decode('utf-8'))
        ast_root = AstNodeX.tree(_strp, filename)
        AstNodeX.tokens = SrcToken(_bytep)
    root = ast_root.to_xml()
    # add remaining text at the end of the file
    ast_root.tokens.write_non_ast_tokens(root)
    # write XML string
    return root.toxml()
def xml2py(filename=None, fromstring=None):
    """convert XML back to python

    To convert back, just get all text from all nodes.
    """
    # pick the XML source: explicit string, file, or stdin
    if fromstring:
        xml_str = fromstring
    elif filename:
        with open(filename) as fp_in:
            xml_str = fp_in.read()
    else:
        xml_str = sys.stdin.buffer.read()
    tree_root = ET.fromstring(xml_str)
    return ET.tostring(tree_root, encoding='unicode', method='text')
def main(args=None):
    """command line program for py2xml

    With --check, exits with status 1 when the python -> XML -> python
    round trip differs from the original file, 0 otherwise.
    """
    import difflib
    description = """convert python module to XML representation"""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-r', '--reverse', dest='reverse',
                        action='store_true',
                        help='reverse - convert XML back to python code')
    parser.add_argument(
        '-c', '--check', dest='check',
        action='store_true',
        help='display the diff between original file and the roundtrip version')
    parser.add_argument(
        'py_file', metavar='MODULE', nargs='?',
        help='python module, if not specified uses stdin.')
    args = parser.parse_args(args)
    # DIFF
    if args.check:
        # use a context manager so the file handle is not leaked
        with open(args.py_file) as fp:
            original = fp.read()
        roundtriped = xml2py(fromstring=py2xml(args.py_file))
        diff = difflib.unified_diff(
            original.splitlines(),
            roundtriped.splitlines(),
            # bugfix: pass the actual file name; the original passed the
            # literal string 'args.py_file'
            lineterm='', fromfile=args.py_file)
        failed = 0
        for line in diff:
            failed = 1
            print(line)
        sys.exit(failed)
    # XML -> PY
    elif args.reverse:
        sys.stdout.buffer.write(xml2py(args.py_file).encode('utf8'))
    # PY -> XML
    else:
        sys.stdout.buffer.write(py2xml(args.py_file).encode('utf8'))
if __name__ == "__main__": # pragma: no cover
main()
| StarcoderdataPython |
4962914 | <filename>model.py
# ******************************************************************************
# Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import math
import torch
import torch.nn as nn
from reparameterized_layers import DynamicLinear,DynamicConv2d
from parameterized_tensors import SparseTensor,TiedTensor
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
class DynamicNetworkBase(nn.Module):
    """Base class for networks built from sparse / tied dynamic layers.

    Provides magnitude-based pruning and parameter-count accounting over
    the SparseTensor / TiedTensor sub-modules.
    """
    def __init__(self):
        super(DynamicNetworkBase, self).__init__()
        self.split_state = False

    def prune(self, prune_fraction_fc, prune_fraction_conv, prune_fraction_fc_special=None):
        """Prune the smallest-magnitude connections of every SparseTensor.

        Conv tensors use `prune_fraction_conv`; fully-connected ones use
        `prune_fraction_fc`, except a 10x100 tensor which uses
        `prune_fraction_fc_special`.
        """
        for x in [x for x in self.modules() if isinstance(x, SparseTensor)]:
            if x.conv_tensor:
                x.prune_small_connections(prune_fraction_conv)
            else:
                # NOTE(review): the hard-coded 10x100 shape matches the
                # mnist_mlp output layer — confirm before reusing elsewhere
                if x.s_tensor.size(0) == 10 and x.s_tensor.size(1) == 100:
                    x.prune_small_connections(prune_fraction_fc_special)
                else:
                    x.prune_small_connections(prune_fraction_fc)

    def get_model_size(self):
        """Return a dict with 'sparse', 'tied', 'fixed' and 'learnable'
        parameter counts."""
        def get_tensors_and_test(tensor_type):
            relevant_tensors = [x for x in self.modules() if isinstance(x, tensor_type)]
            relevant_params = [p for x in relevant_tensors for p in x.parameters()]
            is_relevant_param = lambda x: [y for y in relevant_params if x is y]
            return relevant_tensors, is_relevant_param

        sparse_tensors, is_sparse_param = get_tensors_and_test(SparseTensor)
        tied_tensors, is_tied_param = get_tensors_and_test(TiedTensor)
        # (the original rebuilt sparse_params/is_sparse_param here, exactly
        # duplicating what get_tensors_and_test already returned; removed)
        sparse_size = sum([x.get_sparsity()[0].item() for x in sparse_tensors])

        tied_size = 0
        for k in tied_tensors:
            unique_reps = k.weight_alloc.cpu().unique()
            # number of parameters per replicated sub-tensor
            subtensor_size = np.prod(list(k.bank.size())[1:])
            tied_size += unique_reps.size(0) * subtensor_size

        # everything that is neither sparse nor tied counts as fixed
        fixed_size = sum([p.data.nelement() for p in self.parameters()
                          if (not is_sparse_param(p) and not is_tied_param(p))])
        model_size = {'sparse': sparse_size, 'tied': tied_size, 'fixed': fixed_size,
                      'learnable': fixed_size + sparse_size + tied_size}
        return model_size
class mnist_mlp(DynamicNetworkBase):
    """3-layer sparse MLP (784-300-100-10) for MNIST."""
    def __init__(self, initial_sparsity = 0.98,sparse = True,no_batch_norm = False):
        super(mnist_mlp, self).__init__()
        # no_batch_norm doubles as the bias flag: without batch norm the
        # linear layers need their own bias term
        self.fc1 = DynamicLinear(784, 300, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        self.fc_int = DynamicLinear(300, 100, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        #self.fc2 = DynamicLinear(100, 10, min(0.5,initial_sparsity),bias = False,sparse = sparse)
        self.fc2 = DynamicLinear(100, 10, initial_sparsity,bias = no_batch_norm,sparse = sparse)
        if no_batch_norm:
            # identity stand-ins keep forward() uniform in both modes
            self.bn1 = lambda x : x
            self.bn2 = lambda x : x
            self.bn3 = lambda x : x
        else:
            self.bn1 = nn.BatchNorm1d(300)
            self.bn2 = nn.BatchNorm1d(100)
            self.bn3 = nn.BatchNorm1d(10)
    def forward(self, x):
        # flatten 28x28 images into 784-dim vectors
        x = F.relu(self.bn1(self.fc1(x.view(-1, 784))))
        x = F.relu(self.bn2(self.fc_int(x)))
        y = self.bn3(self.fc2(x))
        return y
#########Definition of wide resnets
class BasicBlock(nn.Module):
    """Wide-ResNet basic block: pre-activation BN-ReLU-Conv x2 with a
    residual shortcut; the 3x3 convolutions are (sparse) DynamicConv2d."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0,widen_factor = 10,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = DynamicConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                       padding=1, bias=False,initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = DynamicConv2d(out_planes, out_planes, kernel_size=3, stride=1,
                       padding=1, bias=False,initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        self.droprate = dropRate
        # when channel counts differ the shortcut needs a 1x1 projection
        self.equalInOut = (in_planes == out_planes)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                       padding=0, bias=False) or None
    def forward(self, x):
        # pre-activation: BN + ReLU are applied before each convolution
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # shortcut: identity when shapes match, 1x1 projection otherwise
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """A stage of `nb_layers` stacked blocks; only the first block may
    change the channel count / stride."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0,widen_factor = 10,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate,widen_factor,initial_sparsity = initial_sparsity,
                                      sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate,widen_factor,initial_sparsity = 0.5,sub_kernel_granularity = False,sparse = True):
        layers = []
        for i in range(int(nb_layers)):
            # the first block adapts channels/stride, the rest keep shape
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate,widen_factor = widen_factor,
                                initial_sparsity = initial_sparsity,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse))
        return nn.Sequential(*layers)
    def forward(self, x):
        return self.layer(x)
class cifar10_WideResNet(DynamicNetworkBase):
    """Wide ResNet (WRN-depth-widen_factor) for CIFAR-10 built from
    sparse DynamicConv2d basic blocks."""
    def __init__(self, depth, num_classes=10, widen_factor=1, dropRate=0.0,initial_sparsity_conv = 0.5,initial_sparsity_fc = 0.95,sub_kernel_granularity = 4,sparse = True):
        super(cifar10_WideResNet, self).__init__()
        nChannels = np.round(np.array([16, 16*widen_factor, 32*widen_factor, 64*widen_factor])).astype('int32')
        # depth must be 6n+4: three stages of n blocks with 2 convs each
        assert((depth - 4) % 6 == 0)
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                       padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate,widen_factor = widen_factor,
                                   initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # 2nd block (stride 2 halves the spatial resolution)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate,widen_factor = widen_factor,
                                   initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # 3rd block (stride 2 again)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate,widen_factor = widen_factor,
                                   initial_sparsity = initial_sparsity_conv,sub_kernel_granularity = sub_kernel_granularity,sparse = sparse)
        # global average pooling and classifier
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3],num_classes) #DynamicLinear(nChannels[3], num_classes,initial_sparsity = initial_sparsity_fc,sparse = sparse)
        self.nChannels = nChannels[3]
        self.split_state = False
        self.reset_parameters()
    def reset_parameters(self):
        """He-style init for convolutions, standard init for BN/linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, DynamicConv2d):
                n = m.kernel_size * m.kernel_size * m.n_output_maps
                if m.sparse:
                    m.d_tensor.s_tensor.data.normal_(0, math.sqrt(2. / n))
                else:
                    m.d_tensor.bank.data.normal_(0, math.sqrt(2. / n))
            # NOTE(review): plain `if` (not elif) below — the BN check also
            # runs for conv modules; harmless, but likely meant `elif`
            if isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 global average pool for 32x32 CIFAR inputs
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
###Resnet Definition
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convolutions).

    `vanilla_conv1` / `vanilla_conv3` select plain nn.Conv2d layers for
    the 1x1 / 3x3 convolutions; otherwise sparse DynamicConv2d is used.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,widen_factor = 1,vanilla_conv1 = True,vanilla_conv3 = True,initial_sparsity = 0.5,
                 sub_kernel_granularity = 4,sparse = True):
        super(Bottleneck, self).__init__()
        adjusted_planes = planes  # np.round(widen_factor * planes).astype('int32')
        if vanilla_conv1:
            self.conv1 = nn.Conv2d(inplanes, adjusted_planes, kernel_size=1, bias=False)
            self.conv3 = nn.Conv2d(adjusted_planes, planes * 4, kernel_size=1, bias=False)
        else:
            self.conv1 = DynamicConv2d(inplanes, adjusted_planes, kernel_size=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
            self.conv3 = DynamicConv2d(adjusted_planes, planes * 4, kernel_size=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
        if vanilla_conv3:
            # bugfix: the original referenced the undefined name
            # 'adjsuted_planes' (typo), raising NameError whenever
            # vanilla_conv3 was True (the default)
            self.conv2 = nn.Conv2d(adjusted_planes, adjusted_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        else:
            self.conv2 = DynamicConv2d(adjusted_planes, adjusted_planes, kernel_size=3, stride=stride,
                                       padding=1, bias=False, initial_sparsity=initial_sparsity,
                                       sub_kernel_granularity=sub_kernel_granularity, sparse=sparse)
        self.bn1 = nn.BatchNorm2d(adjusted_planes)
        self.bn2 = nn.BatchNorm2d(adjusted_planes)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Bottleneck forward pass with identity (or downsampled) shortcut."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(DynamicNetworkBase):
    """ImageNet-style ResNet whose convolutions and classifier may be
    sparse (DynamicConv2d / DynamicLinear) depending on the vanilla_*
    flags."""
    def __init__(self, block, layers, num_classes=1000,widen_factor = 1,vanilla_downsample = True,vanilla_conv1 = True,vanilla_conv3 = True,
                 initial_sparsity_conv = 0.5,initial_sparsity_fc = 0.95,sub_kernel_granularity = 4,sparse = True):
        # base channel width, scaled by widen_factor
        self.inplanes = np.round(64 * widen_factor).astype('int32')
        super(ResNet, self).__init__()
        self.widen_factor = widen_factor
        self.vanilla_conv1 = vanilla_conv1
        self.vanilla_conv3 = vanilla_conv3
        self.vanilla_downsample = vanilla_downsample
        self.initial_sparsity_conv = initial_sparsity_conv
        self.initial_sparsity_fc = initial_sparsity_fc
        self.sub_kernel_granularity = sub_kernel_granularity
        self.sparse = sparse
        # stem: 7x7 stride-2 conv + BN + ReLU + 3x3 max pool
        self.conv1 = nn.Conv2d(3, np.round(64 * widen_factor).astype('int32'), kernel_size=7, stride=2, padding=3,
                       bias=False)
        self.bn1 = nn.BatchNorm2d(np.round(64 * widen_factor).astype('int32'))
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # four stages; channels double and resolution halves per stage
        self.layer1 = self._make_layer(block, np.round(64 * widen_factor).astype('int32'), layers[0])
        self.layer2 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, np.round(64 * widen_factor).astype('int32')*8, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = DynamicLinear(np.round(64 * widen_factor).astype('int32') * block.expansion * 8, num_classes,initial_sparsity = self.initial_sparsity_fc,sparse = sparse)
        # He initialization for convolutions, standard init for batch norm
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, DynamicConv2d):
                n = m.kernel_size * m.kernel_size * m.n_output_maps
                if m.sparse:
                    m.d_tensor.s_tensor.data.normal_(0, math.sqrt(2. / n))
                else:
                    m.d_tensor.bank.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` bottleneck blocks; the first block
        downsamples when stride != 1 or the channel count changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False) if self.vanilla_downsample else \
                DynamicConv2d(self.inplanes, planes * block.expansion,kernel_size=1,stride=stride, bias=False,
                              initial_sparsity = self.initial_sparsity_conv,sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample,widen_factor = self.widen_factor,
                            vanilla_conv1 = self.vanilla_conv1,vanilla_conv3 = self.vanilla_conv3,initial_sparsity = self.initial_sparsity_conv,
                            sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,widen_factor = self.widen_factor,
                                vanilla_conv1 = self.vanilla_conv1,vanilla_conv3 = self.vanilla_conv3,initial_sparsity = self.initial_sparsity_conv,
                                sub_kernel_granularity = self.sub_kernel_granularity,sparse = self.sparse))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # flatten to (batch, features) for the classifier
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def imagenet_resnet50(widen_factor = 1,vanilla_conv1 = False,vanilla_conv3 = False,vanilla_downsample = True,decimation_factor = 8,
                      initial_sparsity_conv = 0.5,initial_sparsity_fc = 0.95,sub_kernel_granularity = 4,sparse = True, **kwargs):
    """Constructs a ResNet-50 model.

    Note: `decimation_factor` is accepted for interface compatibility
    but is not used here.
    """
    bottlenecks_per_stage = [3, 4, 6, 3]  # the standard ResNet-50 layout
    return ResNet(
        Bottleneck,
        bottlenecks_per_stage,
        widen_factor=widen_factor,
        vanilla_conv1=vanilla_conv1,
        vanilla_conv3=vanilla_conv3,
        vanilla_downsample=vanilla_downsample,
        initial_sparsity_conv=initial_sparsity_conv,
        initial_sparsity_fc=initial_sparsity_fc,
        sub_kernel_granularity=sub_kernel_granularity,
        sparse=sparse,
        **kwargs)
| StarcoderdataPython |
5171766 | import sys
from framework_list import frameworks
from functions import log, run_command
log("Publishing CocoaPods")
for framework in frameworks:
    log(f"Publishing {framework}")
    # Most pods take a few minutes to build, and a few seconds to push to trunk. However, the
    # AWSiOSSDK podspec can take a long time to build, since it builds each dependent pod as
    # part of its linting process, so set the timeout accordingly.
    (exit_code, out, err) = run_command(
        ["pod", "trunk", "push", f"{framework}.podspec", "--allow-warnings", "--synchronous"],
        keepalive_interval=300,
        timeout=3600,
    )
    if exit_code != 0 and "Unable to accept duplicate entry for" in str(out):
        # trunk already has this version -> treat as success
        log(f"Already published {framework}")
    elif exit_code == 0:
        log(f"Published {framework}")
    else:
        # any other failure aborts the whole publish run
        log(f"Could not publish {framework}: output: {out}; error: {err}")
        sys.exit(exit_code)
    if framework == "AWSCore":
        # refresh the local specs repo so later pods can resolve the
        # just-published AWSCore
        log(f"pod repo update after {framework}")
        (exit_code, out, err) = run_command(
            ["pod", "repo", "update"],
            keepalive_interval=300,
            timeout=3600,
        )
        if exit_code != 0:
            # NOTE(review): failure is only logged, not fatal — presumably
            # best-effort; confirm this is intentional
            log(f"Failed to update CocoaPods repo'; output={out}, error={err}")
# explicit success exit at end of script
sys.exit(0)
| StarcoderdataPython |
3278624 | <gh_stars>1-10
word = 'tin'
# Print each character on its own line.  Iterating over the string
# covers every index and fixes the original out-of-range access
# (word[3] raised IndexError on a 3-character string).
for character in word:
    print(character)
12838123 | from django.db import models
from data_ocean.models import DataOceanModel
from location_register.models.koatuu_models import KoatuuCategory
class RatuRegion(DataOceanModel):
    """Top-level administrative region from the RATU register."""
    name = models.CharField('назва', max_length=30, unique=True)
    # KOATUU classifier code; nullable because not every record carries one.
    koatuu = models.CharField('код КОАТУУ', max_length=10, unique=True, null=True)
    class Meta:
        verbose_name = 'регіон'
class RatuDistrict(DataOceanModel):
    """District within a region from the RATU register."""
    region = models.ForeignKey(RatuRegion, on_delete=models.CASCADE, verbose_name='регіон')
    name = models.CharField('назва', max_length=100)
    # KOATUU classifier code; nullable because not every record carries one.
    koatuu = models.CharField('код КОАТУУ', max_length=10, unique=True, null=True)
    code = models.CharField('код', max_length=200)
    class Meta:
        verbose_name = 'район'
class RatuCity(DataOceanModel):
    """Populated place (city/town/village) from the RATU register."""
    region = models.ForeignKey(RatuRegion, on_delete=models.CASCADE, verbose_name='регіон')
    # district is nullable: cities of regional significance have no district.
    district = models.ForeignKey(RatuDistrict, on_delete=models.CASCADE, verbose_name='район',
                                 null=True)
    category = models.ForeignKey(KoatuuCategory, on_delete=models.CASCADE, null=True,
                                 verbose_name='категорія населеного пункта')
    name = models.CharField('назва', max_length=100)
    koatuu = models.CharField('код КОАТУУ', max_length=10, unique=True, null=True)
    code = models.CharField('код', max_length=200)
    class Meta:
        verbose_name = 'населенний пункт'
class RatuCityDistrict(DataOceanModel):
    """District within a city from the RATU register."""
    region = models.ForeignKey(RatuRegion, on_delete=models.CASCADE, verbose_name='регіон')
    district = models.ForeignKey(RatuDistrict, on_delete=models.CASCADE, verbose_name='район',
                                 null=True)
    city = models.ForeignKey(RatuCity, on_delete=models.CASCADE,
                             verbose_name='населений пункт')
    category = models.ForeignKey(KoatuuCategory, on_delete=models.CASCADE, null=True,
                                 verbose_name='категорія населеного пункта')
    name = models.CharField('назва', max_length=100)
    koatuu = models.CharField('код КОАТУУ', max_length=10, unique=True, null=True)
    # Fix: verbose_name was an empty string ''; every sibling model labels
    # this field 'код', so the inconsistency is restored here.
    code = models.CharField('код', max_length=200)
    class Meta:
        verbose_name = 'район у місті'
class RatuStreet(DataOceanModel):
    """Street from the RATU register, located down to an optional city district."""
    region = models.ForeignKey(RatuRegion, on_delete=models.CASCADE, verbose_name='регіон')
    district = models.ForeignKey(RatuDistrict, on_delete=models.CASCADE, verbose_name='район',
                                 null=True)
    city = models.ForeignKey(RatuCity, on_delete=models.CASCADE,
                             verbose_name='населений пункт')
    citydistrict = models.ForeignKey(RatuCityDistrict, on_delete=models.CASCADE, null=True,
                                     verbose_name='район у місті')
    name = models.CharField('назва', max_length=100)
    code = models.CharField('код', max_length=200)
    class Meta:
        verbose_name = 'вулиця'
| StarcoderdataPython |
# Replace emoticons in a typed message with emoji, using a dict lookup
# after splitting the message into words.
message = input(">")
words = message.split(' ')
print(words) # show the message split into individual words
emojis = {
    ":)": "😄",
    ":(": "😟"
}
output = ""
for word in words:
    # dict.get falls back to the word itself when it is not a known emoticon
    output += emojis.get(word, word) + " "
print(output)
| StarcoderdataPython |
182362 | <filename>src/flask_lucide/extension.py
"""Single File plugin for Lucide icons."""
import re
from dataclasses import dataclass
from flask import current_app, Flask
from io import StringIO
from markupsafe import Markup
from pathlib import Path
from typing import Optional, Any
from xml.dom import minidom
from .icons import i as icons
class Lucide(object):
    """Flask extension entry point: registers the icon helper and optionally
    merges user-supplied SVG files into the built-in icon table."""
    def __init__(self, app: Flask, import_dir: Optional[Path] = None):
        """Initialize plugin and read extra files from import dir.
        Args:
            app (Flask): flask app
            import_dir (Optional[Path]): directory of extra .svg icons to
                merge into the built-in icon table
        """
        # NOTE(review): app is annotated as Flask but guarded for None below;
        # the annotation should probably be Optional[Flask] — confirm.
        if app is not None:
            self.init_app(app)
        if import_dir:
            print(f"Attempting to import custom svg from {import_dir}")
            files = import_dir.glob('**/*.svg')
            for file in files:
                # The file stem becomes the icon key; dashes normalized to underscores.
                icon_name = file.stem.replace('-', '_')
                print(f"Parsing {icon_name}")
                svg = file.read_text()
                # Collapse newlines and runs of whitespace into single spaces.
                svg = re.sub(r'\n', r' ', svg)
                svg = re.sub(r'\s+', r' ', svg)
                svg = svg.replace('> <', '><').replace(' />', '/>')
                # Drop the first and last '><'-delimited fragments, i.e. the
                # outer <svg ...> wrapper, keeping only the inner elements.
                svg = ('><').join(svg.split('><')[1:-1])
                icons[icon_name] = svg
    def init_app(self, app: Flask):
        """Register the extension and template context processor on the app.
        Args:
            app (Flask): flask entity
        """
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['lucide'] = _lucide
        app.context_processor(self.context_processor)
    @staticmethod
    def context_processor() -> dict:
        """Expose the icon helper to Jinja templates as ``lucide``.
        Returns:
            dict: what to add to the template context of the app.
        """
        return {'lucide': current_app.extensions['lucide']}
class _lucide(object):
    """Template-facing helper exposed to Jinja as ``lucide``."""
    @staticmethod
    def icon(icon_name: str, **kwargs: Any) -> Markup:
        """Render the named Lucide icon as an inline SVG.

        Args:
            icon_name (str): name of the icon (dashes or underscores)
            **kwargs (Any): extra SVG attributes; underscores in the
                attribute names become dashes (e.g. stroke_width)
        Returns:
            Markup: markup-safe SVG string
        """
        icon_name = icon_name.replace('-', '_')
        if not icon_name:
            return Markup('')
        prefix = (
            '<svg xmlns="http://www.w3.org/2000/svg" width="24"'
            ' height="24" viewBox="0 0 24 24" fill="none" stroke='
            '"currentColor" stroke-width="2" stroke-linecap="round"'
            ' stroke-linejoin="round" ><'
        )
        suffix = '></svg>'
        doc = minidom.parseString(prefix + icons[icon_name] + suffix)
        root = doc.documentElement
        for attr_name, attr_value in kwargs.items():
            root.setAttribute(attr_name.replace('_', '-'), str(attr_value))
        out = StringIO()
        for node in doc.childNodes:
            node.writexml(out, "", "", "")
        return Markup(out.getvalue())
| StarcoderdataPython |
203118 | from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description.

    The file is decoded explicitly as UTF-8; the original relied on the
    platform locale encoding, which breaks on non-UTF-8 systems.
    """
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
# Package metadata for pynoter (powerpoint -> org/LaTeX text converter).
# NOTE(review): author/author_email are redaction placeholders — fill in
# real values before publishing.
setup(name='pynoter',
      version='0.1.7',
      description='Powerpoint presentations into org or tex files',
      long_description='Allows users to convert powerpoint presentations into raw text for editing in latex or org-mode',
      classifiers=['Programming Language :: Python :: 2.7', 'Topic :: Text Processing :: Markup :: LaTeX'],
      url='https://github.com/maxrousseau/pynoter',
      author='<NAME>',
      author_email='<EMAIL>',
      test_suite='nose.collector',
      tests_require=['nose'],
      scripts=['bin/py-noter'],
      include_package_data=True,
      license='MIT',
      packages=['pynoter'],
      install_requires=['python-pptx'],
      zip_safe=False)
| StarcoderdataPython |
1642939 | import collections
import datetime
import pytz
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render
from standup.status.models import Status, Team, StandupUser
def require_superuser(fun):
    """Decorator: restrict a view to active superusers via user_passes_test."""
    return user_passes_test(lambda user: user.is_active and user.is_superuser)(fun)
@require_superuser
def errormenow_view(request):
    """Deliberately raise ZeroDivisionError to exercise the error page."""
    # This is an intentional error designed to kick up the error page because
    # otherwise it's difficult to test.
    1 / 0  # noqa
@require_superuser
def statistics_view(request):
    """Show health statistics for the system
    .. Note::
       This is an "admin" view, so it uses Django templates.
    """
    # Reference points for "recent" activity: last 24 hours and last 7 days,
    # both computed in UTC.
    hours_24 = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(hours=24)
    week = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(days=7)
    # OrderedDict preserves the display order of stat groups in the template.
    groups = collections.OrderedDict()
    groups['Standup users'] = collections.OrderedDict([
        ('Team count', Team.objects.count()),
        ('User count', StandupUser.objects.count()),
        ('New users in last 24 hours', StandupUser.objects.filter(user__date_joined__gte=hours_24).count()),
        ('Active users (posted in last week)',
         StandupUser.objects.filter(id__in=Status.objects.filter(created__gte=week).values('user__id')).count()),
    ])
    groups['Standup status'] = collections.OrderedDict([
        ('Status count', Status.objects.count()),
        ('Status in last 24 hours', Status.objects.filter(created__gte=hours_24).count()),
        ('Status in last week', Status.objects.filter(created__gte=week).count()),
    ])
    context = {
        'title': 'Site statistics',
        'statsitems': groups
    }
    return render(request, 'admin/statistics.html', context)
| StarcoderdataPython |
3405670 | # flake8: noqa
import os
from pathlib import Path
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.contrib.layers import Lambda
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
from tests import (
DATA_ROOT,
IS_CONFIGS_REQUIRED,
IS_CPU_REQUIRED,
IS_DDP_AMP_REQUIRED,
IS_DDP_REQUIRED,
IS_DP_AMP_REQUIRED,
IS_DP_REQUIRED,
IS_GPU_AMP_REQUIRED,
IS_GPU_REQUIRED,
)
from tests.misc import run_experiment_from_configs
class CustomRunner(dl.Runner):
    """GAN runner for MNIST: handle_batch builds the tensors for both the
    discriminator loss (real vs. fake) and the generator loss (fooling the
    discriminator); the losses themselves are attached via CriterionCallbacks
    keyed on the dict stored in ``self.batch``."""
    def __init__(self, *args, latent_dim: int = 32, **kwargs):
        super().__init__(*args, **kwargs)
        # Dimensionality of the generator's random input vectors.
        self.latent_dim = latent_dim
    def predict_batch(self, batch):
        """Generate a single fake image from random noise; ``batch`` is unused."""
        batch_size = 1
        # Sample random points in the latent space
        random_latent_vectors = torch.randn(batch_size, self.latent_dim).to(
            self.engine.device
        )
        # Decode them to fake images
        generated_images = self.model["generator"](random_latent_vectors).detach()
        return generated_images
    def handle_batch(self, batch):
        """Run generator/discriminator forward passes and stash the outputs."""
        real_images, _ = batch
        batch_size = real_images.shape[0]
        # Sample random points in the latent space
        random_latent_vectors = torch.randn(batch_size, self.latent_dim).to(
            self.engine.device
        )
        # Decode them to fake images (detached so the discriminator step
        # does not backprop into the generator)
        generated_images = self.model["generator"](random_latent_vectors).detach()
        # Combine them with real images
        combined_images = torch.cat([generated_images, real_images])
        # Assemble labels discriminating real from fake images
        labels = torch.cat(
            [torch.ones((batch_size, 1)), torch.zeros((batch_size, 1))]
        ).to(self.engine.device)
        # Add random noise to the labels - important trick!
        labels += 0.05 * torch.rand(labels.shape).to(self.engine.device)
        # Discriminator forward
        combined_predictions = self.model["discriminator"](combined_images)
        # Sample random points in the latent space
        random_latent_vectors = torch.randn(batch_size, self.latent_dim).to(
            self.engine.device
        )
        # Assemble labels that say "all real images"
        misleading_labels = torch.zeros((batch_size, 1)).to(self.engine.device)
        # Generator forward (not detached: the generator loss needs gradients)
        generated_images = self.model["generator"](random_latent_vectors)
        generated_predictions = self.model["discriminator"](generated_images)
        self.batch = {
            "combined_predictions": combined_predictions,
            "labels": labels,
            "generated_predictions": generated_predictions,
            "misleading_labels": misleading_labels,
        }
def _ddp_hack(x):
return x.view(x.size(0), 1, 28, 28)
def train_experiment(engine=None):
    """Build a tiny MNIST GAN (linear generator/discriminator), train it for
    one epoch with CustomRunner under the given engine, and — for plain
    CPU/GPU engines only — smoke-test predict_batch afterwards."""
    with TemporaryDirectory() as logdir:
        # latent_dim = 128
        # generator = nn.Sequential(
        #     # We want to generate 128 coefficients to reshape into a 7x7x128 map
        #     nn.Linear(128, 128 * 7 * 7),
        #     nn.LeakyReLU(0.2, inplace=True),
        #     Lambda(lambda x: x.view(x.size(0), 128, 7, 7)),
        #     nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        #     nn.LeakyReLU(0.2, inplace=True),
        #     nn.ConvTranspose2d(128, 128, (4, 4), stride=(2, 2), padding=1),
        #     nn.LeakyReLU(0.2, inplace=True),
        #     nn.Conv2d(128, 1, (7, 7), padding=3),
        #     nn.Sigmoid(),
        # )
        # discriminator = nn.Sequential(
        #     nn.Conv2d(1, 64, (3, 3), stride=(2, 2), padding=1),
        #     nn.LeakyReLU(0.2, inplace=True),
        #     nn.Conv2d(64, 128, (3, 3), stride=(2, 2), padding=1),
        #     nn.LeakyReLU(0.2, inplace=True),
        #     GlobalMaxPool2d(),
        #     Flatten(),
        #     nn.Linear(128, 1),
        # )
        # Deliberately tiny models so the test runs fast; _ddp_hack is a
        # module-level function (not a lambda) so it can be pickled for DDP.
        latent_dim = 32
        generator = nn.Sequential(
            nn.Linear(latent_dim, 28 * 28), Lambda(_ddp_hack), nn.Sigmoid()
        )
        discriminator = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 1))
        model = {"generator": generator, "discriminator": discriminator}
        criterion = {
            "generator": nn.BCEWithLogitsLoss(),
            "discriminator": nn.BCEWithLogitsLoss(),
        }
        optimizer = {
            "generator": torch.optim.Adam(
                generator.parameters(), lr=0.0003, betas=(0.5, 0.999)
            ),
            "discriminator": torch.optim.Adam(
                discriminator.parameters(), lr=0.0003, betas=(0.5, 0.999)
            ),
        }
        loaders = {
            "train": DataLoader(
                MNIST(DATA_ROOT, train=False),
                batch_size=32,
            ),
        }
        runner = CustomRunner(latent_dim=latent_dim)
        # Two criterion/backward/optimizer triplets: one per sub-model.
        runner.train(
            engine=engine,
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            loaders=loaders,
            callbacks=[
                dl.CriterionCallback(
                    input_key="combined_predictions",
                    target_key="labels",
                    metric_key="loss_discriminator",
                    criterion_key="discriminator",
                ),
                dl.BackwardCallback(metric_key="loss_discriminator"),
                dl.OptimizerCallback(
                    optimizer_key="discriminator",
                    metric_key="loss_discriminator",
                ),
                dl.CriterionCallback(
                    input_key="generated_predictions",
                    target_key="misleading_labels",
                    metric_key="loss_generator",
                    criterion_key="generator",
                ),
                dl.BackwardCallback(metric_key="loss_generator"),
                dl.OptimizerCallback(
                    optimizer_key="generator",
                    metric_key="loss_generator",
                ),
            ],
            valid_loader="train",
            valid_metric="loss_generator",
            minimize_valid_metric=True,
            num_epochs=1,
            verbose=False,
            logdir=logdir,
        )
        # Smoke-test generation only for single-process engines.
        if isinstance(engine, (dl.CPUEngine, dl.GPUEngine)) and not engine.is_ddp:
            runner.predict_batch(None)[0, 0].cpu().numpy()
def train_experiment_from_configs(*auxiliary_configs: str):
    """Run the config-API variant of this experiment, layering auxiliary configs."""
    config_dir = Path(__file__).parent / "configs"
    main_config = f"{Path(__file__).stem}.yml"
    run_experiment_from_configs(config_dir, main_config, *auxiliary_configs)
# Device
# Single-process engine tests: each builds the GAN experiment on one device,
# via both the Python API and the config API. The skipif markers gate on the
# environment flags imported from ``tests``.
@mark.skipif(not IS_CPU_REQUIRED, reason="CPU device is not available")
def test_run_on_cpu():
    train_experiment(dl.CPUEngine())
@mark.skipif(
    not IS_CONFIGS_REQUIRED or not IS_CPU_REQUIRED, reason="CPU device is not available"
)
def test_config_run_on_cpu():
    train_experiment_from_configs("engine_cpu.yml")
@mark.skipif(
    not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]), reason="CUDA device is not available"
)
def test_run_on_torch_cuda0():
    train_experiment(dl.GPUEngine())
@mark.skipif(
    not IS_CONFIGS_REQUIRED or not all([IS_GPU_REQUIRED, IS_CUDA_AVAILABLE]),
    reason="CUDA device is not available",
)
def test_config_run_on_torch_cuda0():
    train_experiment_from_configs("engine_gpu.yml")
@mark.skipif(
    not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
    reason="No CUDA or AMP found",
)
def test_run_on_amp():
    train_experiment(dl.GPUEngine(fp16=True))
@mark.skipif(
    not IS_CONFIGS_REQUIRED
    or not all([IS_GPU_AMP_REQUIRED, IS_CUDA_AVAILABLE, SETTINGS.amp_required]),
    reason="No CUDA or AMP found",
)
def test_config_run_on_amp():
    train_experiment_from_configs("engine_gpu_amp.yml")
# DP
# DataParallel engine tests: require at least two CUDA devices; the AMP
# variants additionally require torch AMP support.
@mark.skipif(
    not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
    reason="No CUDA>=2 found",
)
def test_run_on_torch_dp():
    train_experiment(dl.DataParallelEngine())
@mark.skipif(
    not IS_CONFIGS_REQUIRED
    or not all([IS_DP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
    reason="No CUDA>=2 found",
)
def test_config_run_on_torch_dp():
    train_experiment_from_configs("engine_dp.yml")
@mark.skipif(
    not all(
        [
            IS_DP_AMP_REQUIRED,
            IS_CUDA_AVAILABLE,
            NUM_CUDA_DEVICES >= 2,
            SETTINGS.amp_required,
        ]
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_dp():
    train_experiment(dl.DataParallelEngine(fp16=True))
@mark.skipif(
    not IS_CONFIGS_REQUIRED
    or not all(
        [
            IS_DP_AMP_REQUIRED,
            IS_CUDA_AVAILABLE,
            NUM_CUDA_DEVICES >= 2,
            SETTINGS.amp_required,
        ]
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_dp():
    train_experiment_from_configs("engine_dp_amp.yml")
# DDP
# DistributedDataParallel engine tests: require at least two CUDA devices;
# the AMP variants additionally require torch AMP support.
@mark.skipif(
    not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
    reason="No CUDA>=2 found",
)
def test_run_on_torch_ddp():
    train_experiment(dl.DistributedDataParallelEngine())
@mark.skipif(
    not IS_CONFIGS_REQUIRED
    or not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
    reason="No CUDA>=2 found",
)
def test_config_run_on_torch_ddp():
    train_experiment_from_configs("engine_ddp.yml")
@mark.skipif(
    not all(
        [
            IS_DDP_AMP_REQUIRED,
            IS_CUDA_AVAILABLE,
            NUM_CUDA_DEVICES >= 2,
            SETTINGS.amp_required,
        ]
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_run_on_amp_ddp():
    train_experiment(dl.DistributedDataParallelEngine(fp16=True))
@mark.skipif(
    not IS_CONFIGS_REQUIRED
    or not all(
        [
            IS_DDP_AMP_REQUIRED,
            IS_CUDA_AVAILABLE,
            NUM_CUDA_DEVICES >= 2,
            SETTINGS.amp_required,
        ]
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_config_run_on_amp_ddp():
    train_experiment_from_configs("engine_ddp_amp.yml")
def _train_fn(local_rank, world_size):
    """mp.spawn worker: set WORLD_SIZE/RANK/LOCAL_RANK env vars, create the
    NCCL process group, run the experiment, then tear the group down."""
    process_group_kwargs = {
        "backend": "nccl",
        "world_size": world_size,
    }
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["RANK"] = str(local_rank)
    os.environ["LOCAL_RANK"] = str(local_rank)
    dist.init_process_group(**process_group_kwargs)
    train_experiment(dl.Engine())
    dist.destroy_process_group()
@mark.skipif(
    not all([IS_DDP_REQUIRED, IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES >= 2]),
    reason="No CUDA>=2 found",
)
def test_run_on_torch_ddp_spawn():
    # Spawn one worker process per visible GPU.
    world_size: int = torch.cuda.device_count()
    mp.spawn(
        _train_fn,
        args=(world_size,),
        nprocs=world_size,
        join=True,
    )
def _train_fn_amp(local_rank, world_size):
    """Same as _train_fn but with mixed precision (fp16) enabled."""
    process_group_kwargs = {
        "backend": "nccl",
        "world_size": world_size,
    }
    os.environ["WORLD_SIZE"] = str(world_size)
    os.environ["RANK"] = str(local_rank)
    os.environ["LOCAL_RANK"] = str(local_rank)
    dist.init_process_group(**process_group_kwargs)
    train_experiment(dl.Engine(fp16=True))
    dist.destroy_process_group()
@mark.skipif(
    not all(
        [
            IS_DDP_AMP_REQUIRED,
            IS_CUDA_AVAILABLE,
            NUM_CUDA_DEVICES >= 2,
            SETTINGS.amp_required,
        ]
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_run_on_torch_ddp_amp_spawn():
    world_size: int = torch.cuda.device_count()
    mp.spawn(
        _train_fn_amp,
        args=(world_size,),
        nprocs=world_size,
        join=True,
    )
| StarcoderdataPython |
6535803 | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
"""
Information about the frontend package of the widget.
"""
# Name of the NPM package providing the widget's frontend.
module_name = "@deck.gl/jupyter-widget"
# Semver range of the JS portion of the widget expected by this Python package.
module_version = "^7.3.0-beta.4"
| StarcoderdataPython |
3200404 | <reponame>LucaGuffanti/FCI<filename>Lab2/udpClient.py
from socket import *
# Socket parameters: the server's IP address and port.
serverName = 'localhost' # 127.0.0.1
serverPort = 12001 # arbitrary, as long as it avoids well-known ports
# Build the socket:
# AF_INET selects IPv4 addressing, SOCK_DGRAM selects UDP.
clientSocket = socket(AF_INET, SOCK_DGRAM)
tm = 10
clientSocket.settimeout(tm)
# Ask the user for the string to send.
message = input('Inserisci una Stringa: ')
# Send the encoded message; the destination is an (address, port) tuple.
clientSocket.sendto(message.encode('utf-8'), (serverName, serverPort))
# recvfrom blocks until a datagram arrives or the timeout above expires;
# it returns both the data and the sender's address.
try:
    modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
    # Decode from bytes and print the reply.
    modifiedMessage = modifiedMessage.decode('utf-8')
    print(f'Messaggio ricevuto da: {serverAddress}')
    print(modifiedMessage)
except timeout:
    # Fix: the original bare `except:` swallowed every exception (including
    # KeyboardInterrupt) and reported them all as a timeout. Catch only the
    # socket timeout raised by settimeout(); `timeout` is exported by
    # `from socket import *`.
    print('Timeout Scaduto: Server non raggiungibile')
finally:
    # Always release the socket.
    clientSocket.close()
| StarcoderdataPython |
73671 | from abc import ABCMeta, abstractmethod
from typing import Callable, Iterable, List, TypeVar, Tuple
from ..universe import Universe
T = TypeVar('T')
BaseUniverseType = TypeVar('BaseUniverseType', bound='BaseUniverse[T]')
class BaseUniverse(Universe[T], metaclass=ABCMeta):
    """
    Represents a base class for universes of 'The Game of Life'.
    The base universe represents a sparse grid that holds memory only for occupied cells.
    """
    # Fix: the original set the Python-2-style ``__metaclass__ = ABCMeta``
    # attribute, which Python 3 ignores — so @abstractmethod was never
    # enforced. Declaring ``metaclass=ABCMeta`` restores that enforcement.

    def __init__(self, width: int, height: int):
        if width <= 0:
            raise ValueError('width is zero or a negative number.')
        if height <= 0:
            raise ValueError('height is zero or a negative number.')
        self._width = width
        self._height = height
        # Sparse storage: only occupied positions appear as keys.
        self._data = dict()

    @property
    def width(self) -> int:
        """Returns universe width."""
        return self._width

    @property
    def height(self) -> int:
        """Returns universe height."""
        return self._height

    @abstractmethod
    def adjust_position(self, x: int, y: int) -> Tuple[int, int]:
        """Returns the universe position (e.g. wrapped or clamped by subclasses)."""
        pass

    @abstractmethod
    def is_position_in_range(self, x: int, y: int) -> bool:
        """Indicates whether the specified position is within the universe boundaries."""
        pass

    def through(self) -> Iterable[Tuple[int, int]]:
        """Returns a new iterator that can iterate over universe positions.

        Fix: the return annotation was ``Tuple[int, int]``, but the method
        yields a generator of (x, y) tuples.
        """
        return ((x, y) for y in range(self.height)
                for x in range(self.width))

    def neighbours_of(self, x: int, y: int) -> Iterable[T]:
        """Returns a new iterator that can iterate over neighbours around the specified position."""
        # The eight surrounding positions, in a fixed order (same order as
        # the original implementation).
        positions = [
            (x - 1, y - 1),
            (x, y - 1),
            (x + 1, y - 1),
            (x + 1, y),
            (x + 1, y + 1),
            (x, y + 1),
            (x - 1, y + 1),
            (x - 1, y)
        ]
        return (self[position] for position in positions if self.is_position_in_range(*position))

    def __copy__(self) -> BaseUniverseType:
        """Returns a shallow copy of the universe."""
        copy = type(self)(self.width, self.height)
        copy._data = self._data.copy()
        return copy

    def __getitem__(self, position: Tuple[int, int]) -> T:
        """Returns a value for the specified position using self[x, y]."""
        adjusted_position = self.adjust_position(*position)
        item = self._data.get(adjusted_position, None)
        return item

    def __setitem__(self, position: Tuple[int, int], value: T):
        """Sets the value for the specified position using self[x, y].

        Setting None removes the cell, keeping the storage sparse.
        """
        adjusted_position = self.adjust_position(*position)
        if value is None:
            self._data.pop(adjusted_position, None)
            return
        self._data[adjusted_position] = value

    def __str__(self) -> str:
        """Returns a string representation of the universe (empty cells as spaces)."""
        def to_str(i): return str(i or ' ')
        rows = ((to_str(self[x, y]) for x in range(self.width))
                for y in range(self.height))
        result = '\n'.join((' '.join(row) for row in rows))
        return result

    def __eq__(self, other: 'BaseUniverse[T]') -> bool:
        """Indicates whether the universe equals to another universe.

        NOTE(review): only the occupied cells are compared — two universes
        with identical cells but different dimensions compare equal. Confirm
        whether dimensions should participate in equality.
        """
        eq = self._data.items() == other._data.items()
        return eq

    @classmethod
    def from_data(cls, data: List[List[T]], is_cell: Callable[[T], bool]=lambda cell: cell) -> BaseUniverseType:
        """
        Creates a universe from a 2-dimensional list.
        By default, create cells only for values which boolean is True.
        """
        # Width is bounded by the shortest row so indexing below is safe.
        min_row = min(data, default=[], key=len)
        universe = cls(len(min_row), len(data))
        for x, y in universe.through():
            universe[x, y] = data[y][x] if is_cell(data[y][x]) else None
        return universe

    @classmethod
    def random(cls, width: int, height: int, get_random: Callable[[], T]) -> BaseUniverseType:
        """Creates a random universe of the specified dimensions."""
        universe = cls(width, height)
        for x, y in universe.through():
            universe[x, y] = get_random()
        return universe
| StarcoderdataPython |
33923 | import serialio
class Serial(object):
    """Thin wrapper around the ``serialio`` module: opens a port on
    construction and reads one whitespace-delimited value at a time."""
    def __init__(self, port, baudrate, timeout):
        self.port = port
        self.baudrate = baudrate
        # NOTE(review): timeout is stored but never passed to serialio.Serial
        # below — confirm whether the port should be opened with it.
        self.timeout = timeout
        self._openPort()
    def _openPort(self):
        self.hComm = serialio.Serial(self.port, self.baudrate) # Opening the port
    def read(self):
        data = serialio.read(self.hComm) # Listening to serial port
        splited = data.split() # To remove \r\n(\n)
        return splited[0] # Returning the data
# Module-level smoke run: open COM3 at 9600 baud and read one value.
ser = Serial("COM3", 9600, 1)
ser.read()
8117082 | <filename>irispreppy/psf/deconvolve.py<gh_stars>1-10
import concurrent.futures
import pickle
from copy import deepcopy as dc
from glob import glob as ls
from os import cpu_count as cpus
from os import path
import numpy as np
import scipy.stats as scist
from astropy.io import fits
from tqdm import tqdm
from . import IRIS_SG_deconvolve as isd
#There are two functions here
def ParDecon(rasfits, psfs, save=False):
    '''Wrapper around IRIS_SG_deconvolve: deconvolve every spectral window of
    one raster and rebuild the HDU list with refreshed statistics keywords.

    Input Parameters:
        rasfits: The hdu to be deconvolved
        psfs: The point spread functions in a dictionary, keyed by detector
              name ('FUV1', 'FUV2', 'NUV')
        save: If True: Save the result next to the input with "d" appended
              If False: Return the deconvolved hdu
    Output:
        If save=False: Deconvolved hdu
        If save=True: 0
    '''
    hdr0=dc(rasfits[0].header)
    nlines=rasfits[0].header['NEXP']
    # Map each spectral-window name (TDESC*) to its HDU index (1-based).
    indices={rasfits[0].header[name]: ind+1 for ind, name in enumerate(rasfits[0].header['TDESC*'])}
    decondict={}
    hdrdict={}
    for key in indices:
        decondict[key]=np.zeros_like(rasfits[indices[key]].data)
        # Pick the PSF for the detector this window was recorded on.
        psfind=rasfits[0].header['TDET'+str(indices[key])]
        hdrdict[key]=dc(rasfits[indices[key]].header)
        for j in range(0, nlines):
            decondict[key][j]=isd.IRIS_SG_deconvolve(rasfits[indices[key]].data[j], psf=psfs[psfind], fft_div=True)
        # Refresh the per-window statistics keywords from the deconvolved data.
        hdr0['TDMEAN'+str(indices[key])]=np.mean(decondict[key])
        hdr0['TDRMS'+str(indices[key])]=np.sqrt(np.sum((decondict[key]-np.mean(decondict[key]))**2)/decondict[key].size)
        hdr0['TDMEDN'+str(indices[key])]=np.median(decondict[key])
        hdr0['TDMIN'+str(indices[key])]=np.min(decondict[key])
        hdr0['TDMAX'+str(indices[key])]=np.max(decondict[key])
        hdr0['TDSKEW'+str(indices[key])]=scist.skew(decondict[key], axis=None)
        hdr0['TDKURT'+str(indices[key])]=scist.kurtosis(decondict[key], axis=None)
        # Percentile keywords via sorted flattened data.
        # NOTE(review): int(np.round(len*p)) can equal len for small arrays,
        # which would raise IndexError — confirm windows are always large.
        flatdat=np.sort(decondict[key].flatten())
        hdr0['TDP01_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.01))]
        hdr0['TDP10_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.1))]
        hdr0['TDP25_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.25))]
        hdr0['TDP75_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.75))]
        hdr0['TDP90_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.9))]
        hdr0['TDP95_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.95))]
        hdr0['TDP98_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.98))]
        hdr0['TDP99_'+str(indices[key])]=flatdat[int(np.round(len(flatdat)*0.99))]
        del flatdat
    # Concatenate all windows (axis=2) to compute the whole-file statistics.
    for ind, key in enumerate(decondict):
        if ind==0:
            dattot=decondict[key] #Needed for header stuff. (DATa TOTal)
        else:
            dattot=np.concatenate((dattot, decondict[key]), axis=2)
    hdr0['DATAMEAN']=np.mean(dattot)
    hdr0['DATARMS']=np.sqrt(np.sum((dattot-np.mean(dattot))**2)/dattot.size)
    hdr0['DATAMEDN']=np.median(dattot)
    hdr0['DATAMIN']=np.min(dattot)
    hdr0['DATAMAX']=np.max(dattot)
    hdr0['DATASKEW']=scist.skew(dattot, axis=None)
    hdr0['DATAKURT']=scist.kurtosis(dattot, axis=None)
    flatdattot=np.sort(dattot.flatten())
    hdr0['DATAP01']=flatdattot[int(np.round(len(flatdattot)*0.01))]
    hdr0['DATAP10']=flatdattot[int(np.round(len(flatdattot)*0.1))]
    hdr0['DATAP25']=flatdattot[int(np.round(len(flatdattot)*0.25))]
    hdr0['DATAP75']=flatdattot[int(np.round(len(flatdattot)*0.75))]
    hdr0['DATAP90']=flatdattot[int(np.round(len(flatdattot)*0.9))]
    hdr0['DATAP95']=flatdattot[int(np.round(len(flatdattot)*0.95))]
    hdr0['DATAP98']=flatdattot[int(np.round(len(flatdattot)*0.98))]
    hdr0['DATAP99']=flatdattot[int(np.round(len(flatdattot)*0.99))]
    del dattot, flatdattot #I imagine these are large, so delete them after they are no longer needed
    # Rebuild the HDU list: primary header plus one image HDU per window.
    phdu=fits.PrimaryHDU(None, header=hdr0)
    hduls=[phdu]
    for key in indices:
        hduls.append(fits.ImageHDU(decondict[key], header=hdrdict[key]))
    hdul=fits.HDUList(hduls)
    if save:
        hdul.writeto(rasfits.filename()[:-5]+'d.fits')
        return(0)
    else:
        return(hdul)
def deconvolve(ras, quiet=False, save=False, limitcores=False):
    '''Deconvolve IRIS spectrograph data with the bundled instrument PSFs.

    Input Parameters:
        ras: String, list, or astropy.io.fits.hdu.hdulist.HDUList (hdu)
            String: Path to IRIS spectrograph file, or a wildcard path
                    (ie, /path/to/files/*fits) of the same observation
            List:   List of paths to spectrograph files of the same
                    observation, or list of hdus from the same observation
            hdu : An IRIS observation
        quiet: If True, suppress the progress bar
        save: If True: Save the files with d appended
              If False: Return the deconvolved hdus
        limitcores: If True: use all but one core. If False use all cores.
    Output:
        If save=False: Deconvolved hdu(s).
        If save=True: 0
    '''
    nworkers = cpus() - int(limitcores)
    pathlistin = False
    hdulistin = False
    if type(ras) == fits.hdu.hdulist.HDUList:
        assert ras[0].header['TELESCOP'] == 'IRIS'
        rasfits = dc(ras)
    elif type(ras) == str and '*' in ras:
        # Expand the wildcard into a sorted list of file paths.
        ras = ls(ras)  # fix: was ls(rass) — an undefined name (NameError)
        ras.sort()
        # fix: fits.open(...) returns an HDUList; index [0] before .header
        assert fits.open(ras[0])[0].header['TELESCOP'] == 'IRIS'
        pathlistin = True
    elif type(ras) == str:
        try:
            rasfits = fits.open(ras)
            assert rasfits[0].header['TELESCOP'] == 'IRIS'
        except OSError:
            # fix: fits.open raises OSError for a bad path; the original
            # caught NameError, which fits.open never raises.
            raise ValueError("Must supply fits file or path to fits file or * directory for one set of observations")
    elif type(ras) == list:
        if type(ras[0]) == fits.hdu.hdulist.HDUList:
            # fix: ras[0] is an HDUList; index [0] to reach the primary header
            assert ras[0][0].header['TELESCOP'] == 'IRIS'
            hdulistin = True
        else:
            try:
                assert fits.open(ras[0])[0].header['TELESCOP'] == 'IRIS'
                pathlistin = True
            except OSError:
                raise ValueError("Must supply fits file or * directory for one set of observations")
    else:
        raise ValueError("Must supply fits file or * directory for one set of observations")
    # Load the spectrograph PSFs shipped alongside this module, keyed by detector.
    toppath = path.dirname(path.realpath(__file__))
    with open(toppath + '/IRIS_SG_PSFs.pkl', 'rb') as psfpkl:
        psfsin = pickle.load(psfpkl)
    psfs = {'FUV1': psfsin['sg_psf_1336'], 'FUV2': psfsin['sg_psf_1394'], 'NUV': psfsin['sg_psf_2796']}
    if pathlistin:
        # fix: ProcessPoolExecutor's keyword is max_workers, not workers
        with concurrent.futures.ProcessPoolExecutor(max_workers=nworkers) as executor:
            futures = [executor.submit(ParDecon, rasfits=fits.open(ras[i]), psfs=psfs, save=save)
                       for i in range(0, len(ras))]
            # fix: total referenced the undefined name rasdirec
            for f in tqdm(concurrent.futures.as_completed(futures), total=len(futures), disable=quiet):
                pass
        # fix: return the results, not the Future objects themselves
        out = [f.result() for f in futures]
    elif hdulistin:
        with concurrent.futures.ProcessPoolExecutor(max_workers=nworkers) as executor:
            futures = [executor.submit(ParDecon, rasfits=ras[i], psfs=psfs, save=save)
                       for i in range(0, len(ras))]
            for f in tqdm(concurrent.futures.as_completed(futures), total=len(futures), disable=quiet):
                pass
        out = [f.result() for f in futures]
    else:
        # fix: was rasfits=ras — for a path input that passed the raw string
        # instead of the opened (and deep-copied) HDUList.
        out = ParDecon(rasfits=rasfits, psfs=psfs, save=save)
    if not save:
        return(out)
    else:
        return(0)
| StarcoderdataPython |
8124035 | <gh_stars>1-10
from scapy.all import *
import sqlite3
import sys
from pprint import pprint
# Query that pulls every captured GSM packet row from the sqlite database.
GSM_PACKET_QUERY = '''SELECT * FROM GSMPacket'''
# Usage check: expects the sqlite DB path and the output pcap path.
if len(sys.argv) != 3:
    print("Usage: python packetdumper.py <db-path> <pcap-dir>")
    sys.exit(-1)
sqlitedb = sys.argv[1]
pcapdir = sys.argv[2]
conn = sqlite3.connect(sqlitedb)
# Reference layout for the GSMTAP header built below.
# NOTE(review): this reference says version 0x01, but the writer emits
# GSMTAP v2 (0x02) — the code appears current; the comment text is stale.
'''
struct gsmtap_hdr {
        uint8_t version;        /* version, set to 0x01 currently */
        uint8_t hdr_len;        /* length in number of 32bit words */
        uint8_t type;           /* see GSMTAP_TYPE_* */
        uint8_t timeslot;       /* timeslot (0..7 on Um) */
        uint16_t arfcn;         /* ARFCN (frequency) */
        int8_t signal_dbm;      /* signal level in dBm */
        int8_t snr_db;          /* signal/noise ratio in dB */
        uint32_t frame_number;  /* GSM Frame Number (FN) */
        uint8_t sub_type;       /* Type of burst/channel, see above */
        uint8_t antenna_nr;     /* Antenna Number */
        uint8_t sub_slot;       /* sub-slot within timeslot */
        uint8_t res;            /* reserved for future use (RFU) */
}
'''
def construct_gsmtap_packet(pac_data):
    """Serialize one 16-byte GSMTAP v2 header followed by the packet payload.

    Field order and widths follow the struct gsmtap_hdr reference above;
    all multi-byte fields are big-endian (network byte order).
    """
    def u8(value):
        # One unsigned byte, big-endian (trivially, for a single byte).
        return value.to_bytes(1, 'big')

    header = b''.join([
        u8(2),                                  # version (GSMTAP v2)
        u8(4),                                  # hdr_len in 32-bit words (4 * 4 = 16 bytes)
        u8(pac_data['type']),                   # GSMTAP_TYPE_*
        u8(pac_data['timeslot']),               # Um timeslot (0..7)
        pac_data['arfcn'].to_bytes(2, 'big'),   # ARFCN (frequency)
        u8(pac_data['dbm'] & 0xff),             # signal level as int8 two's complement
        u8(0),                                  # snr_db (not recorded)
        pac_data['frameNo'].to_bytes(4, 'big'), # GSM frame number
        u8(pac_data['subtype']),                # burst/channel sub-type
        u8(0),                                  # antenna_nr
        u8(0),                                  # sub_slot
        u8(0),                                  # reserved
    ])
    # Append the captured payload after the fixed-size header.
    return header + bytes(pac_data['payload'])
# Read every row, rebuild a GSMTAP-over-UDP packet for each, and write a pcap.
with conn:
    cur = conn.cursor()
    packet_query = cur.execute(GSM_PACKET_QUERY)
    gsmtap_pacs = []
    for raw_pac in packet_query:
        pac_data = {}
        # This is hardcoded for now beware if the columns change
        pac_data['type'] = raw_pac[1]
        pac_data['subtype'] = raw_pac[2]
        pac_data['timeslot'] = raw_pac[3]
        pac_data['frameNo'] = raw_pac[4]
        pac_data['payload'] = raw_pac[5]
        pac_data['timestamp'] = raw_pac[6]
        pac_data['band'] = raw_pac[7]
        pac_data['arfcn'] = raw_pac[8]
        pac_data['dbm'] = raw_pac[9]
        gsmtap_payload = construct_gsmtap_packet(pac_data)
        # Wrap in IP/UDP to port 4729 (the conventional GSMTAP port).
        gsmtap_pac = IP(dst="127.0.0.1")/UDP(sport=12345, dport=4729)/Raw(gsmtap_payload)
        # Timestamp is divided by 1000 — presumably stored in milliseconds,
        # converted to seconds for the pcap; confirm against the capture side.
        gsmtap_pac.time = pac_data['timestamp'] / 1000
        gsmtap_pacs.append(gsmtap_pac)
    wrpcap(pcapdir, gsmtap_pacs)
| StarcoderdataPython |
6691188 | <filename>webapp/migrations/versions/8ae9b5ddadf6_add_audit_logs.py
"""add audit logs
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2021-04-22 12:06:35.642688
"""
from alembic import op
import sqlalchemy as sa
from app import app
# revision identifiers, used by Alembic.
# NOTE(review): the ids below look redacted in this dump ('<KEY>',
# '<PASSWORD>') — confirm the real revision hashes before running.
revision = '<KEY>'
down_revision = 'e<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Create the audit_log table.

    On staging/production the application role is granted INSERT only,
    making the table append-only for that role.
    """
    op.create_table('audit_log',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
    sa.Column('log_data', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    if app.config['ENV'] in ('staging', 'production'):
        # only INSERT allowed
        op.execute("""
            GRANT INSERT ON TABLE "audit_log" TO steuerlotse;
            GRANT ALL ON SEQUENCE audit_log_id_seq TO steuerlotse;
        """)
def downgrade():
    """Drop audit_log, reverting upgrade() (grants vanish with the table)."""
    op.drop_table('audit_log')
| StarcoderdataPython |
4952643 | from opencdms.models.climsoft.v4_1_1_core import Base
from apps.climsoft.db import engine
def migrate():
    """Create all Climsoft v4.1.1 core tables on the app's database engine."""
    Base.metadata.create_all(engine.db_engine)
| StarcoderdataPython |
3542633 | # import the time module
import time
import pygame
# define the countdown func.
def countdown(t):
while t:
#divmod functions return quotient and remainder
mins, secs = divmod(t, 60)
#:02d repesents that minutes and seconds will be represented in 2 digits
timer = '{:02d}:{:02d}'.format(mins, secs)
pygame.mixer.init()
pygame.mixer.music.load('clock.ogg')#any mp3/ogg file
pygame.mixer.music.play()
#while pygame.mixer.music.get_busy():
pygame.time.Clock().tick()
print(timer, end="\r")
time.sleep(1)
t -= 1
print('Time\'s up!!!')
# Prompt for the duration in seconds (int() below raises ValueError on
# non-numeric input).
t = input("For how many seconds do you want to set the timer? ")
# Start the countdown.
countdown(int(t))
| StarcoderdataPython |
3377583 | #使用多线程:在携程中集成阻塞io
import asyncio
from concurrent.futures import ThreadPoolExecutor
import socket
from urllib.parse import urlparse
def get_url(url):
    """Fetch *url* over a raw blocking socket and print the response body.

    Intended to run inside a thread-pool executor so the blocking I/O can
    overlap with other work driven by the asyncio event loop.
    """
    parsed = urlparse(url)
    host = parsed.netloc
    path = parsed.path or "/"
    # Context manager guarantees the socket is closed even when connect/recv
    # raises (the original leaked the fd on any error before client.close()).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, 80))  # blocking connect; does not burn CPU
        # sendall retries until the whole request is written; plain send()
        # may write only part of the buffer.
        client.sendall("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format(path, host).encode("utf8"))
        # Accumulate chunks in a list and join once; repeated bytes += is quadratic.
        chunks = []
        while True:
            d = client.recv(1024)
            if not d:
                break
            chunks.append(d)
    data = b"".join(chunks).decode("utf8")
    # The body follows the first blank line separating headers from content.
    html_data = data.split("\r\n\r\n")[1]
    print(html_data)
if __name__ == "__main__":
    import time
    start_time = time.time()
    # Run the 20 blocking fetches on a 3-thread pool, driven by the loop.
    loop = asyncio.get_event_loop()
    executor = ThreadPoolExecutor(3)
    tasks = [
        loop.run_in_executor(
            executor, get_url, "http://shop.projectsedu.com/goods/{}/".format(n)
        )
        for n in range(20)
    ]
    loop.run_until_complete(asyncio.wait(tasks))
    print("last time:{}".format(time.time() - start_time))
| StarcoderdataPython |
6675886 | <filename>lostanimals/lostpet/admin.py
from django.contrib import admin
from django_google_maps import widgets as map_widgets
from django_google_maps import fields as map_fields
from lostpet.models import Pet
# Register your models here.
class PetAdmin(admin.ModelAdmin):
    # Render AddressField inputs with the Google Maps address widget.
    formfield_overrides = {
        map_fields.AddressField: {'widget': map_widgets.GoogleMapsAddressWidget},
    }
# Expose Pet in the Django admin with the configuration above.
admin.site.register(Pet, PetAdmin)
| StarcoderdataPython |
4824349 | from django.db import models
class User(models.Model):
    # Fixture model: user with M2M groups; 'ordering' drives the default sort.
    username = models.CharField(max_length=100)
    email = models.EmailField()
    groups = models.ManyToManyField('Group')
    ordering = models.IntegerField(default=0)
    class Meta:
        ordering = ('ordering',)
class Group(models.Model):
    # Simple named group, referenced by User.groups.
    name = models.CharField(max_length=100)
class NonIntegerPk(models.Model):
    # Fixture model with a string primary key plus self-referential FKs.
    name = models.CharField(primary_key=True, max_length=10)
    relation = models.ForeignKey('self', null=True, blank=True)
    noise = models.ForeignKey('basic.FkModel', null=True, blank=True)
    for_inline = models.ForeignKey('self', null=True, blank=True,
        related_name='inline')
class CustomIntegerPk(models.Model):
    # Fixture model with an explicit (non-auto) integer primary key.
    id = models.IntegerField(primary_key=True)
class SubGroup(Group):
    # Multi-table inheritance child of Group, used to exercise subclassing.
    pass
class CustomSchema(models.Model):
    # Fixture model with custom column ('bar') and table ('foobar') names.
    name = models.CharField(primary_key=True, max_length=10, db_column='bar')
    class Meta:
        db_table = 'foobar'
class Caps(models.Model):
    # Fixture model whose table and column names use capital letters.
    id = models.IntegerField(primary_key=True, db_column='Id')
    name = models.CharField(max_length=10, db_column='Bar')
    class Meta:
        db_table = 'Caps'
| StarcoderdataPython |
6564985 | <gh_stars>1-10
from django.contrib.gis.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from ambulance.models import Location, LocationType
from equipment.models import EquipmentHolder
from environs import Env
# Environment reader; used below to gate MQTT publishing.
env = Env()
# Hospital model
class Hospital(Location):
    """Hospital location that owns an EquipmentHolder and mirrors changes
    to MQTT (publish on save, permissions-cache invalidation on create/delete)."""
    equipmentholder = models.OneToOneField(EquipmentHolder,
                                           on_delete=models.CASCADE,
                                           verbose_name=_('equipmentholder'))
    # active
    active = models.BooleanField(_('active'), default=True)
    def save(self, *args, **kwargs):
        """Save; lazily creates the equipment holder, forces the hospital
        location type, and publishes to MQTT when enabled."""
        # creation?
        created = self.pk is None
        # create equipment holder?
        try:
            if created or self.equipmentholder is None:
                self.equipmentholder = EquipmentHolder.objects.create()
        except EquipmentHolder.DoesNotExist:
            self.equipmentholder = EquipmentHolder.objects.create()
        # enforce type hospital
        self.type = LocationType.h.name
        # save to Hospital
        super().save(*args, **kwargs)
        if env.bool("DJANGO_ENABLE_MQTT_PUBLISH", default=True):
            # publish to mqtt (local import — presumably avoids a circular
            # import with the mqtt app; TODO confirm)
            from mqtt.publish import SingletonPublishClient
            SingletonPublishClient().publish_hospital(self)
        # just created?
        if created:
            # invalidate permissions cache
            from mqtt.cache_clear import mqtt_cache_clear
            mqtt_cache_clear()
    def delete(self, *args, **kwargs):
        """Delete after invalidating the MQTT permissions cache."""
        # invalidate permissions cache
        from mqtt.cache_clear import mqtt_cache_clear
        mqtt_cache_clear()
        # delete from Hospital
        super().delete(*args, **kwargs)
    def get_absolute_url(self):
        """Canonical detail-page URL for this hospital."""
        return reverse('hospital:detail', kwargs={'pk': self.id})
    def __str__(self):
        """Multi-line, human-readable summary (name, id, comment, audit info)."""
        return ('Hospital {}(id={})\n' +
                ' Comment: {}\n' +
                ' Updated: {} by {}').format(self.name,
                                             self.id,
                                             self.comment,
                                             self.updated_by,
                                             self.updated_on)
| StarcoderdataPython |
5000196 | from . import config_links, packages
| StarcoderdataPython |
230194 | <reponame>a76yyyy/ipdata
import os
# Absolute paths anchored at the package's data/ and tmp/ directories
# (one level above this file's directory).
data_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))+os.path.sep+"data")
tmp_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))+os.path.sep+"tmp")
# Default IPv6 database bundled with the package.
DEFAULT_FILE_LOCATION = os.path.join(data_dir,'ipv6wry.db')
# SQL schema files used when (re)building the database tables.
sql_file = os.path.abspath(data_dir+os.path.sep+"ipdatabase.sql")
table_college_info_sql_file = os.path.abspath(data_dir+os.path.sep+"college_info.sql")
table_iprange_info_sql_file = os.path.abspath(data_dir+os.path.sep+"iprange_info.sql")
table_ipv6_range_info_sql_file = os.path.abspath(data_dir+os.path.sep+"ipv6_range_info.sql")
__all__ = ['collegeUpdate', 'convert','dat2mysql','dat2sqlite3','dat2txt','file_set','ip_Sync','ipSearch','ipUpdate','ipv6Update'] | StarcoderdataPython |
1650967 | from .loss import MemReplayLoss
| StarcoderdataPython |
4896547 | <filename>wandb/sdk/interface/interface_sock.py
"""InterfaceSock - Derived from InterfaceShared using a socket to send to internal thread
See interface.py for how interface classes relate to each other.
"""
import logging
from typing import Any, Optional
from typing import TYPE_CHECKING
from .interface_shared import InterfaceShared
from .message_future import MessageFuture
from .router_sock import MessageSockRouter
from ..lib.sock_client import SockClient
if TYPE_CHECKING:
from wandb.proto import wandb_internal_pb2 as pb
from ..wandb_run import Run
logger = logging.getLogger("wandb")
class InterfaceSock(InterfaceShared):
    """Interface that ships records to the internal process over a socket.

    Every outgoing record is stamped with the stream id of the run this
    interface was bound to via _hack_set_run().
    """
    _stream_id: Optional[str]
    _sock_client: SockClient
    def __init__(self, sock_client: SockClient) -> None:
        # _sock_client is used when abstract method _init_router() is called by constructor
        self._sock_client = sock_client
        super().__init__()
        self._process_check = False
        self._stream_id = None
    def _init_router(self) -> None:
        """Create the socket-backed message router (invoked from __init__)."""
        self._router = MessageSockRouter(self._sock_client)
    def _hack_set_run(self, run: "Run") -> None:
        """Bind this interface to *run*; its run id becomes the stream id."""
        super(InterfaceSock, self)._hack_set_run(run)
        assert run._run_id
        self._stream_id = run._run_id
    def _assign(self, record: Any) -> None:
        """Stamp the bound stream id onto an outgoing record."""
        assert self._stream_id
        record._info.stream_id = self._stream_id
    def _publish(self, record: "pb.Record", local: bool = None) -> None:
        """Fire-and-forget send: no response is awaited."""
        self._assign(record)
        self._sock_client.send_record_publish(record)
    def _communicate_async(self, rec: "pb.Record", local: bool = None) -> MessageFuture:
        """Send a record and return a future that resolves with the response."""
        self._assign(rec)
        assert self._router
        if self._process_check and self._process and not self._process.is_alive():
            raise Exception("The wandb backend process has shutdown")
        future = self._router.send_and_receive(rec, local=local)
        return future
    def _communicate_stop_status(
        self, status: "pb.StopStatusRequest"
    ) -> Optional["pb.StopStatusResponse"]:
        # Message stop_status is called from a daemon thread started by wandb_run
        # The underlying socket might go away while the thread is still running.
        # Handle this like a timedout message as the daemon thread will eventually
        # be killed.
        try:
            data = super()._communicate_stop_status(status)
        except BrokenPipeError:
            data = None
        return data
    def _communicate_network_status(
        self, status: "pb.NetworkStatusRequest"
    ) -> Optional["pb.NetworkStatusResponse"]:
        # Message network_status is called from a daemon thread started by wandb_run
        # The underlying socket might go away while the thread is still running.
        # Handle this like a timedout message as the daemon thread will eventually
        # be killed.
        try:
            data = super()._communicate_network_status(status)
        except BrokenPipeError:
            data = None
        return data
| StarcoderdataPython |
6594702 | <gh_stars>10-100
"""
bidsUtils.py
============================
Description:
This file is used for formatting pipline data into the BIDS format
Author:
<NAME>
Usage: N/A not a command line script (no bang above)
"""
from os.path import join
class BAWBIDSFormatter(object):
    """Builds BIDS-style file names (e.g. ``sub-01_ses-02_task-rest``)
    from a dictionary of subject metadata."""

    def __init__(self):
        # Entity keywords that must appear in every generated name.
        self.sub_kw = "sub"
        self.ses_kw = "ses"
        self.REQUIRED_FIELDS = [self.ses_kw, self.sub_kw]

    def get_bids_name(
        self, subject_data: dict, full_path: str = None, ext: str = None
    ) -> str:
        """
        :param subject_data: a dictionary of information about the subject including subject id and session id
        :param full_path: a string representing the path to join the bids name to
        :param ext: an optional file extension parameter
        :return: a formatted string containing all of the subject information as a file name
        """
        # Both required entities must be present before anything is formatted.
        for label, keyword in (("Subject", self.sub_kw), ("Session", self.ses_kw)):
            if keyword not in subject_data:
                raise KeyError(
                    f"{label} must have a key of '{keyword}' in order to generate a meaningful filename."
                )
        # Required entities first, then the remaining keys in sorted order.
        pieces = [
            f"{self.sub_kw}-{subject_data[self.sub_kw]}",
            f"{self.ses_kw}-{subject_data[self.ses_kw]}",
        ]
        pieces.extend(
            f"{key}-{subject_data[key]}"
            for key in sorted(subject_data)
            if key not in self.REQUIRED_FIELDS
        )
        bids_name = "_".join(pieces)
        # Optional extension, then optional directory prefix.
        if ext:
            bids_name = f"{bids_name}.{ext}"
        if full_path:
            bids_name = join(full_path, bids_name)
        return bids_name
| StarcoderdataPython |
4961818 | <gh_stars>0
from simple import func1
from whatever import func2
from world import func3
| StarcoderdataPython |
3475698 | <filename>Chapter01/file.py
# Demonstrate basic file I/O: write two lines, then read them back.
# 'with' closes the file automatically, even if an exception occurs
# (the original relied on explicit f.close() calls that are skipped on error).
with open('test.txt', 'w') as f:
    f.write('first line of file \n')
    f.write('second line of file \n')

with open('test.txt') as f:
    content = f.read()

print(content)
1962025 | from util.enums import RES
# Tile dimensions in pixels.
TILE_H = 32
TILE_W = 32
# Default UI font (path relative to the RES asset root) and point size.
DEFAULT_FONT = RES + "fonts/FiraSans-Light.ttf"
DEFAULT_FONT_SIZE = 24
# Screen coordinates for the two rows of heart (life) icons.
HEART_LOCS = [[(770, 5), (805, 5), (840, 5), (875, 5)],
              [(770, 39), (805, 39), (840, 39), (875, 39)]]
# Screen coordinates of the six tower slots.
TOWER_LOCS = [(781, 99), (781, 157), (781, 217),
              (846, 99), (846, 157), (846, 217)]
# Screen coordinates of the two upgrade buttons.
UPGRADE_LOCS = [(336, 587), (551, 587)]
# HUD anchor positions for the gear and bunch indicators.
GEAR_LOC = (10, 634)
BUNCH_LOC = (10, 584)
5053844 | import itertools as itt
from typing import List, Optional, Sequence
from rl_rpsr.linalg import cross_sum
from rl_rpsr.pruning import inc_prune, purge
from rl_rpsr.util import VI_Type
from rl_rpsr.value_function import Alpha, ValueFunction
from rl_rpsr.value_iteration import VI_Algo
from .model import RPSR_Model
__all__ = ['vi_factory', 'VI_Enum', 'VI_IncPruning']
def _bootstrap(model: RPSR_Model, action, observation, vector):
    """Discounted one-step backup of *vector* through M_aoI[action, observation]."""
    return model.discount * model.M_aoI[action, observation] @ vector
def _make_alpha(
    model: RPSR_Model, action, next_alphas: Optional[Sequence[Alpha]] = None
) -> Alpha:
    """Build the alpha-vector for *action*.

    Starts from the immediate reward column R[:, action]; when *next_alphas*
    gives one successor alpha per observation, adds their discounted backups.
    """
    vector = model.R[:, action].copy()
    if next_alphas is not None:
        for observation, alpha in enumerate(next_alphas):
            vector += _bootstrap(model, action, observation, alpha.vector)
    return Alpha(action, vector)
def vi_factory(vi_type: VI_Type) -> VI_Algo:
    """Build the value-iteration implementation registered for *vi_type*."""
    builders = {
        VI_Type.ENUM: lambda: VI_Enum(),
        VI_Type.INC_PRUNING: lambda: VI_IncPruning(true_inc_pruning=False),
        VI_Type.TRUE_INC_PRUNING: lambda: VI_IncPruning(true_inc_pruning=True),
    }
    builder = builders.get(vi_type)
    if builder is None:
        raise ValueError(f'No implementation for VI type {vi_type}')
    return builder()
class VI_Enum(VI_Algo):
    """Value iteration by exhaustive enumeration of one-step alpha-vector
    combinations, followed by a purge of dominated vectors."""
    def iterate(
        self, model: RPSR_Model, vf: ValueFunction, **kwargs
    ) -> ValueFunction:
        """One Bellman backup: enumerate every (action, per-observation
        successor-alpha assignment) candidate, then prune dominated ones."""
        alphas = vf.alphas
        # |A| * |alphas|^|O| candidates: one per action and per assignment of
        # a successor alpha to each observation.
        alphas = [
            _make_alpha(model, a, next_alphas)
            for a in range(model.action_space.n)
            for next_alphas in itt.product(
                alphas, repeat=model.observation_space.n
            )
        ]
        alphas = purge(
            alphas, model.V, key=lambda alpha: alpha.vector, **kwargs
        )
        return ValueFunction(alphas, vf.horizon + 1)
class VI_IncPruning(VI_Algo):
    """Value iteration with incremental pruning: per-(action, observation)
    vector sets are pruned before (or while) being cross-summed."""
    def __init__(self, true_inc_pruning=True):
        # true_inc_pruning: prune while cross-summing (inc_prune) instead of
        # cross-summing fully first and purging the result afterwards.
        super().__init__()
        self.true_inc_pruning = true_inc_pruning
    def iterate(
        self, model: RPSR_Model, vf: ValueFunction, **kwargs
    ) -> ValueFunction:
        """One Bellman backup with pruning at each intermediate stage."""
        alphas = vf.alphas
        S: List[Alpha] = []
        for a in range(model.action_space.n):
            # Reward split evenly across observations so the per-observation
            # vectors sum back to R[:, a] plus the discounted backups.
            R_over_O = model.R[:, a] / model.observation_space.n
            S_ao = []
            for o in range(model.observation_space.n):
                vectors = [
                    R_over_O + _bootstrap(model, a, o, alpha.vector)
                    for alpha in alphas
                ]
                self.logger.debug('purging S_ao a=%d o=%d', a, o)
                vectors = purge(vectors, model.V, **kwargs)
                S_ao.append(vectors)
            if self.true_inc_pruning:
                S_a = inc_prune(S_ao, model.V, **kwargs)
            else:
                self.logger.debug('cross_sum S_a a=%d', a)
                S_a = cross_sum(S_ao)
                self.logger.debug('purging S_a a=%d', a)
                S_a = purge(S_a, model.V, **kwargs)
            S.extend(Alpha(a, vector) for vector in S_a)
        self.logger.debug('purging S')
        alphas = purge(S, model.V, key=lambda alpha: alpha.vector, **kwargs)
        return ValueFunction(alphas, vf.horizon + 1)
| StarcoderdataPython |
3257256 | <reponame>similarweb/gru
from string import Template
import simpleldap
from gru.plugins.base.auth import AuthenticationBackend, User
from gru.config import settings
class LdapBackend(AuthenticationBackend):
    """
    LDAP authentication backend.
    expects the following configuration in the inventory.yaml file
    under authentication.config:
    - server (string) - server IP or dns name
    - port (int) - port number
    - bind_user (string) - initial user name to connect to LDAP server
    - bind_password(string) - password for the initial user
    - user_query (string) - the LDAP query to make when searching for the user.
    Should contain a $username placeholder.
    """
    required_settings = [
        'authentication.config.server',
        'authentication.config.port',
        'authentication.config.bind_user',
        'authentication.config.bind_password',
        'authentication.config.user_query'
    ]
    @staticmethod
    def _split_ldap_spec(ldap_spec):
        # Parse a "k1=v1,k2=v2" LDAP spec (e.g. a DN) into a dict.
        return { k.split('=')[0]:k.split('=')[1] for k in ldap_spec.split(',') }
    def authenticate(self, username, password):
        """Two-phase LDAP auth: locate the user's DN with the bind account,
        then re-bind as that DN with *password*.

        Returns a User (with group CNs extracted from 'memberof') on
        success, or None when the user is missing or the password is wrong.
        """
        server = settings.get('authentication.config.server')
        port = settings.get('authentication.config.port')
        bind_user = settings.get('authentication.config.bind_user')
        bind_password = settings.get('authentication.config.bind_password')
        query = Template(settings.get('authentication.config.user_query'))
        with simpleldap.Connection(server, port, bind_user, bind_password) as conn:
            try:
                user = conn.get(query.substitute(username=username))
            except simpleldap.ObjectNotFound:
                return None
        # Fresh anonymous connection; authenticate() binds as the found DN.
        with simpleldap.Connection(server, port) as conn:
            if conn.authenticate(user.dn, password):
                return User(
                    username=username,
                    name=user.first('cn'),
                    groups=[self._split_ldap_spec(x)['CN'] for x in user.get('memberof', [])]
                )
        return None
| StarcoderdataPython |
3472765 | <gh_stars>10-100
"""
Readability OAuth1 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/readability.html
"""
from social.backends.oauth import BaseOAuth1
READABILITY_API = 'https://www.readability.com/api/rest/v1'
class ReadabilityOAuth(BaseOAuth1):
    """Readability OAuth authentication backend"""
    name = 'readability'
    # The stable user identifier within the provider's payload.
    ID_KEY = 'username'
    AUTHORIZATION_URL = '{0}/oauth/authorize/'.format(READABILITY_API)
    REQUEST_TOKEN_URL = '{0}/oauth/request_token/'.format(READABILITY_API)
    ACCESS_TOKEN_URL = '{0}/oauth/access_token/'.format(READABILITY_API)
    # Provider fields persisted alongside the social auth record.
    EXTRA_DATA = [('date_joined', 'date_joined'),
                  ('kindle_email_address', 'kindle_email_address'),
                  ('avatar_url', 'avatar_url'),
                  ('email_into_address', 'email_into_address')]
    def get_user_details(self, response):
        """Map the Readability profile payload to the common details dict."""
        fullname, first_name, last_name = self.get_user_names(
            first_name=response['first_name'],
            last_name=response['last_name']
        )
        return {'username': response['username'],
                'fullname': fullname,
                'first_name': first_name,
                'last_name': last_name}
    def user_data(self, access_token):
        """Fetch the authenticated user's profile from the REST API."""
        return self.get_json(READABILITY_API + '/users/_current',
                             auth=self.oauth_auth(access_token))
| StarcoderdataPython |
12850213 | <gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Built and managed with Open Source Love by BeeHyv Software Solutions Pvt Ltd. Hyderabad
# www.beehyv.com
import uuid
from django.http import HttpResponse, JsonResponse
from rest_framework import status
from communications.CommunicationHandler import CommunicationHandler
from otpvalidation.models import Otp
from survey.models import SurveyInstance
def send_otp(request):
    """Generate and send a 6-char OTP to the health worker of the survey
    given by ?survey_id=, then return the worker's contact details as JSON.

    Prefers the whatsapp number over the phone number when present.
    Returns HTTP 500 with the exception text on any failure.
    """
    try:
        # Derive a short OTP from a UUID (hex chars, dashes stripped).
        otpValue = str(uuid.uuid4()).replace('-', '')[:6]
        entity = Otp()
        entity.otp = otpValue
        instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
        worker = instance.health_worker_id
        if worker.whatsapp_number and len(worker.whatsapp_number) > 0:
            entity.phone_number = worker.whatsapp_number
        else:
            entity.phone_number = worker.phone_number
        entity.email = worker.email
        # Message template 5 presumably renders the OTP — TODO confirm
        commHandler = CommunicationHandler()
        commHandler.send_message(worker, 5, {'otp': entity.otp})
        entity.save()
    except Exception as e:
        return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    response = {}
    response['name'] = instance.health_worker_id.first_name
    response['consent'] = instance.health_worker_id.is_consented
    response['email'] = entity.email
    response['phone_number'] = entity.phone_number
    return JsonResponse(response)
def resend_otp(request):
    """Re-send the most recent OTP for the survey's health worker.

    Looks up the newest Otp row for the worker's phone number and re-sends
    it; returns the worker's contact details as JSON.
    NOTE(review): if no Otp row exists, the response-building below will
    raise on entity.email — confirm intended behaviour.
    """
    try:
        instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
        worker = instance.health_worker_id
        entity = Otp.objects.filter(phone_number=worker.phone_number).order_by('-id').first()
        if entity:
            commHandler = CommunicationHandler()
            commHandler.send_message(worker, 5, {'otp': entity.otp})
    except Exception as e:
        return HttpResponse(e, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    response = {}
    response['name'] = worker.first_name
    response['consent'] = instance.health_worker_id.is_consented
    response['email'] = entity.email
    response['phone_number'] = entity.phone_number
    return JsonResponse(response)
def verify_otp(request):
    """Check the submitted ?otp= against the survey worker's stored OTPs.

    On a match: mark the worker consented, delete the consumed OTP row,
    and report {'verified': true}; otherwise {'verified': false}.
    """
    submitted = request.GET['otp']
    instance = SurveyInstance.objects.filter(pk=request.GET['survey_id']).first()
    worker = instance.health_worker_id
    # Try the whatsapp number first, then fall back to the phone number.
    match = Otp.objects.filter(phone_number=worker.whatsapp_number, otp=submitted).first()
    if not match:
        match = Otp.objects.filter(phone_number=worker.phone_number, otp=submitted).first()
    if match:
        worker.is_consented = True
        worker.save()
        match.delete()  # one-time use: consume the OTP row
        verified = True
    else:
        verified = False
    return JsonResponse({'verified': verified})
5014719 | <gh_stars>0
# _*_ coding: utf-8 _*_
from aip import AipOcr
import wda
import cv2
import webbrowser
import time
import datetime
from urllib import parse
import numpy as np
import requests
# """ 你的 APPID AK SK """
APP_ID = '10701834'
API_KEY = '<KEY>'
SECRET_KEY = '<KEY>'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
# c = wda.Client('http://192.168.0.117:8100')
# s = c.session()
# print(s.window_size())
def printNowDatetime():
    """Print the current local time formatted as YYYY-MM-DD HH:MM:SS."""
    stamp = datetime.datetime.now()
    print(stamp.strftime("%Y-%m-%d %H:%M:%S"))
def getImage(url):
    """Return the raw bytes of the file at *url* (a local path, despite the name)."""
    with open(url, 'rb') as handle:
        data = handle.read()
    return data
def ocrImage(image):
    # image = getImage('/Users/user/Desktop/testP.png')
    """Run Baidu general OCR on raw image bytes and return the recognised
    lines concatenated into one string."""
    # Optional request parameters for the OCR API.
    options = {}
    options["language_type"] = "CHN_ENG"
    options["detect_direction"] = "true"
    options["detect_language"] = "true"
    options["probability"] = "true"
    """ General (basic) OCR endpoint, called with local image bytes """
    response = client.basicGeneral(image, options)
    # NOTE(review): debug prints of the raw API response left in place.
    print(response)
    print(type(response))
    words = response['words_result']
    appendWord = ''
    for item in words:
        # (the trailing + '' is a no-op separator)
        appendWord += item['words'] + ''
    return appendWord
def cvCutImg(x, y, width, height, img):
    """Return the (height x width) crop of *img* whose top-left corner is (x, y).

    Note the row axis comes first in the array, so y selects rows and x columns.
    """
    rows = slice(y, y + height)
    cols = slice(x, x + width)
    return img[rows, cols]
def cvBytes_to_numpyNdarray(imgBytes):
    """Decode raw encoded image bytes (e.g. PNG/JPEG) into an OpenCV colour ndarray."""
    img = np.asarray(bytearray(imgBytes), np.uint8)
    img = cv2.imdecode(img, cv2.IMREAD_COLOR)
    # cv2.imshow('mm', img)
    # cv2.waitKey(0)
    # img type is numpy.ndarray
    # img = cv2.imread('/Users/user/Desktop/testP.png')
    return img
def cvNumpyNdarray_to_bytes(img):
    """Serialize a numpy ndarray into its raw byte representation."""
    return img.tobytes()
def chongdingdahui():
    """Screenshot the quiz app, OCR the question region, and open a Baidu search."""
    img = c.screenshot('screen01.png')
    # img = getImage('chongdingdahui.jpg')
    image = cvBytes_to_numpyNdarray(img)
    # Question-text region for this app's layout: (x, y, width, height).
    cutImg = cvCutImg(25, 320, 700, 175, image)
    cv2.imwrite('cut.png', cutImg)
    image = getImage('cut.png')
    # Single OCR pass; the original re-read the file and called the paid
    # OCR API a second time, discarding the first result.
    ocrwd = ocrImage(image)
    wd = parse.quote(ocrwd)
    url = 'https://www.baidu.com/s?wd=' + wd
    webbrowser.open(url)
def xiguashiping():
    """OCR the question region of a saved xigua-video quiz frame and Baidu it."""
    # img = c.screenshot('screen01.png')
    img = getImage('xiguaishipin.jpg')
    image = cvBytes_to_numpyNdarray(img)
    # Question-text region for this app's layout: (x, y, width, height).
    cutImg = cvCutImg(40, 220, 670, 175, image)
    cv2.imwrite('cut.png', cutImg)
    image = getImage('cut.png')
    # Single OCR pass; the original duplicated the read + OCR call,
    # doubling the paid API cost per frame while discarding the first result.
    ocrwd = ocrImage(image)
    wd = parse.quote(ocrwd)
    url = 'https://www.baidu.com/s?wd=' + wd
    webbrowser.open(url)
if __name__ == "__main__":
    print('--')
    # Poll forever: every 3 seconds OCR the current frame and open a search.
    while True:
        time.sleep(3)
        printNowDatetime()
        # chongdingdahui()
        xiguashiping()
| StarcoderdataPython |
9753315 |
from django.conf.urls import url
from hood import views
from django.conf import settings
from django.conf.urls.static import static
# Neighbourhood ("hood") app routes.
urlpatterns=[
    url('^$',views.index, name='index'),
    url('^edit/',views.edit_profile, name='edit_profile'),
    url(r'^user/(?P<username>\w+)', views.user_profile, name='user_profile'),
    url(r'^join/(\d+)', views.join, name='join'),
    url(r'^add/hood$', views.add, name='add'),
    url(r'^leave/(\d+)', views.leave, name='leave'),
    url('^post/',views.add_post, name='add_post'),
    url('^add_business/',views.add_business, name='add_business'),
    url('^search/',views.search_business, name='search_business'),
    url('^listing/', views.business_listing, name='business_listing'),
    url('^hoods/$', views.hood_listing, name='hood_listing'),
    url('^hood/info$', views.hood_info, name='hood_info'),
    url('^hood/thread$', views.hood_thread, name='hood_thread'),
]
# Serve user-uploaded media through Django only while DEBUG is on.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| StarcoderdataPython |
9600688 | from django import forms
from accounts.models import UserProfile
class ProfileEditForm(forms.ModelForm):
    """Profile edit form; every widget gets Bootstrap's form-control class."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap styling uniformly to all fields.
        for (_, field) in self.fields.items():
            field.widget.attrs['class'] = 'form-control'
    # Extra non-model field — presumably mapped to the related User's
    # username by the view; TODO confirm where it is saved.
    username = forms.CharField(max_length=150)
    class Meta:
        model = UserProfile
        exclude = ('user', )
4826204 | <reponame>awesome-archive/Automatic_Speech_Recognition<gh_stars>0
# -*- coding:utf-8 -*-
import os
import numpy as np
import scipy.io.wavfile as wav
from calcmfcc import calcMFCC_delta_delta
# The 61 TIMIT phoneme labels; a phoneme's class id is its index in this list.
PHN_LOOKUP_TABLE = ['aa', 'ae', 'ah', 'ao', 'aw', 'ax', 'ax-h', 'axr', 'ay', 'b', 'bcl', 'ch', 'd', 'dcl', 'dh', 'dx',
                    'eh', 'el', 'em', 'en', 'eng', 'epi', 'er', 'ey', 'f', 'g', 'gcl', 'h#', 'hh', 'hv', 'ih', 'ix',
                    'iy', 'jh', 'k', 'kcl', 'l', 'm', 'n', 'ng', 'nx', 'ow', 'oy', 'p', 'pau', 'pcl', 'q', 'r', 's',
                    'sh', 't', 'tcl', 'th', 'uh', 'uw', 'ux', 'v', 'w', 'y', 'z', 'zh']
def wav_to_mfcc(wav_file_path):
    """Load a wav file and return its MFCC+delta+delta features, transposed
    to (feature, frame); 20 ms windows with a 10 ms step."""
    rate, sig = wav.read(wav_file_path)
    mfcc = calcMFCC_delta_delta(sig, rate, win_length=0.020, win_step=0.010)
    mfcc = np.transpose(mfcc)
    return mfcc
def create_label(phn_file_path):
    """Parse a TIMIT .phn file into an array of phoneme class indices.

    Each line is "start end phoneme"; the third field is mapped to its
    index in PHN_LOOKUP_TABLE (index() raises ValueError on unknown labels).
    """
    with open(phn_file_path, 'r') as f:
        indices = [
            PHN_LOOKUP_TABLE.index(line.split(' ')[2])
            for line in f.read().splitlines()
        ]
    return np.array(indices)
def transform_raw_data(raw_data_dir, dest_mfcc_dir, dest_label_dir):
    """Walk the TIMIT tree: for each .wav, save MFCC features and phoneme
    labels (from the sibling .phn file) as .npy files.

    Output names are '<parentdir>-<basename>.npy'; the dest_* arguments are
    treated as string prefixes, so they should end with a path separator.
    """
    i = 0
    for subdir, dirs, files in os.walk(raw_data_dir):
        for file in files:
            if file.endswith('.wav'):
                wav_file_path = os.path.join(subdir, file)
                speech_file_base = os.path.splitext(wav_file_path)[0]
                phn_file_path = speech_file_base + '.phn'
                mfcc_file_path = dest_mfcc_dir + '-'.join(speech_file_base.split('/')[-2:]) + '.npy'
                label_file_path = dest_label_dir + '-'.join(speech_file_base.split('/')[-2:]) + '.npy'
                print('[{}] processing: {}'.format(i, wav_file_path))
                mfcc = wav_to_mfcc(wav_file_path)
                np.save(mfcc_file_path, mfcc)
                phenome = create_label(phn_file_path)
                np.save(label_file_path, np.array(phenome))
                i += 1
| StarcoderdataPython |
9781354 | <gh_stars>10-100
# coding=utf-8
import os
# Absolute path of the parent directory of this file's directory.
last_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# The lib directory and its per-platform subdirectories.
LIB_DIR = os.path.join(last_dir, u"lib")
LINUX_X64_DIR = os.path.join(LIB_DIR, u"linux_x64")
WINDOWS_DIR = os.path.join(LIB_DIR, u"windows")
WIN32_DIR = os.path.join(LIB_DIR, u"win32")
WIN64_DIR = os.path.join(LIB_DIR, u"win64")
def test():
    """Print a few resolved library directories for a quick sanity check."""
    for directory in (LIB_DIR, LINUX_X64_DIR, WIN32_DIR):
        print(directory)
# Allow running this module directly for a quick path sanity check.
if __name__ == '__main__':
    test()
| StarcoderdataPython |
1938772 | import logging
import os
import subprocess
import sys
import traceback
from datetime import datetime
import oss2
import prettytable
import requests
from automonkey.config import DefaultConfig
from automonkey.exception import FileDownloadErrorException
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# (The string below is a stray literal acting as a banner; it means "utility class".)
"""
# 工具类
"""
class Utils(object):
    """Grab-bag of helpers for the monkey-test pipeline: shell execution,
    bugreport parsing, table printing, APK download and OSS uploads."""
    @classmethod
    def command_execute(cls, cmd):
        """Run *cmd* via bash and return the Popen (stdout piped, stderr merged).

        Returns False for an empty command and None if Popen itself raises.
        """
        try:
            if not cmd:
                return False
            logger.info(cmd)
            command_process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                               executable="/bin/bash")
            return command_process
        except Exception as e:
            logger.error(e)
            traceback.print_exc()
    @classmethod
    def bug_report_tool(cls, log_path):
        """Run the bundled chkbugreport tool on *log_path*.

        Returns the tool's stdout lines, or None when the log or tool path
        is missing (or on any error, which is logged).
        """
        try:
            if not os.path.exists(log_path):
                return None
            local_path = os.getcwd()
            chk_path = os.path.abspath(os.path.join(local_path, "./tools/check_bug_report/chkbugreport"))
            jar_path = os.path.abspath(os.path.join(local_path, "./tools/check_bug_report"))
            if not os.path.exists(chk_path):
                logger.error('tool path not found at {}'.format(chk_path))
                return None
            os.system('chmod +x {}'.format(chk_path))
            cmd = '{} {} {}'.format(chk_path, log_path, jar_path)
            p = cls.command_execute(cmd)
            return p.stdout.readlines()
        except Exception as e:
            logger.error(e)
            logger.error(traceback.format_exc())
    @classmethod
    def show_info_as_table(cls, keys, values):
        """Log *values* as a pretty table.

        keys: list of column names; values: list of rows (lists or dicts),
        or a single dict rendered as key/value pairs.
        """
        # keys list; values list(dict) or dict
        table = prettytable.PrettyTable()
        if isinstance(values, list):
            table.field_names = keys
            for v in values:
                if isinstance(v, list):
                    row = v
                elif isinstance(v, dict):
                    row = v.values()
                table.add_row(row)
        elif isinstance(values, dict):
            table.field_names = ['key', 'value']
            for v in keys:
                row = [v, values.get(v)]
                table.add_row(row)
        logger.info('\n{}'.format(table))
    # Download an apk file from a URL.
    @classmethod
    def download_apk_from_url(cls, url, target_path, target_name):
        """Download the apk at *url* into *target_path*.

        A None target_name gets a timestamped name; a missing '.apk' suffix
        is appended. Returns the local file path; raises
        FileDownloadErrorException when the file never appears on disk.
        """
        try:
            if not os.path.exists(target_path):
                os.mkdir(target_path)
            if target_name is None:
                date_time_now = datetime.now().strftime('%Y%m%d-%H.%M.%S')
                target_name = '{}.apk'.format(date_time_now)
            elif not target_name.endswith('.apk'):
                target_name = '{}.apk'.format(target_name)
            download_apk_name = os.path.join(target_path, target_name)
            logger.info('开始从 {} 下载到 {}'.format(url, download_apk_name))
            # TLS verification disabled — presumably for internal hosts; confirm.
            response = requests.get(url=url, verify=False)
            with open(download_apk_name, 'wb') as f:
                f.write(response.content)
            # Treat a missing file on disk as a failed download.
            if not os.path.exists(download_apk_name):
                logger.error('{} 下载失败!'.format(url))
                raise FileDownloadErrorException
            logger.info('下载成功,保存地址 {}'.format(download_apk_name))
            return download_apk_name
        except Exception as e:
            print(e)
            traceback.print_exc()
            raise e
    @classmethod
    def upload_bug_report_log_to_oss(cls, log_path):
        """Upload an entire bugreport log directory to the default OSS bucket."""
        endpoint = DefaultConfig.OSS_URL
        auth = DefaultConfig.OSS_AUTH
        bucket = oss2.Bucket(auth, endpoint, DefaultConfig.OSS_BUCKET_NAME)
        cls.upload_dir(log_path, bucket)
    @classmethod
    def upload_file_to_oss(cls, local_path, bucket):
        """Upload one file under monkey/<date>/<BUILD_NUMBER>/<local_path>.

        Creates a default bucket when *bucket* is falsy.
        """
        if not bucket:
            endpoint = DefaultConfig.OSS_URL
            auth = DefaultConfig.OSS_AUTH
            bucket = oss2.Bucket(auth, endpoint, DefaultConfig.OSS_BUCKET_NAME)
        now = datetime.now().strftime("%Y-%m-%d")
        build_number = os.environ.get('BUILD_NUMBER')
        remote_file_path = 'monkey/{}/{}/{}'.format(now, build_number, local_path)
        bucket.put_object_from_file('{}'.format(remote_file_path), local_path)
    @classmethod
    def upload_dir(cls, dir_path, bucket):
        """Recursively upload *dir_path* to OSS, skipping .DS_Store files."""
        if not bucket:
            endpoint = DefaultConfig.OSS_URL
            auth = DefaultConfig.OSS_AUTH
            bucket = oss2.Bucket(auth, endpoint, DefaultConfig.OSS_BUCKET_NAME)
        fs = os.listdir(dir_path)
        dir_path_new = dir_path
        for f in fs:
            file = dir_path_new + "/" + f
            if os.path.isdir(file):
                cls.upload_dir(file, bucket)
            else:
                if 'DS_Store' not in file:
                    print(file)
                    cls.upload_file_to_oss(file, bucket)
    @classmethod
    def deal_with_python_version(cls, data):
        """On Python 3, decode bytes (or a list of bytes) to utf-8 strings;
        on Python 2, return *data* unchanged."""
        if str(sys.version_info.major) == '3':
            if isinstance(data, list):
                return [d.decode('utf-8') for d in data]
            else:
                return data.decode('utf-8')
        else:
            return data
| StarcoderdataPython |
8032461 | <gh_stars>1-10
# Registry of all decorated directive callables, keyed by function name.
_available_directives = {}

def directive(fn):
    """Decorator: register *fn* as a directive and tag it; returns fn unchanged."""
    name = fn.__name__
    fn._directive = True
    _available_directives[name] = fn
    return fn

def get_directive(fn):
    """Look up a registered directive by name (raises KeyError if absent)."""
    return _available_directives[fn]

def get_available_directives():
    """Return the live name -> callable registry of directives."""
    return _available_directives
# Built-in directive: extract the JSON body from an incoming request.
@directive
async def body(request):
    return await request.json()
| StarcoderdataPython |
1707893 | from django.shortcuts import render,redirect
from django.http import HttpResponse
# Create your views here.
from .tasks import *
import pymongo
import datetime
from bson.objectid import ObjectId
from .local1 import *
from .regular1 import *
from django.http import JsonResponse
class vars():
    """Module-global run bookkeeping shared across requests.

    NOTE(review): the class name shadows the builtin vars(); renaming it
    would break importers, so it is kept as-is.
    """
    grid_var = 0       # counter used for 'selenium grid' runs (see automate())
    zel_var = 0        # counter used for 'zelenium' runs (see automate())
    process_list = []  # queued test ids (see append_to_list / delete_from_list)

    @staticmethod
    def inc_g_var():
        vars.grid_var = vars.grid_var + 1

    @staticmethod
    def dec_g_var():
        vars.grid_var = vars.grid_var - 1

    @staticmethod
    def get_g_var():
        return vars.grid_var

    @staticmethod
    def inc_z_var():
        vars.zel_var = vars.zel_var + 1

    @staticmethod
    def dec_z_var():
        vars.zel_var = vars.zel_var - 1

    @staticmethod
    def get_z_var():
        return vars.zel_var

    @staticmethod
    def append_to_list(item):
        vars.process_list.append(item)

    @staticmethod
    def delete_from_list(i):
        return vars.process_list.pop(i)
# Module-level duplicates of the vars-class statics, kept for callers that
# import these functions directly instead of going through the class.
def inc_g_var():
    vars.grid_var += 1
def dec_g_var():
    vars.grid_var -= 1
def get_g_var():
    return vars.grid_var
def inc_z_var():
    vars.zel_var += 1
def dec_z_var():
    vars.zel_var -= 1
def get_z_var():
    return vars.zel_var
def append_to_list(item):
    vars.process_list.append(item)
def delete_from_list(i):
    return vars.process_list.pop(i)
def homepage(request):
    """Render the dashboard listing every recorded automation run.

    Reads all documents from mvr_result_set.Completed_test_set and passes
    them (newest first) to the automation/index.html template.
    """
    # delete_data()
    # tests=retrive_data()
    myclient = pymongo.MongoClient("mongodb://localhost:27017/")
    mydb = myclient["mvr_result_set"]
    data = mydb.Completed_test_set
    # data.delete_many({})
    outputs = data.find()
    l=[]
    for i in outputs:
        print(i,'hi')
        # Flatten each Mongo document into the fields the template expects;
        # ObjectId must be stringified to survive template rendering.
        ids=str(i['_id'])
        ota=i['ota']
        start_date=i['start_date']
        end_date=i['end_date']
        comments=i['comments']
        checkin=i['cindate']
        checkout=i['coutdate']
        status=i['status']
        d={'ids':ids,'ota':ota,'start_date':start_date,'comments':comments,'end_date':end_date,
        'checkin':checkin,'checkout':checkout,'status':status}
        l.append(d)
    print(outputs)
    # Insertion order is oldest-first; reverse so newest runs show on top.
    l.reverse()
    print(l)
    param={'tests':l}
    return render(request,'automation/index.html',param)
def automate(request):
    """Kick off an OTA scraping run (GET params: cindt, coutdt, agt).

    Records a "Started" document in Mongo, picks an executor (Selenium Grid
    if fewer than 2 grid runs are active, else Zalenium if it has capacity,
    else queues the run), then dispatches per-agent, finally re-rendering
    the dashboard.
    """
    cindate=request.GET.get('cindt')
    coutdate=request.GET.get('coutdt')
    agent=request.GET.get('agt')
    # cindate=request.POST.get('checkin',None)
    # coutdate=request.POST.get('checkout',None)
    # agent=request.POST.get('agt',None)
    current_time = datetime.datetime.now()
    print('a')
    start_date = current_time.strftime("%Y-%m-%d")
    id = data_entry(agent, "", start_date, "", "Started", "Succesfully started", "",cindate,coutdate)
    cur_sel_process=vars.get_g_var()
    cur_zel_process=vars.get_z_var()
    print(cur_zel_process,cur_sel_process)
    # Executor selection: grid has capacity < 2, zalenium capacity < 2;
    # when both are saturated the run id is queued for later pickup.
    if cur_sel_process>=2:
        if cur_zel_process>=2:
            vars.append_to_list(id)
        else:
            plugin='zelenium'
    elif cur_sel_process<2:
        plugin='selenium grid'
    if cur_sel_process<2 or cur_zel_process<2:
        if agent == 'booking':
            # id = data_entry("Booking.com", "", start_date, "", "Started", "Succesfully started","")
            retrive_data()
            # return redirect("/automation/v1/booking/" + cindate + "/" + coutdate + "/" + id, code=307)
            # NOTE(review): the module-level automation_for_booking defined
            # below shadows any Celery task pulled in by "from .tasks import *";
            # a plain function has no .delay — confirm which one is intended.
            automation_for_booking.delay(cindate,coutdate,id,plugin)
        elif agent == 'goibibo':
            # id = data_entry("Goibibo.com", "", start_date, "", "Started", "Succesfully started","")
            return redirect("/automation/v1/goibibo/" + cindate + "/" + coutdate + "/" + id, code=307)
        elif agent == 'mmt':
            # id = data_entry("Make my trip", "", start_date, "", "Started", "Succesfully started","")
            return redirect("/automation/v1/mmt/" + cindate + "/" + coutdate + "/" + id, code=307)
    tests = retrive_data()
    param = {'tests': tests}
    return render(request, 'automation/index.html', param)
def automate1(request):
    """AJAX variant of automate(): parameters arrive as one POST field
    ``dt`` shaped "checkin@checkout@agent"; responds with the new run id
    as JSON instead of re-rendering the page.

    NOTE(review): grid capacity threshold here is 1 (vs 2 in automate()) and
    the booking run executes synchronously in the request — confirm intended.
    """
    # cindate=request.GET.get('cindt')
    # coutdate=request.GET.get('coutdt')
    # agent=request.GET.get('agt')
    dt=request.POST.get('dt',None)
    print(dt)
    # Raises AttributeError if 'dt' is missing (None.split).
    cindate,coutdate,agent=dt.split('@')
    print(cindate,coutdate,agent)
    # coutdate=request.POST.get('checkout',None)
    # agent=request.POST.get('agt',None)
    current_time = datetime.datetime.now()
    print('a')
    start_date = current_time.strftime("%Y-%m-%d")
    id = data_entry(agent, "", start_date, "", "Started", "Succesfully started", "",cindate,coutdate)
    cur_sel_process=vars.get_g_var()
    cur_zel_process=vars.get_z_var()
    print(cur_zel_process,cur_sel_process)
    if cur_sel_process>=1:
        if cur_zel_process>=2:
            vars.append_to_list(id)
        else:
            plugin='zelenium'
    elif cur_sel_process<1:
        plugin='selenium grid'
    if cur_sel_process<1 or cur_zel_process<2:
        if agent == 'booking':
            # id = data_entry("Booking.com", "", start_date, "", "Started", "Succesfully started","")
            retrive_data()
            # return redirect("/automation/v1/booking/" + cindate + "/" + coutdate + "/" + id, code=307)
            automation_for_booking(cindate,coutdate,id,plugin)
        elif agent == 'goibibo':
            # id = data_entry("Goibibo.com", "", start_date, "", "Started", "Succesfully started","")
            return redirect("/automation/v1/goibibo/" + cindate + "/" + coutdate + "/" + id, code=307)
        elif agent == 'mmt':
            # id = data_entry("Make my trip", "", start_date, "", "Started", "Succesfully started","")
            return redirect("/automation/v1/mmt/" + cindate + "/" + coutdate + "/" + id, code=307)
    data={'id':id}
    return JsonResponse(data)
def data_entry(ota,response,start_date,end_date,status,comment,plugin,cindate,coutdate):
    """Insert one test-run record into mvr_result_set.Completed_test_set.

    Returns the stringified ObjectId of the newly inserted document.
    """
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    collection = client["mvr_result_set"]["Completed_test_set"]
    document = {
        'start_date': start_date,
        'end_date': end_date,
        'ota': ota,
        'response': response,
        'status': status,
        'plugin': plugin,
        'comments': comment,
        'cindate': cindate,
        'coutdate': coutdate,
    }
    inserted = collection.insert_one(document)
    return str(inserted.inserted_id)
def update_entry(id,end_date,response,status,comment,plugin,cindate,coutdate):
    """Overwrite the outcome fields of the run document identified by *id*."""
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    collection = client["mvr_result_set"]["Completed_test_set"]
    changes = {
        'end_date': end_date,
        'response': response,
        'status': status,
        'comments': comment,
        'plugin': plugin,
        'cindate': cindate,
        'coutdate': coutdate,
    }
    collection.update_one({'_id': ObjectId(id)}, {"$set": changes})
def retrive_data():
    """Fetch every completed-test document.

    Returns a list rather than a live pymongo cursor: the debug loop below
    walks the results, and the original implementation returned the already
    exhausted cursor, so callers always saw an empty result set.
    """
    myclient = pymongo.MongoClient("mongodb://localhost:27017/")
    mydb = myclient["mvr_result_set"]
    data = mydb.Completed_test_set
    # data.delete_many({})
    outputs = list(data.find())
    for item in outputs:
        print(item)
    return outputs
def delete_data():
    """Wipe every document from the completed-test collection."""
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    collection = client["mvr_result_set"].Completed_test_set
    collection.delete_many({})
    print('deleted all document successfully')
def booking_run(clint,checkin = "26/05/2020",checkout = "15/06/2020"):
    """Run the Booking.com scraping scenario for one hard-coded hotel.

    :param clint: remote WebDriver hub URL to execute against.
    :param checkin: check-in date in DD/MM/YYYY form.
    :param checkout: check-out date in DD/MM/YYYY form.
    :return: whatever ``main_run`` produces (the scraped rate data).
    """
    agent = Booking()
    # Fixed target hotel plus the DOM ids of its room-type and price rows.
    search_text = "Ratnagiri"
    hotel_name = "Mango Valley Resort Ganpatipule"
    hotel_id = "4216443"
    room_typeids = ["room_type_id_421644306", "room_type_id_421644302",
                    "room_type_id_421644305", "room_type_id_421644303"]
    room_priceids = ["421644306_174652031_0_42_0",
                     "421644302_141698786_0_42_0", "421644302_174652031_0_42_0",
                     "421644305_174652031_0_42_0", "421644303_174652031_0_42_0"]
    # hotel_name="The Blue View - sea view villa's"
    # hotel_id="2808749"
    # room_typeids=[
    #     # 'room_type_id_280874901']
    #     'room_type_id_280874905']
    # room_priceids=[
    ##     '280874901_229832000_0_41_0']
    #     '280874905_229832000_0_41_0']
    result=main_run(agent, hotel_id, search_text, checkin, checkout,hotel_name,clint,room_typeids=room_typeids, room_priceids=room_priceids)
    print(result)
    return result
def automation_for_booking(cindate,coutdate,id,plugin):
    """Execute one Booking.com run on the chosen executor, persist the
    outcome, then drain any queued booking runs recursively.

    :param cindate/coutdate: ISO dates ("YYYY-MM-DD") from the web form.
    :param id: Mongo document id of the "Started" record to update.
    :param plugin: 'selenium grid' or 'zelenium'.
    """
    # NOTE(review): if plugin is neither expected value, ``clint`` stays
    # unbound and booking_run() below raises UnboundLocalError.
    if plugin=='selenium grid':
        vars.inc_g_var()
        clint='http://192.168.56.1:4444/wd/hub'
    elif plugin=='zelenium':
        vars.inc_z_var()
        clint='http://192.168.99.100:4444/wd/hub'
    # Convert ISO YYYY-MM-DD into the DD/MM/YYYY form booking_run expects.
    yr,month,date=cindate.split('-')
    checkin = date+"/"+month+"/"+yr
    yr, month, date = coutdate.split('-')
    checkout = date + "/" + month + "/" + yr
    current_time = datetime.datetime.now()
    end_date = current_time.strftime("%Y-%m-%d")
    try:
        result=booking_run(clint,checkin,checkout)
        update_entry(id, end_date,result, "Finished", "Succesfully completed",plugin,cindate,coutdate)
        # print(result)
        # return render_template('result.html',param=result)
    except Exception as e:
        print(e.__class__.__name__)
        # Record the failure type so the dashboard can show it.
        update_entry(id, end_date, "No result", "Error", e.__class__.__name__,plugin,cindate,coutdate)
        # return f"Error occured of type {e.__class__.__name__}"
    if plugin == 'selenium grid':
        vars.dec_g_var()
    elif plugin == 'zelenium':
        vars.dec_z_var()
    # Drain the pending queue.
    # NOTE(review): delete_from_list(i) pops while this loop keeps indexing
    # with the original range — after a removal later indices shift, so
    # entries can be skipped or an IndexError raised; iterate a copy or pop
    # from the front instead.
    no_of_pending_process=len(vars.process_list)
    for i in range(no_of_pending_process):
        # if no_of_pending_process>0:
        new_process=vars.process_list[i]
        print(new_process)
        myclient = pymongo.MongoClient("mongodb://localhost:27017/")
        mydb = myclient["mvr_result_set"]
        data = mydb.Completed_test_set
        output = data.find({'_id':ObjectId(new_process)})
        print(output)
        for m in output:
            l=m
        # k=0
        # NOTE(review): ``l`` is unbound if the id is not found in Mongo.
        if(l['ota']=='booking'):
            prcs=vars.delete_from_list(i)
            # Recurse to run the queued job on the executor just freed.
            automation_for_booking(l['cindate'],l['coutdate'],new_process,plugin)
        # k=1
def result(request):
    """Render the stored scrape response for the run id passed as ?id=...

    NOTE(review): ``res``/``res1`` stay unbound (NameError) when no document
    matches the id — consider a guard with a 404.
    """
    myclient = pymongo.MongoClient("mongodb://localhost:27017/")
    mydb = myclient["mvr_result_set"]
    data = mydb.Completed_test_set
    id=request.GET.get('id')
    process=data.find({'_id':ObjectId(id)})
    # output = data.find({'_id': ObjectId(process)})
    for i in process:
        res=i['response']
        res1=i
    print(res)
    return render(request,'automation/result.html',{'param':res,'res1':res1})
def heartbeat(request):
    # Static page hosting the executor health-check buttons (see selheart /
    # zelheart below).
    return render(request,'automation/heartbeat.html')
def selheart(request):
    """Health check for the Selenium Grid hub.

    Opens a remote Chrome session, loads a page and closes it; responds with
    'working' on success, otherwise an HTML snippet naming the failure.
    """
    clint = 'http://192.168.56.1:4444/wd/hub'
    try:
        caps = DesiredCapabilities.CHROME.copy()
        driver = webdriver.Remote(
            command_executor=clint,
            # desired_capabilities=DesiredCapabilities.CHROME)
            desired_capabilities=caps)
        driver.get('https://www.google.com/')
        driver.quit()
        return HttpResponse('working')
    except Exception as e:
        # Fixed user-facing typo ("woking" -> "working").
        return HttpResponse('Not working due to <b>'+str(e)+'</b> error')
def zelheart(request):
    """Health check for the Zalenium hub (same probe as selheart, different
    hub address)."""
    clint = 'http://192.168.99.100:4444/wd/hub'
    try:
        caps = DesiredCapabilities.CHROME.copy()
        driver = webdriver.Remote(
            command_executor=clint,
            # desired_capabilities=DesiredCapabilities.CHROME)
            desired_capabilities=caps)
        driver.get('https://www.google.com/')
        driver.quit()
        return HttpResponse('working')
    except Exception as e:
        # Fixed user-facing typo ("woking" -> "working").
        return HttpResponse('Not working due to <b>' + str(e) + '</b> error')
| StarcoderdataPython |
5179867 | # Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset utilities."""
import functools
import pathlib
from typing import Dict, Tuple
from absl import logging
from graph_nets import graphs as tf_graphs
from graph_nets import utils_tf
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
import tqdm
# pylint: disable=g-bad-import-order
import sub_sampler
Path = pathlib.Path
# Node counts for the full MAG240M-style heterogeneous graph.
NUM_PAPERS = 121751666
NUM_AUTHORS = 122383112
NUM_INSTITUTIONS = 25721
EMBEDDING_SIZE = 768   # raw BERT paper-feature width
NUM_CLASSES = 153      # paper subject-area labels
NUM_NODES = NUM_PAPERS + NUM_AUTHORS + NUM_INSTITUTIONS
NUM_EDGES = 1_728_364_232
assert NUM_NODES == 244_160_499
NUM_K_FOLD_SPLITS = 10
# Offsets of each node type inside the merged [papers|authors|institutions]
# index space; SIZES gives the per-type counts.
OFFSETS = {
    "paper": 0,
    "author": NUM_PAPERS,
    "institution": NUM_PAPERS + NUM_AUTHORS,
}
SIZES = {
    "paper": NUM_PAPERS,
    "author": NUM_AUTHORS,
    "institution": NUM_INSTITUTIONS
}
RAW_DIR = Path("raw")
PREPROCESSED_DIR = Path("preprocessed")
RAW_NODE_FEATURES_FILENAME = RAW_DIR / "node_feat.npy"
RAW_NODE_LABELS_FILENAME = RAW_DIR / "node_label.npy"
RAW_NODE_YEAR_FILENAME = RAW_DIR / "node_year.npy"
TRAIN_INDEX_FILENAME = RAW_DIR / "train_idx.npy"
# NOTE(review): VALID and TEST both point at "train_idx.npy" — this looks
# like a copy-paste slip; confirm the intended valid/test filenames before
# relying on these splits.
VALID_INDEX_FILENAME = RAW_DIR / "train_idx.npy"
TEST_INDEX_FILENAME = RAW_DIR / "train_idx.npy"
EDGES_PAPER_PAPER_B = PREPROCESSED_DIR / "paper_paper_b.npz"
EDGES_PAPER_PAPER_B_T = PREPROCESSED_DIR / "paper_paper_b_t.npz"
EDGES_AUTHOR_INSTITUTION = PREPROCESSED_DIR / "author_institution.npz"
EDGES_INSTITUTION_AUTHOR = PREPROCESSED_DIR / "institution_author.npz"
EDGES_AUTHOR_PAPER = PREPROCESSED_DIR / "author_paper.npz"
EDGES_PAPER_AUTHOR = PREPROCESSED_DIR / "paper_author.npz"
# PCA-reduced (129-dim) feature files derived from the BERT paper features.
PCA_PAPER_FEATURES_FILENAME = PREPROCESSED_DIR / "paper_feat_pca_129.npy"
PCA_AUTHOR_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "author_feat_from_paper_feat_pca_129.npy")
PCA_INSTITUTION_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "institution_feat_from_paper_feat_pca_129.npy")
PCA_MERGED_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "merged_feat_from_paper_feat_pca_129.npy")
NEIGHBOR_INDICES_FILENAME = PREPROCESSED_DIR / "neighbor_indices.npy"
NEIGHBOR_DISTANCES_FILENAME = PREPROCESSED_DIR / "neighbor_distances.npy"
FUSED_NODE_LABELS_FILENAME = PREPROCESSED_DIR / "fused_node_labels.npy"
FUSED_PAPER_EDGES_FILENAME = PREPROCESSED_DIR / "fused_paper_edges.npz"
FUSED_PAPER_EDGES_T_FILENAME = PREPROCESSED_DIR / "fused_paper_edges_t.npz"
K_FOLD_SPLITS_DIR = Path("k_fold_splits")
def get_raw_directory(data_root):
  """Return the ``raw`` data subdirectory under *data_root* as a Path."""
  return Path(data_root).joinpath("raw")
def get_preprocessed_directory(data_root):
  """Return the ``preprocessed`` data subdirectory under *data_root*."""
  root = Path(data_root)
  return root / "preprocessed"
def _log_path_decorator(fn):
def _decorated_fn(path, **kwargs):
logging.info("Loading %s", path)
output = fn(path, **kwargs)
logging.info("Finish loading %s", path)
return output
return _decorated_fn
@_log_path_decorator
def load_csr(path, debug=False):
  # Load a scipy CSR sparse matrix from an .npz file. In debug mode a tiny
  # all-zero placeholder is returned so code paths can run without the
  # multi-gigabyte real data.
  if debug:
    # Dummy matrix for debugging.
    return sp.csr_matrix(np.zeros([10, 10]))
  return sp.load_npz(str(path))
@_log_path_decorator
def load_npy(path):
  # Load a numpy array; str() makes this accept both Path and str inputs.
  return np.load(str(path))
@functools.lru_cache()
def get_arrays(data_root="/data/",
               use_fused_node_labels=True,
               use_fused_node_adjacencies=True,
               return_pca_embeddings=True,
               k_fold_split_id=None,
               return_adjacencies=True,
               use_dummy_adjacencies=False):
  """Returns all arrays needed for training.

  Results are memoized per argument combination via lru_cache, so repeated
  calls with the same flags reuse the (large) loaded arrays.

  Args:
    data_root: directory containing the raw/ and preprocessed/ subtrees.
    use_fused_node_labels: load labels propagated across duplicate papers.
    use_fused_node_adjacencies: load citation edges merged across duplicates.
    return_pca_embeddings: memory-map the 129-dim PCA node features.
    k_fold_split_id: if set, use the k-fold split files instead of the raw
      train/valid index files.
    return_adjacencies: load the six sparse adjacency matrices.
    use_dummy_adjacencies: substitute tiny placeholder matrices (debugging).
  """
  logging.info("Starting to get files")
  data_root = Path(data_root)
  array_dict = {}
  array_dict["paper_year"] = load_npy(data_root / RAW_NODE_YEAR_FILENAME)
  # Choose between the canonical split and a k-fold re-split of validation.
  if k_fold_split_id is None:
    train_indices = load_npy(data_root / TRAIN_INDEX_FILENAME)
    valid_indices = load_npy(data_root / VALID_INDEX_FILENAME)
  else:
    train_indices, valid_indices = get_train_and_valid_idx_for_split(
        k_fold_split_id, num_splits=NUM_K_FOLD_SPLITS,
        root_path=data_root / K_FOLD_SPLITS_DIR)
  array_dict["train_indices"] = train_indices
  array_dict["valid_indices"] = valid_indices
  array_dict["test_indices"] = load_npy(data_root / TEST_INDEX_FILENAME)
  if use_fused_node_labels:
    array_dict["paper_label"] = load_npy(data_root / FUSED_NODE_LABELS_FILENAME)
  else:
    array_dict["paper_label"] = load_npy(data_root / RAW_NODE_LABELS_FILENAME)
  if return_adjacencies:
    logging.info("Starting to get adjacencies.")
    if use_fused_node_adjacencies:
      paper_paper_index = load_csr(
          data_root / FUSED_PAPER_EDGES_FILENAME, debug=use_dummy_adjacencies)
      paper_paper_index_t = load_csr(
          data_root / FUSED_PAPER_EDGES_T_FILENAME, debug=use_dummy_adjacencies)
    else:
      paper_paper_index = load_csr(
          data_root / EDGES_PAPER_PAPER_B, debug=use_dummy_adjacencies)
      paper_paper_index_t = load_csr(
          data_root / EDGES_PAPER_PAPER_B_T, debug=use_dummy_adjacencies)
    array_dict.update(
        dict(
            author_institution_index=load_csr(
                data_root / EDGES_AUTHOR_INSTITUTION,
                debug=use_dummy_adjacencies),
            institution_author_index=load_csr(
                data_root / EDGES_INSTITUTION_AUTHOR,
                debug=use_dummy_adjacencies),
            author_paper_index=load_csr(
                data_root / EDGES_AUTHOR_PAPER, debug=use_dummy_adjacencies),
            paper_author_index=load_csr(
                data_root / EDGES_PAPER_AUTHOR, debug=use_dummy_adjacencies),
            paper_paper_index=paper_paper_index,
            paper_paper_index_t=paper_paper_index_t,
        ))
  if return_pca_embeddings:
    # Memory-mapped: the merged feature table is too large to hold in RAM.
    array_dict["bert_pca_129"] = np.load(
        data_root / PCA_MERGED_FEATURES_FILENAME, mmap_mode="r")
    assert array_dict["bert_pca_129"].shape == (NUM_NODES, 129)
  logging.info("Finish getting files")
  # Sanity-check shapes; real adjacencies are first padded to the full
  # (type_size x type_size) shapes.
  # pytype: disable=attribute-error
  assert array_dict["paper_year"].shape[0] == NUM_PAPERS
  assert array_dict["paper_label"].shape[0] == NUM_PAPERS
  if return_adjacencies and not use_dummy_adjacencies:
    array_dict = _fix_adjacency_shapes(array_dict)
    assert array_dict["paper_author_index"].shape == (NUM_PAPERS, NUM_AUTHORS)
    assert array_dict["author_paper_index"].shape == (NUM_AUTHORS, NUM_PAPERS)
    assert array_dict["paper_paper_index"].shape == (NUM_PAPERS, NUM_PAPERS)
    assert array_dict["paper_paper_index_t"].shape == (NUM_PAPERS, NUM_PAPERS)
    assert array_dict["institution_author_index"].shape == (
        NUM_INSTITUTIONS, NUM_AUTHORS)
    assert array_dict["author_institution_index"].shape == (
        NUM_AUTHORS, NUM_INSTITUTIONS)
  # pytype: enable=attribute-error
  return array_dict
def add_nodes_year(graph, paper_year):
  """Attach a ``year`` field to graph.nodes; non-paper nodes get 1900.

  The graph itself is not mutated: a shallow copy of the nodes dict is
  returned inside graph._replace.
  """
  nodes = graph.nodes.copy()
  node_indices = nodes["index"]
  # Clamp so indices of non-paper nodes (which exceed the paper table) stay
  # in bounds; their year is overwritten below anyway.
  clamped = np.minimum(node_indices, paper_year.shape[0] - 1)
  year = paper_year[clamped].copy()
  non_paper_mask = nodes["type"] != 0
  year[non_paper_mask] = 1900
  nodes["year"] = year
  return graph._replace(nodes=nodes)
def add_nodes_label(graph, paper_label):
  """Attach a ``label`` field to graph.nodes; non-paper nodes get label 0."""
  nodes = graph.nodes.copy()
  # Clamp out-of-range (non-paper) indices; fancy indexing returns a fresh
  # array, so the in-place zeroing below cannot touch paper_label.
  clamped = np.minimum(nodes["index"], paper_label.shape[0] - 1)
  label = paper_label[clamped]
  label[nodes["type"] != 0] = 0
  nodes["label"] = label
  return graph._replace(nodes=nodes)
def add_nodes_embedding_from_array(graph, array):
  """Attach per-node ``features`` gathered from the merged embedding table.

  The table is laid out as [papers | authors | institutions], so author and
  institution indices are shifted by the preceding type sizes before the
  gather.
  """
  nodes = graph.nodes.copy()
  lookup = nodes["index"].copy()
  lookup[nodes["type"] == 1] += NUM_PAPERS
  lookup[nodes["type"] == 2] += NUM_PAPERS + NUM_AUTHORS
  # Gather the embeddings for the shifted indices.
  nodes["features"] = array[lookup]
  return graph._replace(nodes=nodes)
def get_graph_subsampling_dataset(
    prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data,
    max_nodes, max_edges,
    **subsampler_kwargs):
  """Returns tf_dataset for online sampling.

  Args:
    prefix: which index set to sample roots from ("train"/"valid"/"test").
    arrays: dict produced by get_arrays().
    shuffle_indices: shuffle the root node order each epoch.
    ratio_unlabeled_data_to_labeled_data: if > 0, add this fraction of
      randomly chosen unlabeled papers to the labeled roots.
    max_nodes / max_edges: subgraph size caps passed to the sub-sampler.
    **subsampler_kwargs: forwarded to sub_sampler.subsample_graph.
  """
  def generator():
    labeled_indices = arrays[f"{prefix}_indices"]
    if ratio_unlabeled_data_to_labeled_data > 0:
      num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data *
                                      labeled_indices.shape[0])
      # NOTE(review): np.random.choice without a seed makes each epoch's
      # unlabeled augmentation non-deterministic — confirm intended.
      unlabeled_indices = np.random.choice(
          NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False)
      root_node_indices = np.concatenate([labeled_indices, unlabeled_indices])
    else:
      root_node_indices = labeled_indices
    if shuffle_indices:
      # Copy first so the cached arrays dict is not shuffled in place.
      root_node_indices = root_node_indices.copy()
      np.random.shuffle(root_node_indices)
    for index in root_node_indices:
      # One capped subgraph rooted at `index`, then decorated with labels
      # and years before being wrapped as a GraphsTuple.
      graph = sub_sampler.subsample_graph(
          index,
          arrays["author_institution_index"],
          arrays["institution_author_index"],
          arrays["author_paper_index"],
          arrays["paper_author_index"],
          arrays["paper_paper_index"],
          arrays["paper_paper_index_t"],
          paper_years=arrays["paper_year"],
          max_nodes=max_nodes,
          max_edges=max_edges,
          **subsampler_kwargs)
      graph = add_nodes_label(graph, arrays["paper_label"])
      graph = add_nodes_year(graph, arrays["paper_year"])
      graph = tf_graphs.GraphsTuple(*graph)
      yield graph
  # Draw one sample up front so TF can infer the output signature.
  sample_graph = next(generator())
  return tf.data.Dataset.from_generator(
      generator,
      output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))
def paper_features_to_author_features(
    author_paper_index, paper_features):
  """Averages paper features to authors.

  Each author's feature vector is the float32 mean of the features of the
  papers listed in their CSR adjacency row.
  """
  assert paper_features.shape[0] == NUM_PAPERS
  assert author_paper_index.shape[0] == NUM_AUTHORS
  feature_dim = paper_features.shape[1]
  author_features = np.zeros(
      [NUM_AUTHORS, feature_dim], dtype=paper_features.dtype)
  for author_idx in range(NUM_AUTHORS):
    papers_of_author = author_paper_index[author_idx].indices
    author_features[author_idx] = paper_features[papers_of_author].mean(
        axis=0, dtype=np.float32)
    if author_idx % 10000 == 0:
      # Progress heartbeat; this loop walks >120M authors.
      logging.info("%d/%d", author_idx, NUM_AUTHORS)
  return author_features
def author_features_to_institution_features(
    institution_author_index, author_features):
  """Averages author features to institutions.

  Each institution's feature vector is the float32 mean of the features of
  the authors listed in its CSR adjacency row.
  """
  assert author_features.shape[0] == NUM_AUTHORS
  assert institution_author_index.shape[0] == NUM_INSTITUTIONS
  feature_dim = author_features.shape[1]
  institution_features = np.zeros(
      [NUM_INSTITUTIONS, feature_dim], dtype=author_features.dtype)
  for inst_idx in range(NUM_INSTITUTIONS):
    authors_of_inst = institution_author_index[inst_idx].indices
    institution_features[inst_idx] = author_features[authors_of_inst].mean(
        axis=0, dtype=np.float32)
    if inst_idx % 10000 == 0:
      logging.info("%d/%d", inst_idx, NUM_INSTITUTIONS)
  return institution_features
def generate_fused_paper_adjacency_matrix(neighbor_indices, neighbor_distances,
                                          paper_paper_csr):
  """Generates fused adjacency matrix for identical nodes.

  For every pair of papers detected as identical (embedding distance 0 to a
  nearest neighbour), the citation rows and columns of the two papers are
  merged so each member of the pair cites / is cited by the union of both.
  """
  # First construct set of identical node indices.
  # NOTE: Since we take only top K=26 identical pairs for each node, this is not
  # actually exhaustive. Also, if A and B are equal, and B and C are equal,
  # this method would not necessarily detect A and C being equal.
  # However, this should capture almost all cases.
  logging.info("Generating fused paper adjacency matrix")
  eps = 0.0
  # Mask out each row's self-match (column 0 of the kNN result) and keep
  # only exact-distance matches.
  mask = ((neighbor_indices != np.mgrid[:neighbor_indices.shape[0], :1]) &
          (neighbor_distances <= eps))
  # NOTE(review): np.nonzero(mask) returns a (rows, cols) pair of arrays, so
  # map(tuple, ...) yields two long tuples rather than per-entry (i, j)
  # pairs; the loop below then reads pair[0]/pair[1] from those tuples.
  # zip(*np.nonzero(mask)) (mapped through neighbor_indices for the second
  # element) looks like the intent — verify before reuse.
  identical_pairs = list(map(tuple, np.nonzero(mask)))
  del mask
  # Have a csc version for fast column access.
  paper_paper_csc = paper_paper_csr.tocsc()
  # Construct new matrix as coo, starting off with original rows/cols.
  paper_paper_coo = paper_paper_csr.tocoo()
  new_rows = [paper_paper_coo.row]
  new_cols = [paper_paper_coo.col]
  for pair in tqdm.tqdm(identical_pairs):
    # STEP ONE: First merge papers being cited by the pair.
    # Add edges from second paper, to all papers cited by first paper.
    cited_by_first = paper_paper_csr.getrow(pair[0]).nonzero()[1]
    if cited_by_first.shape[0] > 0:
      new_rows.append(pair[1] * np.ones_like(cited_by_first))
      new_cols.append(cited_by_first)
    # Add edges from first paper, to all papers cited by second paper.
    cited_by_second = paper_paper_csr.getrow(pair[1]).nonzero()[1]
    if cited_by_second.shape[0] > 0:
      new_rows.append(pair[0] * np.ones_like(cited_by_second))
      new_cols.append(cited_by_second)
    # STEP TWO: Then merge papers that cite the pair.
    # Add edges to second paper, from all papers citing the first paper.
    citing_first = paper_paper_csc.getcol(pair[0]).nonzero()[0]
    if citing_first.shape[0] > 0:
      new_rows.append(citing_first)
      new_cols.append(pair[1] * np.ones_like(citing_first))
    # Add edges to first paper, from all papers citing the second paper.
    citing_second = paper_paper_csc.getcol(pair[1]).nonzero()[0]
    if citing_second.shape[0] > 0:
      new_rows.append(citing_second)
      new_cols.append(pair[0] * np.ones_like(citing_second))
  logging.info("Done with adjacency loop")
  paper_paper_coo_shape = paper_paper_coo.shape
  # Free the large intermediates before concatenation doubles peak memory.
  del paper_paper_csr
  del paper_paper_csc
  del paper_paper_coo
  # All done; now concatenate everything together and form new matrix.
  new_rows = np.concatenate(new_rows)
  new_cols = np.concatenate(new_cols)
  # NOTE(review): np.bool is a deprecated alias removed in NumPy 1.24+;
  # this line raises AttributeError on modern NumPy — use plain ``bool``.
  return sp.coo_matrix(
      (np.ones_like(new_rows, dtype=np.bool), (new_rows, new_cols)),
      shape=paper_paper_coo_shape).tocsr()
def generate_k_fold_splits(
    train_idx, valid_idx, output_path, num_splits=NUM_K_FOLD_SPLITS):
  """Generates splits adding fractions of the validation split to training.

  Writes ``train_idx_{i}_{num_splits}.npy`` / ``valid_idx_{i}_{num_splits}.npy``
  pairs under *output_path*. The permutation seed is fixed (42) so the
  splits are reproducible.
  """
  output_path = Path(output_path)
  np.random.seed(42)
  valid_idx = np.random.permutation(valid_idx)
  # Split into `num_parts` (almost) identically sized arrays.
  valid_idx_parts = np.array_split(valid_idx, num_splits)
  for i in range(num_splits):
    # Add all but the i'th subpart to training set.
    new_train_idx = np.concatenate(
        [train_idx, *valid_idx_parts[:i], *valid_idx_parts[i+1:]])
    # i'th subpart is validation set.
    new_valid_idx = valid_idx_parts[i]
    train_path = output_path / f"train_idx_{i}_{num_splits}.npy"
    valid_path = output_path / f"valid_idx_{i}_{num_splits}.npy"
    np.save(train_path, new_train_idx)
    np.save(valid_path, new_valid_idx)
    logging.info("Saved: %s", train_path)
    logging.info("Saved: %s", valid_path)
def get_train_and_valid_idx_for_split(
    split_id: int,
    num_splits: int,
    root_path: str,
) -> Tuple[np.ndarray, np.ndarray]:
  """Returns train and valid indices for given split.

  Loads the index files written by generate_k_fold_splits(); *root_path*
  may be a str or Path (it is only formatted into the filename).
  """
  new_train_idx = load_npy(f"{root_path}/train_idx_{split_id}_{num_splits}.npy")
  new_valid_idx = load_npy(f"{root_path}/valid_idx_{split_id}_{num_splits}.npy")
  return new_train_idx, new_valid_idx
def generate_fused_node_labels(neighbor_indices, neighbor_distances,
                               node_labels, train_indices, valid_indices,
                               test_indices):
  """Generates fused adjacency matrix for identical nodes.

  Propagates each training paper's label to its exact-duplicate neighbours
  (distance 0), skipping validation/test papers so no label leaks into the
  held-out sets.

  NOTE: mutates and returns the *same* ``node_labels`` array (in-place).
  """
  logging.info("Generating fused node labels")
  # Set membership makes the per-neighbour leak check O(1).
  valid_indices = set(valid_indices.tolist())
  test_indices = set(test_indices.tolist())
  valid_or_test_indices = valid_indices | test_indices
  # Drop training ids beyond the kNN table (non-paper / out-of-range rows).
  train_indices = train_indices[train_indices < neighbor_indices.shape[0]]
  # Go through list of all pairs where one node is in training set, and
  for i in tqdm.tqdm(train_indices):
    for j in range(neighbor_indices.shape[1]):
      other_index = neighbor_indices[i][j]
      # if the other is not a validation or test node,
      if other_index in valid_or_test_indices:
        continue
      # and they are identical,
      if neighbor_distances[i][j] == 0:
        # assign the label of the training node to the other node
        node_labels[other_index] = node_labels[i]
  return node_labels
def _pad_to_shape(
sparse_csr_matrix: sp.csr_matrix,
output_shape: Tuple[int, int]) -> sp.csr_matrix:
"""Pads a csr sparse matrix to the given shape."""
# We should not try to expand anything smaller.
assert np.all(sparse_csr_matrix.shape <= output_shape)
# Maybe it already has the right shape.
if sparse_csr_matrix.shape == output_shape:
return sparse_csr_matrix
# Append as many indptr elements as we need to match the leading size,
# This is achieved by just padding with copies of the last indptr element.
required_padding = output_shape[0] - sparse_csr_matrix.shape[0]
updated_indptr = np.concatenate(
[sparse_csr_matrix.indptr] +
[sparse_csr_matrix.indptr[-1:]] * required_padding,
axis=0)
# The change in trailing size does not have structural implications, it just
# determines the highest possible value for the indices, so it is sufficient
# to just pass the new output shape, with the correct trailing size.
return sp.csr.csr_matrix(
(sparse_csr_matrix.data,
sparse_csr_matrix.indices,
updated_indptr),
shape=output_shape)
def _fix_adjacency_shapes(
    arrays: Dict[str, sp.csr.csr_matrix],
) -> Dict[str, sp.csr.csr_matrix]:
  """Fixes the shapes of the adjacency matrices.

  Each key is "<sender>_<receiver>_index"; the matrix is padded up to the
  full (SIZES[sender], SIZES[receiver]) shape. Returns a shallow copy of
  the input dict with the padded matrices substituted.
  """
  fixed = arrays.copy()
  adjacency_keys = (
      "author_institution_index",
      "author_paper_index",
      "paper_paper_index",
      "institution_author_index",
      "paper_author_index",
      "paper_paper_index_t",
  )
  for key in adjacency_keys:
    sender_type, receiver_type = key.split("_")[:2]
    target_shape = (SIZES[sender_type], SIZES[receiver_type])
    fixed[key] = _pad_to_shape(fixed[key], output_shape=target_shape)
  return fixed
| StarcoderdataPython |
6439096 | #!/usr/bin/env python3
import logging
import socket  # NOTE(review): imported but unused; kept to avoid breaking external expectations.
import os
from time import sleep

import seqlog

# Seq server coordinates come from the environment so the demo can target
# any deployment without code changes.
server_url = os.getenv("SEQ_SERVER_URL", "http://localhost:5341/")
api_key = os.getenv("SEQ_API_KEY", "")

print("Logging to Seq server '{}' (API key = '{}').".format(server_url, api_key))

# Route the root logger (plus a console echo) to Seq; the short flush
# timeout keeps this short-lived demo from losing batched events.
log_handler = seqlog.log_to_seq(
    server_url,
    api_key,
    level=logging.INFO,
    auto_flush_timeout=0.2,
    additional_handlers=[logging.StreamHandler()],
    override_root_logger=True
)

print("Running...")

# seqlog renders named-hole messages as structured events; bytes arguments
# exercise its value-encoding path.
logging.info("Hi, {name}. {greeting}", name="Root logger", greeting="Nice to meet you")

logger1 = logging.getLogger("A")
logger1.info("Hi, {name}! {greeting}", name="world", greeting=b"Nice to meet you")

logger2 = logging.getLogger("A.B")
logger2.info("Bye, {name}! {greeting}", name=b"moon", greeting="Nice to meet you")

logger3 = logging.getLogger("C")
logger3.info("By, %s!", "moon")

try:
    raise Exception("Hello world!")
except Exception:
    # Was a bare "except:", which would also trap SystemExit and
    # KeyboardInterrupt; Exception is sufficient for this demo.
    logger1.error("Encountered an error!", exc_info=True)

print("Sleeping...")
sleep(0.5)

print("Done.")
| StarcoderdataPython |
11245183 | <gh_stars>1-10
""" pyglotaran-extras io package """
from pyglotaran_extras.io.load_data import load_data
from pyglotaran_extras.io.setup_case_study import setup_case_study
__all__ = ["setup_case_study", "load_data"]
| StarcoderdataPython |
221489 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import random
import hmac
import hashlib
import binascii
import base64
import json
import logging
import re
import requests
# (*)腾讯优图配置
app_id = os.environ.get('app_id')
secret_id = os.environ.get('secret_id')
secret_key = os.environ.get('secret_key')
# Server酱V3配置
sckey = os.environ.get('sckey')
logger = logging.getLogger()
class Youtu(object):
    """Minimal client for Tencent Youtu general OCR.

    Builds the legacy QQ-style app signature and posts a base64 image to
    the generalocr endpoint.
    """
    def __init__(self, app_id, secret_id, secret_key, qq=10000):
        self.app_id = app_id
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.qq = qq  # user id embedded in the signature; 10000 = default
    def cal_sig(self):
        """Compute the Authorization signature.

        The plain text field order (a,k,e,t,r,u,f) and the layout
        base64(HMAC-SHA1(plain) + plain) must match the service exactly —
        do not reorder.
        """
        timestamp = int(time.time())
        expired = str(timestamp + 2592000)  # signature valid for 30 days
        rdm = str(random.randint(0, 999999999))
        plain_text = 'a={appid}&k={secret_id}&e={expired}&t={timestamp}&r={rdm}&u={qq}&f='
        plain_text = plain_text.format(appid=self.app_id,
                                       secret_id=self.secret_id,
                                       timestamp=timestamp,
                                       rdm=rdm, qq=self.qq,
                                       expired=expired)
        # NOTE(review): the local name ``bin`` shadows the builtin.
        bin = hmac.new(self.secret_key.encode(), plain_text.encode(), hashlib.sha1).hexdigest()
        s = binascii.unhexlify(bin)
        s = s + plain_text.encode('ascii')
        signature = base64.b64encode(s).rstrip().decode()
        return signature
    def get_text(self, image_raw):
        """OCR the raw image bytes.

        :param image_raw: image file content as bytes.
        :return: the raw JSON response text when it contains 'items',
            otherwise the sentinel string '0'.
        """
        signature = self.cal_sig()
        headers = {'Host': 'api.youtu.qq.com', 'Content-Type': 'text/json', 'Authorization': signature}
        data = {'app_id': self.app_id, 'image': ''}
        data['image'] = base64.b64encode(image_raw).rstrip().decode('utf-8')
        resp = requests.post('https://api.youtu.qq.com/youtu/ocrapi/generalocr',
                             data=json.dumps(data),
                             headers=headers)
        if 'items' in resp.text:
            return resp.content.decode('utf-8')
        else:
            return '0'
class ScoreQuery:
    """Scrape a postgraduate exam score from yz.chsi.com.cn.

    Flow: fetch session cookies, OCR the captcha via Youtu, then POST the
    candidate's details together with the recognised captcha.
    """
    def __init__(self, xm, id, ksbh):
        self.xm = xm      # candidate name
        self.id = id      # national ID number
        self.ksbh = ksbh  # exam ticket number
        self.cookies = requests.cookies.RequestsCookieJar()
        # Browser-like headers; the site rejects requests without them.
        self.headers = {
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding':'gzip, deflate, br',
            'Accept-Language':'zh-CN,zh;q=0.9,en;q=0.8',
            'Cache-Control':'max-age=0',
            'Content-Type':'application/x-www-form-urlencoded',
            'DNT':'1',
            'Host':'yz.chsi.com.cn',
            'Origin':'https://yz.chsi.com.cn',
            'Referer':'https://yz.chsi.com.cn/apply/cjcx/',
            'Upgrade-Insecure-Requests':'1',
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.37 (KHTML, like Gecko) Chrome/70.0.3537.110 Safari/537.37'
        }
    def get_cookies(self):
        # Hit the query page once to obtain a session cookie jar.
        base_url = 'https://yz.chsi.com.cn/apply/cjcx'
        session = requests.session()
        base_resp = session.get(base_url, headers=self.headers)
        self.cookies = base_resp.cookies
    def get_checkcode(self):
        """Download the captcha image and OCR it; '0' on any failure."""
        pic_url = 'https://yz.chsi.com.cn/apply/cjcx/image.do'
        resp = requests.get(pic_url, headers=self.headers, cookies=self.cookies).content
        ocr = Youtu(app_id, secret_id, secret_key)
        try:
            resp = ocr.get_text(resp)
            # SECURITY(review): eval() on a remote OCR response executes
            # arbitrary expressions — json.loads() should be used instead.
            resp = eval(resp)
            return resp['items'][0]['itemstring']
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            return '0'
    def get_score_page(self):
        """Submit the score query form and return the result page HTML."""
        self.get_cookies()
        checkcode = self.get_checkcode().replace(' ','')
        post_url = 'https://yz.chsi.com.cn/apply/cjcx/cjcx.do'
        data = {
            'xm': self.xm,
            'zjhm':self.id,
            'ksbh':self.ksbh,
            'bkdwdm':None,
            'checkcode':checkcode
        }
        post_resp = requests.post(post_url,data=data, headers=self.headers).text
        return post_resp
    @staticmethod
    def get_mid_text(w1, w2, text):
        # Return all substrings of *text* found between w1 and w2
        # (DOTALL, non-greedy).
        pat = re.compile(w1+'(.*?)'+w2,re.S)
        result_dict = pat.findall(text)
        return result_dict
    @staticmethod
    def notice(key, title, desp):
        # Push a notification through the ServerChan (sc.ftqq.com) service.
        url = 'https://sc.ftqq.com/{}.send'.format(key)
        payload = {'text':title,'desp':desp}
        r = requests.get(url,params=payload)
        return r.text
def main_handler(event, context):
    """Serverless (Tencent SCF) entry point for the score query.

    Expected query-string parameters: ``xm`` (name), ``id`` (ID number),
    ``kh`` (exam ticket number). Body codes: 101 = score not released,
    102 = bad/missing parameters, 103 = unexpected page content.
    """
    data = {
        "isBase64Encoded": False,
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": ""}
    # Resolve the request id before the try block: the original resolved it
    # inside, so a failure on that line made the except path raise NameError.
    rid = context["request_id"]
    try:
        xm = event['queryString']['xm']
        id = event['queryString']['id']
        kh = event['queryString']['kh']
        query = ScoreQuery(xm, id, kh)
        page = query.get_score_page()
        if '无查询结果' in page:
            logging.info('成绩还没出')
            data['body'] = json.dumps({"Code": 101, "Msg": "Score not released yet", "Request_id": rid})
            return data
        elif '总分' in page:
            score_content = query.get_mid_text('<tbody>', '</tbody>', page)[0]
            logging.info('成绩查询成功')
            data['headers']['Content-Type'] = 'text/html'
            data['body'] = score_content
            #query.notice(sckey,'成绩出了',page)
            #写这段代码的时候,成绩还未公布,并不知道页面结构,所以直接返回表格内容
            return data
        else:
            data['body'] = json.dumps({"Code": 103, "Msg": "Unexpected page contents", "Request_id": rid})
            return data
    except Exception:
        # Narrowed from a bare "except:"; any failure while reading the
        # parameters or scraping maps to the generic 102 response.
        data['body'] = json.dumps({"Code": 102, "Msg": "Unexpected url parameters", "Request_id": rid})
        return data
| StarcoderdataPython |
6517024 | '''
Crie um programa que leia o nome de uma pessoa e diga se ela tem "Silva" no nome.
'''
# Read a person's name, Title-Case it, and split it into a list of words so
# the membership test below matches whole words only.
nome = str(input('Digite um nome de pessoa: ')).title().split()
# "in" against the word list is an exact-word match ("Silva" must appear as
# its own word).
print(f'\nTem "Silva" no nome? {"Silva" in nome}')
# This form prevents names like 'Silvana' from being accepted as a match.
3239331 | <gh_stars>0
"""
LKB (c) 2016-17
Processing Nivel220 sensor (RS485 interface) output, as collected by Portmon for Windows port sniffer.
Output:
Time, X,Y,T
written for python 2.7
"""
#import pandas as pd
#import numpy as np
#import scipy
import glob, os #for file handling
import re #advanced text
import pdb #debugger
#################################################################
############## SUPPORT FUNCTION DEFINITIONS
#################################################################
# parser Nivel220 sensor (RS485 interface) output from Portmon for Windows
def parserNivel(dataFileName):
# get output file
##################
outFile = open("%s.out" % dataFileName[:dataFileName.index('.')], "w")
iErrorCounter = 0
iLineCounter = 0
print "\tProcessing {}...".format(dataFileName)
# extract readings
##################
for line in open(dataFileName).readlines():
if line.find('C1N1 X')>0:
line = re.sub(": ", ":", line) #remove gaps
#Val = line.split(" ") #only one argument allowed
Val = re.split('[\t ]',line)
Val = list(filter(None, Val)) #remove empty elements
iLineCounter=iLineCounter+1
try:
time = Val[1]
#to get X= Y= T= we need to id it first
#base_index=Val.index('.G..C1N1') #to find index of item
base_index=[idx for idx, str in enumerate(Val) if 'C1N1' in str][0] #find part of string
#I can't just list it from the back as sometimes parser breaks last field
Val = Val[base_index+1:base_index+4] #get X= Y= T=
Val[-1] = Val[-1].strip()[:-1] #remove noise at the end
Val = re.split(' |:', ' '.join(Val))[1::2] #X Y T
Val.append(time) #X Y T time
#pdb.set_trace()
outString = "{d[3]},{d[0]},{d[1]},{d[2]}\n".format(d=Val)
#time X Y T
#
outString = re.sub("\+", "", outString) #remove +
outFile.write(outString)
except: #ValueError:
print "Reading error at %s: %s" % (line,ValueError)
#pdb.set_trace()
iErrorCounter=iErrorCounter+1
# finalise outputs
##################
print "\n\nReading errors:%i \n Lines Read:%i \n" %(iErrorCounter,iLineCounter)
outFile.close()
#################################################################
############## MAIN BODY STARTS HERE
#################################################################
if __name__ == "__main__":
    os.chdir("./")  # work from the current directory
    print("Found the following Nivel sensor log files:\n")
    for file in glob.glob("*.log"):
        # Convert every Portmon capture (*.log) to a .out CSV.
        parserNivel(file)
print "\tAll is done now." | StarcoderdataPython |
1955096 | <reponame>drzymala-pro/histograph<gh_stars>0
# -*- coding: utf-8 -*-
from histograph.histograph import Histograph
| StarcoderdataPython |
8143237 | import marshmallow as ma
from flask.globals import current_app
from marshmallow.exceptions import ValidationError
from marshmallow.utils import missing
from sqlalchemy.exc import IntegrityError
from werkzeug.exceptions import UnprocessableEntity
from slurk.extensions.api import abort
def register_blueprints(api):
    """Attach every slurk REST module's blueprint to *api* under
    ``/slurk/api/<module-name>``."""
    from . import layouts, logs, permissions, tokens, users, tasks, rooms, openvidu

    modules = (
        layouts,
        openvidu,
        rooms,
        permissions,
        tokens,
        users,
        tasks,
        logs,
    )
    for module in modules:
        if module is None:
            continue
        endpoint = module.__name__.rsplit(".", 1)[-1]
        api.register_blueprint(module.blp, url_prefix=f"/slurk/api/{endpoint}")
class Id(ma.fields.Integer):
    """Integer field whose value must be the primary key of an existing
    row in *table*."""

    def __init__(self, table, **kwargs):
        self._table = table
        super().__init__(strict=False, **kwargs)

    def _validated(self, value):
        from flask.globals import current_app

        entity_id = super()._validated(value)
        exists = current_app.session.query(self._table).get(entity_id) is not None
        if not exists:
            raise ValidationError(f"{self._table.__tablename__} `{entity_id}` does not exist")
        return entity_id
class BaseSchema(ma.Schema):
    # Base schema: derives variant schemas (Creation/Response/Filter/Update)
    # from a single field definition.
    # Cache of generated schema classes, keyed by generated class name and
    # shared by every subclass.
    known_schemas = {}

    class Meta:
        unknown = ma.RAISE
        ordered = True

    def _create_schema(self, name, fields, inner=None):
        # Build (or fetch from the cache) e.g. "UserCreationSchema" from
        # "UserSchema" + "Creation", carrying the given fields.
        name = f'{self.__class__.__name__.split("Schema")[0]}{name}Schema'
        if name in BaseSchema.known_schemas:
            return BaseSchema.known_schemas[name]
        # Give the generated class a Meta that merges the subclass Meta (if
        # any) with the base one; "register": False keeps it out of the
        # class registry.
        if BaseSchema.Meta == getattr(self, "Meta"):
            fields["Meta"] = type(
                "GeneratedMeta", (BaseSchema.Meta,), {"register": False}
            )
        else:
            fields["Meta"] = type(
                "GeneratedMeta",
                (BaseSchema.Meta, getattr(self, "Meta")),
                {"register": False},
            )
        BaseSchema.known_schemas[name] = type(name, (BaseSchema,), fields)
        return BaseSchema.known_schemas[name]

    # NOTE(review): stacked @classmethod/@property only works on
    # Python 3.9-3.10 — confirm the supported interpreter range.
    @classmethod
    @property
    def Creation(cls):
        """Returns the class only with load fields"""

        def create_schema(schema):
            fields = schema.load_fields
            # Recurse into nested BaseSchema fields so they are reduced to
            # their load fields as well.
            for field in fields.values():
                if isinstance(field, ma.fields.Nested) and issubclass(
                    field.nested, BaseSchema
                ):
                    field.nested = create_schema(field.nested())
            return schema._create_schema("Creation", fields)

        return create_schema(cls())

    @classmethod
    @property
    def Response(cls):
        """Returns the class only with dump fields

        For all fields the required property is set to False and the missing property is reset"""

        def create_schema(schema):
            fields = schema.dump_fields
            for field in fields.values():
                field.required = False
                field.missing = missing
                if isinstance(field, ma.fields.Nested) and issubclass(
                    field.nested, BaseSchema
                ):
                    field.nested = create_schema(field.nested())
            return schema._create_schema("Response", fields)

        return create_schema(cls())

    @classmethod
    @property
    def Filter(cls):
        """Returns the class only with load fields, which are either Integer, String, or Boolean

        For all fields the required property is set to False, None is allowed, the missing property is reset,
        and the metadatafield "filter_description" is used as description"""

        def create_schema(schema):
            # Only scalar fields can be used as query-string filters.
            fields = {
                k: v
                for k, v in schema.load_fields.items()
                if isinstance(
                    v, (ma.fields.Integer, ma.fields.String, ma.fields.Boolean)
                )
            }
            for field in fields.values():
                field.allow_none = True
                field.required = False
                field.missing = missing
                if isinstance(field, ma.fields.Nested) and issubclass(
                    field.nested, BaseSchema
                ):
                    field.nested = create_schema(field.nested())
                if "filter_description" in field.metadata:
                    field.metadata = {
                        "description": field.metadata["filter_description"]
                    }
            return schema._create_schema("Filter", fields)

        return create_schema(cls())

    @classmethod
    @property
    def Update(cls):
        """Returns the class only with load fields

        For all fields the required property is set to False, None is allowed, and the missing property is reset"""

        def create_schema(schema):
            fields = schema.load_fields
            # NOTE(review): unlike the docstring, allow_none is not set
            # here — confirm whether that is intentional.
            for field in fields.values():
                field.required = False
                field.missing = missing
                if isinstance(field, ma.fields.Nested) and issubclass(
                    field.nested, BaseSchema
                ):
                    field.nested = create_schema(field.nested())
            return schema._create_schema("Update", fields)

        return create_schema(cls())
class CommonSchema(BaseSchema):
    """Common fields and operations for database access"""

    # Read-only bookkeeping columns shared by every entity.
    id = ma.fields.Integer(
        dump_only=True, description="Unique ID that identifies this entity"
    )
    date_created = ma.fields.DateTime(
        dump_only=True, description="Server time at which this entity was created"
    )
    date_modified = ma.fields.DateTime(
        dump_only=True,
        allow_none=True,
        description="Server time when this entity was last modified",
    )

    def list(self, args):
        # Newest first; args were validated by the Filter schema upstream.
        return (
            current_app.session.query(self.Meta.model)
            .filter_by(**args)
            .order_by(self.Meta.model.date_created.desc())
            .all()
        )

    def post(self, item):
        # Accept either a ready-made model instance or a dict of fields.
        if isinstance(item, self.Meta.model):
            entity = item
        else:
            entity = self.Meta.model(**item)
        db = current_app.session
        db.add(entity)
        db.commit()
        return entity

    def put(self, old, new):
        # Full replacement: every load field of `old` is overwritten;
        # fields absent from `new` become None.
        if isinstance(new, self.Meta.model):
            entity = new
        else:
            entity = self.Meta.model(**new)
        for field in self.load_fields.keys():
            setattr(old, field, getattr(entity, field, None))
        current_app.session.commit()
        return old

    def patch(self, old, new):
        # Partial update: only keys present in `new` are written.
        for field in self.load_fields.keys():
            if field in new:
                setattr(old, field, new[field])
        current_app.session.commit()
        return old

    def delete(self, entity):
        db = current_app.session
        db.delete(entity)
        try:
            db.commit()
        except IntegrityError:
            # Foreign keys still reference this row: roll back and answer 422.
            db.rollback()
            abort(
                UnprocessableEntity,
                query=f"{self.Meta.model.__tablename__} `{entity.id}` is still in use",
            )
| StarcoderdataPython |
3541061 | <gh_stars>1-10
from twisted.internet import reactor, defer, endpoints
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.protocols.amp import AMP
from ampserver import Sum, Divide
def doMath():
    """Connect twice to the AMP math server and issue a Sum and a Divide
    command in parallel; stop the reactor once both have completed."""
    destination = TCP4ClientEndpoint(reactor, '1172.16.58.3', 1234)
    sumDeferred = connectProtocol(destination, AMP())

    def connected(ampProto):
        return ampProto.callRemote(Sum, a=13, b=81)
    sumDeferred.addCallback(connected)

    def summed(result):
        # AMP responses are dicts keyed by the command's response schema.
        return result['total']
    sumDeferred.addCallback(summed)

    divideDeferred = connectProtocol(destination, AMP())

    def connected(ampProto):
        # Rebinds `connected`; the Sum callback above already captured the
        # previous function object, so this is harmless.
        return ampProto.callRemote(Divide, numerator=1234, denominator=0)
    divideDeferred.addCallback(connected)

    def trapZero(result):
        # The server-side ZeroDivisionError arrives as a Failure; translate
        # it to +infinity instead of failing the whole DeferredList.
        result.trap(ZeroDivisionError)
        print "Divided by zero: returning INF"
        return 1e1000
    divideDeferred.addErrback(trapZero)

    def done(result):
        print 'Done with math:', result
        reactor.stop()
    defer.DeferredList([sumDeferred, divideDeferred]).addCallback(done)
if __name__ == '__main__':
    # Schedule the requests, then run the reactor until done() stops it.
    doMath()
    reactor.run()
| StarcoderdataPython |
11334432 | <reponame>cmbasnett/fake-bpy-module<gh_stars>0
# Stub attribute for IDE/autocomplete use in fake-bpy-module; the real
# value is provided by Blender's bpy at runtime.
DecimateModifier.face_count = None
| StarcoderdataPython |
4948921 | from model.linter import Linter
import sys
from tkinter import *
from interpreter import Interpreter
class Model():
    """GUI model: holds the tkinter variables bound to the widgets plus the
    linter/interpreter state and the proof statistics."""

    def __init__(self):
        self.linter = Linter()
        # tkinter variables (require a Tk root to exist).
        self.errors = StringVar()    # linter result text
        self.maxsteps = IntVar()     # interpreter step limit
        self.input = StringVar()
        self.name = StringVar()
        self.name.set("Hello world!")
        self.maxsteps.set(1000)
        self.errors.set("Not checked")
        self.interpreter = Interpreter()
        self.program = ""            # source text of the loaded program
        # Proof stats (filled in after a run; empty strings until then)
        self.prooffound = ""
        self.stepstaken = ""
        self.proctime = ""
        self.rulesused = ""
        self.proofseq = ""
        self.prooftree = ""
| StarcoderdataPython |
8146033 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import string
from random import *
# Character pools used by genPassword(); a password must contain at least
# one character from each pool.
a = string.ascii_lowercase   # lowercase letters
b = string.ascii_uppercase   # uppercase letters
c = string.digits            # decimal digits
d = string.punctuation       # punctuation characters
def genPassword(n):
    """Generate a random password of length ``n``.

    The result is guaranteed to contain at least one lowercase letter, one
    uppercase letter, one digit and one punctuation character; the other
    positions are drawn from all four pools combined, and the characters
    are shuffled.

    BUG FIX / generalization: the original drew *distinct* characters with
    random.sample and could raise ValueError whenever a randomly chosen
    count exceeded a pool size (e.g. more than 10 digits, possible from
    n ≈ 13 upward).  This version works for any n >= 4 and allows repeats.

    Args:
        n: desired password length, at least 4.

    Returns:
        The generated password string.

    Raises:
        ValueError: if n < 4 (the original also raised ValueError here,
            from randint on an empty range).
    """
    if n < 4:
        raise ValueError("password length must be at least 4")
    lowers = string.ascii_lowercase
    uppers = string.ascii_uppercase
    digits = string.digits
    punct = string.punctuation
    # One mandatory character per category ...
    chars = [choice(lowers), choice(uppers), choice(digits), choice(punct)]
    # ... plus n - 4 characters from the combined pool.
    pool = lowers + uppers + digits + punct
    chars.extend(choice(pool) for _ in range(n - 4))
    # Shuffle so the mandatory characters are not in predictable positions.
    shuffle(chars)
    return ''.join(chars)
if __name__ == '__main__':
    # Demo: print one 12-character password.
    print(genPassword(12))
| StarcoderdataPython |
12862932 | from abc import ABCMeta
from whatsapp_tracker.bases.selenium_bases.base_selenium_kit import BaseSeleniumKit
from whatsapp_tracker.mixins.seleniun_keyboard_press_mixin import SeleniumKeyBoardPressMixin
class BaseSeleniumKeyboard(BaseSeleniumKit, SeleniumKeyBoardPressMixin, metaclass=ABCMeta):
    # Abstract keyboard helper: combines the base Selenium kit with the
    # key-press mixin; concrete keyboards subclass this.
    ...
| StarcoderdataPython |
11292747 | import time
from python_ecs.ecs import Component
class Stronger(Component):
    """ECS component marking an entity as 'stronger'."""

    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): starts at 0; presumably advanced by a system each
        # frame — confirm against the systems that read this component.
        self.time = 0
        self.start_time = time.time()  # wall-clock moment the effect began
| StarcoderdataPython |
3339056 | <filename>significance_test/significanceTest.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
"""
Statistical Hypothesis Test(significance test)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import shapiro
from scipy.stats import normaltest
from scipy.stats import anderson
from scipy.stats import chi2_contingency
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from scipy.stats import kendalltau
import pandas as pd
class SignificanceTest:
    """
    Statistical hypothesis (significance) testing helpers.

    Rule of thumb:
        If Data Is Gaussian:
            Use Parametric Statistical Methods
        Else:
            Use Nonparametric Statistical Methods
    """

    def __init__(self):
        """Seed numpy's RNG so repeated runs are reproducible."""
        np.random.seed(0)

    def normality_test(self, data, plot_hist=False):
        """
        Test whether ``data`` has a Normal (Gaussian) distribution.

        Runs three tests (Shapiro-Wilk, D'Agostino's K^2, Anderson-Darling),
        each with H0 = "the sample has a Gaussian distribution".  The sample
        is assumed to consist of iid observations.

        Args:
            data: 1-D sample of observations.
            plot_hist: if True, show a histogram of the data first.

        Returns:
            Fraction of the three tests that failed to reject H0:
            1.0 -> looks Gaussian, 0.0 -> hard fail, otherwise soft fail.
        """
        if plot_hist is True:
            plt.hist(data)
            plt.show()

        alpha = 0.05
        test_succeeded = 0
        num_tests = 3

        # Shapiro-Wilk: p > alpha means we cannot reject normality.
        stat, p = shapiro(data)
        if p > alpha:
            test_succeeded += 1

        # D'Agostino's K^2 test.
        stat, p = normaltest(data)
        if p > alpha:
            test_succeeded += 1

        # Anderson-Darling: succeeds only when the statistic stays below
        # every critical value.
        result = anderson(data)
        if all(result.statistic < cv for cv in result.critical_values):
            test_succeeded += 1

        test_succeeded_pert = test_succeeded / num_tests
        if test_succeeded_pert == 0:
            print("Samples do not follows a normal distribution. (hard fail)")
        elif test_succeeded_pert == 1:
            print("Samples do follows a normal distribution.")
        else:
            print("Samples may not follows a normal distribution. (soft fail)")
        return test_succeeded_pert

    def correlation_test(self, data1, data2, normal_dist=True, corr_algo="spearman"):
        """
        Test whether two samples are correlated.

        Uses Pearson's r when ``normal_dist`` is True (requires normally
        distributed samples with equal variance); otherwise the rank
        correlation chosen with ``corr_algo`` ("spearman" or "kendall").
        For every test H0 = "the two samples are independent".

        Args:
            data1: input data1 (iid observations)
            data2: input data2 (iid observations)
            normal_dist: whether the samples are normally distributed.
            corr_algo: rank correlation algorithm name.

        Returns:
            The correlation coefficient.

        Raises:
            ValueError: for an unsupported ``corr_algo``.
        """
        if normal_dist:
            corr, p = pearsonr(data1, data2)
        elif corr_algo == "spearman":
            corr, p = spearmanr(data1, data2)
        elif corr_algo == "kendall":
            corr, p = kendalltau(data1, data2)
        else:
            raise ValueError("not supported rank correlation!")

        # interpret the significance
        alpha = 0.05
        if p > alpha:
            print('Samples are uncorrelated (fail to reject H0) p=%.3f' % p)
        else:
            print('Samples are correlated (reject H0) p=%.3f' % p)
        return corr

    def chi_square_test(self, dataframe):
        """
        Chi-squared test of independence between two categorical variables.

        Builds a contingency table from columns ``A`` and ``B`` of
        ``dataframe`` and tests H0 = "the two variables are independent".
        Assumes independent observations and 25 or more examples in each
        cell of the contingency table.

        Args:
            dataframe: pandas DataFrame with categorical columns ``A``/``B``.
        """
        crossed = pd.crosstab(dataframe.A, dataframe.B)
        stat, p, dof, expected = chi2_contingency(crossed)
        alpha = 0.05
        if p > alpha:
            print('Samples are unrelated (fail to reject H0) p=%.3f' % p)
        else:
            print('Samples are related (reject H0) p=%.3f' % p)

    def compare_data_samples(self, data1, data2, normal_dist=True, test_name="t-test"):
        """
        Test whether two samples differ significantly.

        H0 = "the samples have equal means" (parametric) or "equal
        distributions" (nonparametric).

        Parametric tests (``normal_dist=True``; samples must be iid,
        normally distributed, with equal variance):
            "t-test"        -> Student's t-test (independent samples)
            "paired-t-test" -> paired Student's t-test
            "anova"         -> one-way ANOVA
        Nonparametric counterparts (``normal_dist=False``; samples must be
        iid and rankable):
            "t-test" / "mannwhitneyu"    -> Mann-Whitney U test
            "paired-t-test" / "wilcoxon" -> Wilcoxon signed-rank test
            "anova" / "kruskal"          -> Kruskal-Wallis H test

        BUG FIX: the original body called every scipy test in sequence and
        passed a literal ``...`` (Ellipsis) to several of them, which always
        raised; it also ignored ``normal_dist`` and ``test_name``.  The
        Friedman test needs at least three samples, so it is not reachable
        through this two-sample interface.

        Args:
            data1: first sample.
            data2: second sample.
            normal_dist: whether the samples are normally distributed.
            test_name: which test to run (see table above).

        Returns:
            Tuple ``(stat, p)`` with the test statistic and the p-value.

        Raises:
            ValueError: for an unsupported ``test_name``.
        """
        # Imported locally, as in the original implementation.
        from scipy.stats import (ttest_ind, ttest_rel, f_oneway,
                                 mannwhitneyu, wilcoxon, kruskal)

        if normal_dist:
            dispatch = {
                "t-test": ttest_ind,
                "paired-t-test": ttest_rel,
                "anova": f_oneway,
            }
        else:
            dispatch = {
                "t-test": mannwhitneyu, "mannwhitneyu": mannwhitneyu,
                "paired-t-test": wilcoxon, "wilcoxon": wilcoxon,
                "anova": kruskal, "kruskal": kruskal,
            }
        if test_name not in dispatch:
            raise ValueError("not supported test: %s" % test_name)
        stat, p = dispatch[test_name](data1, data2)
        return stat, p
| StarcoderdataPython |
5001374 | #!/usr/bin/python3
# -*- coding : utf-8 -*-
import os
import nlpnet
class Tagger:
    '''
    POS-Tagger for portuguese language
    '''
    def __init__(self):
        # Loads the nlpnet model shipped next to this file ("pos-pt" dir).
        self.tagger = nlpnet.POSTagger(os.path.dirname(os.path.realpath(__file__)) + "/pos-pt", language="pt")

    def tag(self, text):
        '''
        Return the tagged text
        '''
        # nlpnet returns one list of (token, tag) pairs per sentence; only
        # the first sentence's pairs are returned here.
        return self.tagger.tag(text)[0]
if __name__ == "__main__":
import sys
print(Tagger().tag(sys.argv[1])) | StarcoderdataPython |
9644288 | <reponame>MTandHJ/leetcode
from typing import List
from base import version
from sorts import MinHeap
class Solution:
    # NOTE(review): all four methods share the name findKthLargest, so each
    # definition overwrites the previous one; only the last (min-heap)
    # version is reachable through the class.  @version appears to record a
    # benchmark label for each attempt — confirm in `base`.

    @version("sorted: 32ms")
    def findKthLargest(self, nums: List[int], k: int) -> int:
        # Sort ascending and take the k-th element from the back.
        return sorted(nums)[-k]

    @staticmethod
    def _sorted(nums: List) -> List:
        # NOTE(review): looks like an unfinished single partition pass of
        # quicksort; it neither recurses nor returns, and the inner while
        # can walk past the end of the list — TODO confirm before use.
        left, right = 0, len(nums)
        mark = nums[0]
        while left < right:
            right -= 1
            if nums[right] < mark:
                nums[left], nums[right] = nums[right], nums[left]
                left += 1
            while mark < nums[left]:
                left += 1

    @version("sorted: 356ms")
    def findKthLargest(self, nums: List[int], k: int) -> int:
        # Keep the k largest values seen so far in an ascending list;
        # stack[0] is always the current k-th largest.
        stack = [-10 ** 5] * k
        for num in nums:
            if num > stack[0]:
                stack[0] = num
                stack.sort()
        return stack[0]

    def quicksort_(self, nums: List, low: int, high: int, k) -> List:
        # Partial quicksort: only recurses into partitions that can contain
        # index len(nums) - k (the k-th largest element's final position).
        if low < high and low <= len(nums) - k and high >= len(nums) - k:
            pivot = nums[low]
            left, right = low, high
            while left < right:
                while left < right and nums[right] >= pivot:
                    right -= 1
                nums[left], nums[right] = nums[right], nums[left]
                while left < right and pivot > nums[left]:
                    left += 1
                nums[left], nums[right] = nums[right], nums[left]
            self.quicksort_(nums, low=low, high=left - 1, k=k)
            self.quicksort_(nums, low=right + 1, high=high, k=k)

    @version("844ms?")
    def findKthLargest(self, nums: List[int], k: int) -> int:
        # NOTE(review): calls quicksort_ without low/high/k, which would
        # raise TypeError — shadowed by the next definition, so never runs.
        self.quicksort_(nums)
        return nums[-k]

    @version("minimum heap: 404ms")
    def findKthLargest(self, nums: List[int], k: int) -> int:
        # Maintain a size-k min-heap; its root is the k-th largest overall.
        heap = MinHeap()
        for num in nums:
            heap.add(num)
            if len(heap) > k:
                heap.pop()
        return heap.pop()
| StarcoderdataPython |
11372639 | #!/usr/local/bin/python3
#-*- encoding: utf-8 -*-
import json
from common.db.redisDB import RedisDB
# Singleton pattern: the process holds exactly one Redis DB connection.
class ClientRedis:
    _instance = None
    _dbConn = None
    # Redis key namespaces: "<prefix><clientIp>" -> commandIp / device JSON.
    _prefixClient = 'worker_client_t0001:ip:'
    _prefixClientDeviceInfo = 'worker_client_t0001:device:'
    _timeout = 7200  # NOTE(review): defined but never applied below — confirm intent

    def __new__(cls):
        # Create the instance and its DB connection only once.
        if not cls._instance:
            cls._instance = object.__new__(cls)
            cls._dbConn = RedisDB().connect()
        return cls._instance

    # NOTE(review): the methods below are instance methods whose first
    # parameter is named `cls`; it actually receives the singleton instance.

    def registClient(cls, commandIp, clientIp):
        # Register clientIp -> commandIp.  Returns False when some existing
        # entry already stores this clientIp as its value; the mapping is
        # written either way.
        result = True
        for key in cls._dbConn.keys(cls._prefixClient + "*"):
            if cls._dbConn.get(key) == clientIp:
                result = False
                break;
        cls._dbConn.set(cls._prefixClient + clientIp, commandIp)
        return result

    def getClient(cls):
        # Map of clientIp -> commandIp for every registered client.
        keys = cls._dbConn.keys(cls._prefixClient + "*")
        client = {}
        for key in keys:
            clientKey = key.replace(cls._prefixClient, "")
            client[clientKey] = cls._dbConn.get(key)
        return client

    def deleteClient(cls, clientIp):
        # Remove both the IP mapping and the device payload for clientIp.
        result = True
        cls._dbConn.delete(cls._prefixClient + clientIp)
        cls._dbConn.delete(cls._prefixClientDeviceInfo + clientIp)
        return result

    def registClientDevice(cls, clientIp, device):
        # Store the device description (as JSON) for an already-registered
        # client; unknown clients are silently ignored (still returns True).
        result = True
        clientKeys = cls._dbConn.keys(cls._prefixClient + "*")
        clientKey = cls._prefixClient + clientIp
        if clientKey in clientKeys:
            keys = cls._dbConn.keys(cls._prefixClientDeviceInfo + "*")
            key = cls._prefixClientDeviceInfo + clientIp
            if key in keys:
                cls._dbConn.delete(key)
            cls._dbConn.set(key, json.dumps(device, ensure_ascii=False))
        return result

    def deleteClientDevice(cls, clientIp):
        # Same cleanup as deleteClient (kept separate for call-site clarity).
        result = True
        cls._dbConn.delete(cls._prefixClient + clientIp)
        cls._dbConn.delete(cls._prefixClientDeviceInfo + clientIp)
        return result

    def getClientDevice(cls):
        # First drop device entries whose owning client is gone, then return
        # clientIp -> parsed device JSON for the remainder.
        clientKeys = cls._dbConn.keys(cls._prefixClient + "*")
        clientDeviceKeys = cls._dbConn.keys(cls._prefixClientDeviceInfo + "*")
        for clientDeviceKey in clientDeviceKeys:
            clientIp = clientDeviceKey.replace(cls._prefixClientDeviceInfo, "")
            clientkey = cls._prefixClient + clientIp
            if clientkey not in clientKeys:
                cls.deleteClientDevice(clientIp)
        clientDeviceKeys = cls._dbConn.keys(cls._prefixClientDeviceInfo + "*")
        clientDevice = {}
        for key in clientDeviceKeys:
            clientDeviceKey = key.replace(cls._prefixClientDeviceInfo, "")
            clientDevice[clientDeviceKey] = json.loads(cls._dbConn.get(key))
        return clientDevice
| StarcoderdataPython |
3470798 |
from BasicMetrics import true_positive, false_positive
def precision(y_true, y_pred) -> float:
    """Precision = TP / (TP + FP).

    Args:
        y_true: ground-truth binary labels.
        y_pred: predicted binary labels.

    Returns:
        Fraction of positive predictions that are correct.
    """
    tp = true_positive(y_true, y_pred)
    fp = false_positive(y_true, y_pred)
    return tp / (tp + fp)
# Quick manual check: TP = 2 (indices 1, 3), FP = 1 (index 5),
# so the expected precision is 2 / (2 + 1) = 0.666...
l1 = [0,1,1,1,0,0,0,1]
l2 = [0,1,0,1,0,1,0,0]
print(precision(l1, l2))
| StarcoderdataPython |
4979012 | <reponame>kumagai-group/vise<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright (c) 2020. Distributed under the terms of the MIT License.
from pymatgen.io.vasp.sets import Kpoints
class ViseKpoints(Kpoints):
    # Kpoints subclass whose string form writes explicit k-point
    # coordinates with 17 decimal places when there is more than one point
    # (pymatgen's default uses plain str()).
    def __str__(self):
        lines = [self.comment, str(self.num_kpts), self.style.name]
        style = self.style.name.lower()[0]
        if style == "l":
            lines.append(self.coord_type)
        for i in range(len(self.kpts)):
            # The following four lines are modified (high-precision output
            # for multi-point lists).
            if len(self.kpts) == 1:
                lines.append(" ".join([str(x) for x in self.kpts[i]]))
            else:
                lines.append(" ".join([f"{x:20.17f}" for x in self.kpts[i]]))
            if style == "l":
                # Line mode: annotate each point with its label; blank line
                # after every segment (pairs of points).
                lines[-1] += " ! " + self.labels[i]
                if i % 2 == 1:
                    lines[-1] += "\n"
            elif self.num_kpts > 0:
                # The following four lines are modified (weight formatting).
                if self.labels is not None:
                    lines[-1] += f"{self.kpts_weights[i]:4} {self.labels[i]}"
                else:
                    lines[-1] += f"{self.kpts_weights[i]:4}"
        # Print shifts for automatic kpoints types if not zero.
        if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):
            lines.append(" ".join([str(x) for x in self.kpts_shift]))
        return "\n".join(lines) + "\n"
| StarcoderdataPython |
120512 | """
When you select a contiguous block of text in a PDF viewer, the selection is highlighted with a blue rectangle. In this PDF viewer, each word is highlighted independently. For example:
PDF-highighting.png
In this challenge, you will be given a list of letter heights in the alphabet and a string. Using the letter heights given, determine the area of the rectangle highlight in assuming all letters are wide.
For example, the highlighted . Assume the heights of the letters are and . The tallest letter is high and there are letters. The hightlighted area will be so the answer is .
Function Description
Complete the designerPdfViewer function in the editor below. It should return an integer representing the size of the highlighted area.
designerPdfViewer has the following parameter(s):
h: an array of integers representing the heights of each letter
word: a string
Input Format
The first line contains space-separated integers describing the respective heights of each consecutive lowercase English letter, ascii[a-z].
The second line contains a single word, consisting of lowercase English alphabetic letters.
Constraints
, where is an English lowercase letter.
contains no more than letters.
Output Format
Print a single integer denoting the area in of highlighted rectangle when the given word is selected. Do not print units of measure.
Sample Input 0
1 3 1 3 1 4 1 3 2 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5 5
abc
Sample Output 0
9
"""
#!/bin/python3
import math
import os
import random
import re
import sys
from typing import List
# Complete the designerPdfViewer function below.
def designerPdfViewer(h: List[int], word: str) -> int:
    """Return the area of the highlight rectangle for *word*.

    Each letter is 1mm wide; *h* holds the per-letter heights in mm
    (index 0 = 'a' ... 25 = 'z'), so the area is the tallest letter's
    height times the word length.  An empty word yields 0.
    """
    tallest = max((h[ord(letter) - ord('a')] for letter in word), default=0)
    return tallest * len(word)
if __name__ == '__main__':
    # HackerRank harness: the result is written to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    h = list(map(int, input().rstrip().split()))  # 26 letter heights

    word = input()

    result = designerPdfViewer(h, word)

    fptr.write(str(result) + '\n')

    fptr.close()
| StarcoderdataPython |
9605268 | import csv
# Compare the hand-fixed and original ("unfix") TreeRipper ground-truth CSVs
# and count how many labels the fixing pass changed.  Python 2 script
# (reader.next(), print statements, binary-mode csv).
ground_truth_path = "/Users/sephon/Desktop/Research/VizioMetrics/Corpus/Phylogenetic/CNN_corpus/TreeRipper_dataset/TreeRipper_multi_dataset.csv"
ground_truth_unfix_path = "/Users/sephon/Desktop/Research/VizioMetrics/Corpus/Phylogenetic/CNN_corpus/TreeRipper_dataset/TreeRipper_multi_dataset_unfix.csv"

count = 0
ground_truth = {}
# Load the fixed multi-panel labels: filename -> label.
with open(ground_truth_path, 'rb') as incsv:
    reader = csv.reader(incsv, dialect='excel', delimiter='\t')
    reader.next()  # skip the header row
    for row in reader:
        print row
        ground_truth[row[0]] = row[1]
print
# Count disagreements against the unfixed multi-panel labels.
with open(ground_truth_unfix_path, 'rb') as incsv:
    reader = csv.reader(incsv, dialect='excel', delimiter='\t')
    reader.next()
    for row in reader:
        print row
        if ground_truth[row[0]] != row[1]:
            count += 1

# Repeat for the single-panel dataset, accumulating into the same counter.
ground_truth_path = "/Users/sephon/Desktop/Research/VizioMetrics/Corpus/Phylogenetic/CNN_corpus/TreeRipper_dataset/TreeRipper_dataset.csv"
ground_truth_unfix_path = "/Users/sephon/Desktop/Research/VizioMetrics/Corpus/Phylogenetic/CNN_corpus/TreeRipper_dataset/TreeRipper_dataset_unfix.csv"

ground_truth = {}
with open(ground_truth_path, 'rb') as incsv:
    reader = csv.reader(incsv, dialect='excel', delimiter='\t')
    reader.next()
    for row in reader:
        print row
        ground_truth[row[0]] = row[1]
print
with open(ground_truth_unfix_path, 'rb') as incsv:
    reader = csv.reader(incsv, dialect='excel', delimiter='\t')
    reader.next()
    for row in reader:
        print row
        if ground_truth[row[0]] != row[1]:
            count += 1

print "fix truth count:", count
| StarcoderdataPython |
3323035 | #!/usr/bin/env python
"""
parse.py - replace simple yaml values
Usage:
parse.py [-h] --file filename.yaml [--dry-run] --key-val a.b.c=val
Options:
-h, --help show this help message and exit
--file filename.yaml replace in this YAML file
--dry-run Don't replace in file, just print to stdout
--key-val a.b.c=val Key/Value pair to replace in yaml
This script allows simple yaml values to be replaced by dot-path. For example, give a yaml file /my-home/file.yaml like:
# my awesome yaml config
some:
value:
is: something
calling 'parse.yaml --file /my-home/file.yaml --key-val some.value.is=other' Will result in /my-home/file.yaml like:
# my awesome yaml config
some:
value:
is: other
Note the preserved comment line.
It is necessary because upstream helm chart repository (https://github.com/helm/charts) requires chart values.yaml
files to be annotated with comments, and normal java/groovy YAML parsers will not preserve comment lines when setting
version information for charts (such as docker tags and Chart.yaml version strings)
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Samsung CNCT"
__credits__ = ["<NAME>"]
__license__ = "Apache"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import sys
from ruamel.yaml import YAML
from functools import reduce
from argparse import ArgumentParser
def main():
    """Replace a single scalar value in a YAML file, preserving comments.

    Parses command-line arguments (--file, --dry-run, --key-val a.b.c=val),
    loads the YAML with ruamel's round-trip loader (keeps comments and
    quoting), walks the dot-path to the parent mapping and rewrites the
    final key.  Exits with status 1 on any file or dot-path error.
    """
    # handle arguments
    parser = ArgumentParser()
    parser.add_argument("--file", dest="filename", metavar="filename.yaml", required=True,
                        help="replace in this YAML file")
    parser.add_argument("--dry-run", action='store_true',
                        dest="dryrun", help="Don't replace in file, just print to stdout")
    parser.add_argument("--key-val", dest="kv", metavar="a.b.c=val", required=True,
                        help="Key/Value pair to replace in yaml")
    args = parser.parse_args()

    yaml = YAML()  # round-trip mode: comments survive load/dump
    yaml.explicit_start = False
    yaml.indent(mapping=3)
    yaml.preserve_quotes = True

    try:
        with open(args.filename) as fp:
            data = yaml.load(fp)
    except IOError:
        print("File '" + args.filename + "' not found!")
        exit(1)

    # get key and value
    # BUG FIX: split only on the first '=' so values containing '=' (e.g.
    # base64 strings) are kept intact; a missing '=' still raises as before.
    key, val = args.kv.split('=', 1)

    # first part of the key dot-path locates the parent mapping ...
    lookupKey = key.split('.')
    lookupKey.pop()
    # ... and the last part is the key to overwrite
    itemKey = key.split('.')[-1]

    # reduce down to the parent dictionary along the dot-path
    try:
        setItem = reduce(lambda c, k: c[k], lookupKey, data)
    except KeyError:
        print("'" + key + "' is not a valid dot-path in " + args.filename)
        exit(1)

    # check if this path exists in yaml
    # BUG FIX: identity comparison "is None" instead of "== None" (PEP 8).
    # NOTE(review): a key legitimately holding null is also rejected here —
    # confirm whether that is intended.
    if setItem.get(itemKey, None) is None:
        print("'" + key + "' is not a valid dot-path in " + args.filename)
        exit(1)
    setItem[itemKey] = val

    ofp = None
    if args.dryrun:
        ofp = sys.stdout
    else:
        try:
            ofp = open(args.filename, 'w')
        except IOError:
            print("File '" + args.filename + "' not found!")
            exit(1)

    yaml.dump(data, ofp)
main()
| StarcoderdataPython |
5113675 | Register =\
{
"firstname_textbox": "css:#basicBootstrapForm > div:nth-child(1) > div:nth-child(2) > input",
"lastname_textbox": "#basicBootstrapForm > div:nth-child(1) > div:nth-child(3) > input",
"address_textbox": "#basicBootstrapForm > div:nth-child(2) > div > textarea",
"email_textbox": "#eid > input",
"phone_textbox": "#basicBootstrapForm > div:nth-child(4) > div > input"
}
| StarcoderdataPython |
6655840 | <reponame>XiaoshengLin/shadow3
from __future__ import print_function
import Shadow.ShadowLibExtensions as sd
import numpy
import os
import socket
import getpass
import datetime
try:
import matplotlib.pyplot as plt
import matplotlib
except:
pass
class ArgsError(Exception):
    """Signals that a Shadow helper function received an invalid argument."""

    def __init__(self, value):
        Exception.__init__(self, value)
        self.value = value  # offending argument name, echoed by __str__

    def __str__(self):
        return repr(self.value)
class NoValueSelectedError(Exception):
    """Signals that no value was selected where one was required."""

    def __init__(self, value):
        Exception.__init__(self, value)
        self.value = value  # description of the missing selection

    def __str__(self):
        return repr(self.value)
class Histo1_Ticket:
    """Result container for histo1(): histogram arrays, optional figure,
    axis ranges/titles and the computed FWHM.  Every field starts as None
    and is filled in by the histogramming routine."""

    _FIELDS = ('histogram', 'bin_center', 'bin_left', 'figure',
               'xrange', 'yrange', 'xtitle', 'ytitle', 'title', 'fwhm')

    def __init__(self):
        for attr in self._FIELDS:
            setattr(self, attr, None)
class plotxy_Ticket:
    """Result container for plotxy(): optional figure, axis ranges/titles
    and the FWHM along each axis.  Every field starts as None and is
    filled in by the plotting routine."""

    _FIELDS = ('figure', 'xrange', 'yrange', 'xtitle', 'ytitle',
               'title', 'fwhmx', 'fwhmy')

    def __init__(self):
        for attr in self._FIELDS:
            setattr(self, attr, None)
def ErrorMsg(fromFunc, value):
    """Report a bad argument and hand back (not raise) an ArgsError so the
    call sites can do ``raise ErrorMsg('func', 'arg')``."""
    print("{} called with an error in the arguments.\nCheck help function".format(fromFunc))
    return ArgsError(value)
def getshonecol_CheckArg(beam,col):
    # Validate getshonecol() arguments: beam must be a Shadow Beam instance
    # or a file-name string; col an integer column index in 1..33.
    if not isinstance(beam,(sd.Beam,str)): raise ErrorMsg('getshonecol','beam')
    if not isinstance(col,int): raise ErrorMsg('getshonecol','col')
    if col<1 or col>33: raise ErrorMsg('getshonecol','col')
def getshcol_CheckArg(beam,col): #the rest of checks are included in the function getshonecol_CheckArg
    """Validate the arguments of getshcol.

    *col* may be a single int or a tuple/list of ints, each in 1..33.
    Raises ArgsError (via ErrorMsg) on the first invalid entry.
    """
    if not isinstance(beam,(sd.Beam,str)): raise ErrorMsg('getshcol','beam')
    if not isinstance(col,(int,tuple,list)): raise ErrorMsg('getshcol','col')
    if isinstance(col, int):
        if col<1 or col>33: raise ErrorMsg('getshcol','col')
    else:
        # Sequence form: every element must itself be a valid column number.
        for c in col:
            if not isinstance(c,int): raise ErrorMsg('getshcol','col')
            if c<1 or c>33: raise ErrorMsg('getshcol','col')
def Histo1_CheckArg(beam,col,xrange,yrange,nbins,nolost,ref,write,title,xtitle,ytitle,calfwhm,noplot):
    """Validate every argument of Histo1.

    Raises ArgsError (via ErrorMsg) on the first invalid argument.
    xrange/yrange/xtitle/ytitle may legitimately be None; dummies are
    substituted below so the type checks pass.
    """
    if not isinstance(beam,(sd.Beam,str)): raise ErrorMsg('Histo1','beam')
    if not isinstance(col,int): raise ErrorMsg('Histo1','col')
    if col<1 or col>33: raise ErrorMsg('Histo1','col')
    # the next 4 lines don't matter, it is a trick to pass the test when None
    if xrange==None: xrange=(1.0,2.0)
    if yrange==None: yrange=(1.0,2.0)
    if xtitle==None: xtitle='pippo'
    if ytitle==None: ytitle='pippo'
    if not isinstance(xrange,(tuple,list)): raise ErrorMsg('Histo1','xrange')
    if len(xrange)!=2: raise ErrorMsg('Histo1','xrange')
    if not isinstance(xrange[0],(int,float)) or not isinstance(xrange[1],(int,float)): raise ErrorMsg('Histo1','xrange')
    if not isinstance(yrange,(tuple,list)): raise ErrorMsg('Histo1','yrange')
    if len(yrange)!=2: raise ErrorMsg('Histo1','yrange')
    if not isinstance(yrange[0],(int,float)) or not isinstance(yrange[1],(int,float)): raise ErrorMsg('Histo1','yrange')
    if not isinstance(nbins,int): raise ErrorMsg('Histo1','nbins')
    if nbins<=0: raise ErrorMsg('Histo1','nbins')
    if nolost!=0 and nolost!=1 and nolost!=2: raise ErrorMsg('Histo1','nolost')
    # A weight-column number (22..33) counts as "use weights": collapse to 1
    # so the 0/1 check below accepts it.  (Local rebind only; the caller's
    # ref is unchanged.)
    if ref>=22 and ref<=33: ref = 1
    if ref!=0 and ref!=1: raise ErrorMsg('Histo1','ref')
    if write!=0 and write!=1: raise ErrorMsg('Histo1','write')
    if not isinstance(title,str): raise ErrorMsg('Histo1','title')
    if not isinstance(xtitle,str): raise ErrorMsg('Histo1','xtitle')
    if not isinstance(ytitle,str): raise ErrorMsg('Histo1','ytitle')
    if calfwhm!=0 and calfwhm!=1: raise ErrorMsg('Histo1','calfwhm')
    if noplot!=0 and noplot!=1: raise ErrorMsg('Histo1','noplot')
def plotxy_CheckArg(beam,cols1,cols2,nbins,nbins_h,level,xrange,yrange,nolost,title,xtitle,ytitle,noplot,calfwhm,contour):
    """Validate every argument of plotxy.

    Raises ArgsError (via ErrorMsg) on the first invalid argument.
    xrange/yrange/xtitle/ytitle may legitimately be None; dummies are
    substituted below so the type checks pass.
    """
    if not isinstance(beam,(sd.Beam,str)): raise ErrorMsg('plotxy','beam')
    if cols1<1 or cols1>33: raise ErrorMsg('plotxy','cols1')
    if cols2<1 or cols2>33: raise ErrorMsg('plotxy','cols2')
    if not isinstance(nbins,int): raise ErrorMsg('plotxy','nbins')
    if nbins<=0: raise ErrorMsg('plotxy','nbins')
    if not isinstance(nbins_h,int): raise ErrorMsg('plotxy','nbins_h')
    if nbins_h<=0: raise ErrorMsg('plotxy','nbins_h')
    if not isinstance(level,int): raise ErrorMsg('plotxy','level')
    if level<=0: raise ErrorMsg('plotxy','level')
    # the next 4 lines don't matter, it is a trick to pass the test when None
    if xrange==None: xrange=(1.0,2.0)
    if yrange==None: yrange=(1.0,2.0)
    if xtitle==None: xtitle='pippo'
    if ytitle==None: ytitle='pippo'
    if not isinstance(xrange,(tuple,list)): raise ErrorMsg('plotxy','xrange')
    if len(xrange)!=2: raise ErrorMsg('plotxy','xrange')
    if not isinstance(xrange[0],(int,float)) or not isinstance(xrange[1],(int,float)): raise ErrorMsg('plotxy','xrange')
    if not isinstance(yrange,(tuple,list)): raise ErrorMsg('plotxy','yrange')
    if len(yrange)!=2: raise ErrorMsg('plotxy','yrange')
    if not isinstance(yrange[0],(int,float)) or not isinstance(yrange[1],(int,float)): raise ErrorMsg('plotxy','yrange')
    if nolost!=0 and nolost!=1 and nolost!=2: raise ErrorMsg('plotxy','nolost')
    if not isinstance(title,str): raise ErrorMsg('plotxy','title')
    if not isinstance(xtitle,str): raise ErrorMsg('plotxy','xtitle')
    if not isinstance(ytitle,str): raise ErrorMsg('plotxy','ytitle')
    if noplot!=0 and noplot!=1: raise ErrorMsg('plotxy','noplot')
    #if ref!=0 and ref!=1: raise ErrorMsg('plotxy','ref')
    if calfwhm!=0 and calfwhm!=1 and calfwhm!=2: raise ErrorMsg('plotxy','calfwhm')
    if not isinstance(contour,int): raise ErrorMsg('plotxy','contour')
    if contour<0 or contour>6: raise ErrorMsg('plotxy','contour')
def setGoodRange(col):
    """Return [lo, hi] plot limits padded 5% beyond the data in *col*.

    *col* is a numpy array (uses .size).  An empty column yields [-1, 1];
    an all-zero column also falls back to [-1, 1].
    """
    if col.size == 0:
        return [-1, 1]
    lo, hi = min(col), max(col)
    # Pad each bound 5% further from zero (i.e. widen the interval).
    lo = lo * 0.95 if lo > 0.0 else lo * 1.05
    hi = hi * 0.95 if hi < 0.0 else hi * 1.05
    if lo == hi:
        # Only reachable when both bounds are exactly zero.
        lo = lo * 0.95
        hi = hi * 1.05
    if lo == 0.0:
        lo, hi = -1.0, 1.0
    return [lo, hi]
def findIndex(xx, n, la, lb):
    """Map coordinate *xx* to a bin index for *n* bins spanning [la, lb].

    Bin positions refer to bin centres, so half a bin width is subtracted
    before scaling.  The result can fall outside 0..n-1 for out-of-range
    xx; callers must clamp if needed.
    """
    half_bin = (lb - la) * 0.5 / n
    scaled = (xx - half_bin - la) * n / (lb - la)
    return int(numpy.floor(scaled))
def calcFWHM(h, binSize):
    """Full width at half maximum of histogram *h* (numpy array).

    Returns (fwhm, last_index, first_index), where the indices bound the
    region with counts strictly above half the peak.  Raises on an empty
    histogram (max() of an empty sequence).
    """
    above = numpy.where(h > max(h) * 0.5)[0]
    first, last = above[0], above[-1]
    return binSize * (last - first + 1), last, first
def Histo1_write(title,bins,h,w,col,beam,ref):
    """Write a histogram to a SPEC-format text file named *title*.

    *bins* are the bin edges (len(h)+1 entries), *h* the raw ray counts,
    *w* the (possibly weighted) intensities; *col*/*ref* select the
    column labels via getLabel.  *beam* (Beam instance or file name) is
    only used to build the subtitle line.
    """
    # Subtitle records where the data came from plus user@host and a timestamp.
    if isinstance(beam,sd.Beam): usubtitle = "Shadow running in dir "+os.getcwd()
    if isinstance(beam,str): usubtitle = os.getcwd()+beam
    now = str(datetime.datetime.now())
    usubtitle += " "+now+" "+getpass.getuser()+"@"+socket.gethostname()
    file = open(title,'w')
    print ("#F HISTO1", file = file)
    print ("#C This file has been created using histo1 (python ShadowTools)", file = file)
    print ("#D "+now, file = file)
    print ("#UTITLE", file = file)
    print ("#USUBTITLE "+usubtitle, file = file)
    print ("#UTTEXT", file = file)
    print ("#C COLUMN 1 CORRESPONDS TO ABSCISSAS IN THE CENTER OF EACH BIN", file = file)
    print ("#C COLUMN 2 CORRESPONDS TO ABSCISSAS IN THE THE LEFT CORNER OF THE BIN", file = file)
    print ("#C COLUMN 3 CORRESPONDS TO INTENSITY (COUNTING RAYS)", file = file)
    print ("#C COLUMN 4 CORRESPONDS TO INTENSITY (WEIGHTED IF SELECTED)", file = file)
    print (" ", file = file)
    print ("#S 1 histogram", file = file)
    print ("#N 4" , file = file)
    # Columns 1 and 2 are both abscissas of *col* (bin centre / left edge),
    # hence the same label appears twice.
    print ("#L "+getLabel(col)[1]+" "+(getLabel(col))[1]+" "+"intensity (rays)"+" "+(getLabel(ref))[1], file = file)
    for i in range(len(h)):
        print ("%f\t%f\t%f\t%f" % ( (bins[i]+bins[i+1])*0.5, bins[i], h[i], w[i] ), file = file)
    file.close()
def getLabel(col):
    """Return [latex_label, plain_label] for table row *col*.

    Row 0 is x, row 1 is y, ..., row 33 is Power.  NOTE(review): the
    table is 0-based while Shadow columns are numbered 1..33 -- callers
    appear responsible for any off-by-one adjustment; confirm against
    call sites before changing.
    """
    Label= [
        [ r"$x$ [user unit]", "x [user unit]" ],
        [ r"$y$ [user unit]", "y [user unit]" ],
        [ r"$z$ [user unit]", "z [user unit]" ],
        [ r"$\dot{x}$ [rads]", "x' [rads]" ],
        [ r"$\dot{y}$ [rads]", "y' [rads]" ],
        [ r"$\dot{z}$ [rads]", "z' [rads]" ],
        [ r"$\mathbf{E}_{\sigma x}$", "Es_x" ],
        [ r"$\mathbf{E}_{\sigma y}$", "Es_y" ],
        [ r"$\mathbf{E}_{\sigma z}$", "Es_z" ],
        [ r"ray flag", "Ray flag" ],
        [ r"E [eV]", "Energy" ],
        [ r"Ray index", "Ray index" ],
        [ r"s", "Opt. Path" ],
        [ r"$\phi_{\sigma}$", "phase_s" ],
        [ r"$\phi_{\pi}$", "phase_p" ],
        [ r"$\mathbf{E}_{\pi x}$", "Ep_x" ],
        [ r"$\mathbf{E}_{\pi y}$", "Ep_y" ],
        [ r"$\mathbf{E}_{\pi z}$", "Ep_z" ],
        [ r"$\lambda$ [$\AA$]", "wavelength" ],
        [ r"$R= \sqrt{x^2+y^2+z^2}$", "R [user unit]" ],
        [ r"$\theta$" , "theta" ],
        [ r"$\Vert\mathbf{E_{\sigma}}+\mathbf{E_{\pi}}\Vert$", "Electromagnetic vector magnitude" ],
        [ r"$\Vert\mathbf{E_{\sigma}}+\mathbf{E_{\pi}}\Vert^2$", "intensity (weight column = 23: |E|^2 (total intensity))" ],
        [ r"$\Vert\mathbf{E_{\sigma}}\Vert^2$", "intensity (weight column = 24: |E_s|^2 (sigma intensity))" ],
        [ r"$\Vert\mathbf{E_{\pi}}\Vert^2$", "intensity (weight column = 25: |E_p|^2 (pi intensity))" ],
        [ r"$K = \frac{2 \pi}{\lambda} [A^{-1}]$", "K magnitude" ],
        [ r"$K_x = \frac{2 \pi}{\lambda} \dot{x}$ [$\AA^{-1}$]", "K_x" ],
        [ r"$K_y = \frac{2 \pi}{\lambda} \dot{y}$ [$\AA^{-1}$]", "K_y" ],
        [ r"$K_z = \frac{2 \pi}{\lambda} \dot{z}$ [$\AA^{-1}$]", "K_z" ],
        [ r"$S_0 = \Vert\mathbf{E}_{\sigma}\Vert^2 + \Vert\mathbf{E}_{\pi}\Vert^2 $", "S0" ],
        [ r"$S_1 = \Vert\mathbf{E}_{\sigma}\Vert^2 - \Vert\mathbf{E}_{\pi}\Vert^2 $", "S1" ],
        [ r"$S_2 = 2 \Vert\mathbf{E}_{\sigma}\Vert \cdot \Vert\mathbf{E}_{\pi}\Vert \cos{(\phi_{\sigma}-\phi_{\pi})}$", "S2" ],
        [ r"$S_3 = 2 \Vert\mathbf{E}_{\sigma}\Vert \cdot \Vert\mathbf{E}_{\pi}\Vert \sin{(\phi_{\sigma}-\phi_{\pi})}$", "S3" ],
        [ r"Power [eV/s]", "Power" ],
        ]
    return Label[col]
| StarcoderdataPython |
46317 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: NVRDriverCLIProtocol.py
import traceback
import re
import GlobalModule
from EmCommonLog import decorater_log
from CgwshDriverCLIProtocol import CgwshDriverCLIProtocol
class NVRDriverCLIProtocol(CgwshDriverCLIProtocol):
    '''
    Class for processing NVR driver protocol(CLI)

    Specializes the generic Cgwsh CLI protocol for Yamaha NVR routers:
    the prompt is "~" and leaving configuration mode may ask whether to
    save, which is answered with "n".
    '''

    @decorater_log
    def __init__(self, error_recv_message=[], connected_recv_message="~"):
        # NOTE(review): mutable default ([]) is shared across instances;
        # safe only if neither this class nor the parent ever mutates the
        # list in place -- confirm before relying on it.
        super(NVRDriverCLIProtocol, self).__init__(error_recv_message,
                                                   connected_recv_message)

    @decorater_log
    def _exit_configuration_mode(self):
        '''
        Release of configuration mode is forced.

        Sends "quit"; if the device then asks "Save new configuration ?",
        answers "n".  On success clears the configuration-mode flag.
        Failures are logged and swallowed (best effort).
        '''
        # Regex for the device's save-confirmation prompt ("?"/"()" escaped).
        re_txt_save_conf = "Save new configuration \? \(Y/N\)"
        send_message = [("quit", "{0}|{1}".format(">", re_txt_save_conf))]
        GlobalModule.EM_LOGGER.debug(
            "start exit command :\n%s" % (send_message,))
        try:
            output = self._exec_interactive(send_message,
                                            self.error_recv_message)
            if re.search(re_txt_save_conf, output):
                self._send_command_no_save()
        except Exception as ex:
            GlobalModule.EM_LOGGER.debug("Error exit command:%s", ex)
            GlobalModule.EM_LOGGER.debug("Traceback:%s",
                                         traceback.format_exc())
        else:
            GlobalModule.EM_LOGGER.debug("Success configuration exit")
            self._is_mode_configuration = False

    @decorater_log
    def _send_command_no_save(self):
        '''
        Save new configuration ? n is set as response to question(Y/N).
        Return value:
            Received message (everything up to the ">" prompt)
        '''
        GlobalModule.EM_LOGGER.debug(
            "Send n for 'Save new configuration ? (Y/N)'")
        shell_obj = self._ssh_shell
        send_message = "n"
        receive_keyword = ">"
        shell_obj.send(send_message)
        return self._recv_message(shell_obj, receive_keyword)
| StarcoderdataPython |
5117956 | <filename>data_describe/_widget.py
from abc import ABC, abstractmethod
class BaseWidget(ABC):
    """Interface for collecting information and visualizations for a feature.

    A "widget" serves as a container for data, diagnostics, and other outputs
    (i.e. DataFrames, plots, estimators etc.) for a feature in data-describe.
    """

    def __init__(self, compute_backend=None, viz_backend=None, **kwargs):
        """Instantiates a BaseWidget.

        Attributes are not explicitly required to be assigned on instantiation;
        data-describe does not constrain or require assignment of (possibly "private")
        attributes after instantiation. Widgets may be used to pass data between
        internal calculations (possibly across different backends) as the final widget
        state is accumulated.

        However, it is strongly recommended to add all expected attributes to
        the __init__ signature for documentation purposes.

        Args:
            compute_backend: The compute backend
            viz_backend: The visualization backend. Must be assigned if the user
                specified a value
            **kwargs: Keyword arguments, attached verbatim as attributes.
        """
        self.compute_backend = compute_backend
        self.viz_backend = viz_backend
        for key, value in kwargs.items():
            self.__setattr__(key, value)

    def __str__(self):
        return "data-describe Base Widget"

    # TODO (haishiro): Use @final from typing; requires Python 3.8+
    def _repr_html_(self):
        """Displays the object (widget) when it is on the last line in a Jupyter Notebook cell."""
        # Prefer IPython rich display; fall back to the raw show() result
        # outside notebook environments.
        try:
            from IPython.display import display

            return display(self.show())
        except ImportError:
            return self.show()

    @abstractmethod
    def show(self, viz_backend=None):
        """Show the default output.

        Assembles the object to be displayed by _repr_html_. This should respect the
        viz_backend, if applicable.

        Args:
            viz_backend: The visualization backend

        Raises:
            NotImplementedError: No default visualization defined.
        """
        backend = viz_backend or self.viz_backend  # noqa: F841
        # Fixed: error message previously read "defined defined".
        raise NotImplementedError(
            "No default visualization defined on this widget."
        )
| StarcoderdataPython |
3413519 | # encoding: utf-8
from .cuhk03 import CUHK03
from .dukemtmcreid import DukeMTMCreID
from .market1501 import Market1501
from .msmt17 import MSMT17
from .veri import VeRi
from .aicity20 import AICity20
from .aicity20_sim import AICity20Sim
from .aicity20_trainval import AICity20Trainval
from .aicity20_ReOri import AICity20ReOri
from .aicity20_ReCam import AICity20ReCam
from .aicity20_ReColor import AICity20ReColor
from .aicity20_ReType import AICity20ReType
from .dataset_loader import ImageDataset
from .bases import BaseImageDataset, apply_id_bias
from .personx_spgan import PersonX_Spgan
from .personx_spgan_test import PersonX_Spgan_Test
from .personx_spgan_plabel import PersonX_Spgan_Plabel
# Registry of available re-id datasets: public name -> dataset class.
# init_dataset() looks classes up here; get_names() exposes the keys.
__factory = {
    'market1501': Market1501,
    'cuhk03': CUHK03,
    'dukemtmc-reid': DukeMTMCreID,
    'msmt17': MSMT17,
    'veri': VeRi,
    'aicity20': AICity20,
    'aicity20-sim': AICity20Sim,
    'aicity20-trainval': AICity20Trainval,
    'aicity20-ReOri': AICity20ReOri,
    'aicity20-ReCam': AICity20ReCam,
    'aicity20-ReColor': AICity20ReColor,
    'aicity20-ReType': AICity20ReType,
    # 'personX': personX,
    'personx_spgan': PersonX_Spgan,
    'personx_spgan_test': PersonX_Spgan_Test,
    'personx_spgan_plabel': PersonX_Spgan_Plabel,
}
def get_names():
    """Return the registered dataset names (a dict_keys view)."""
    return __factory.keys()
def init_dataset(name, *args, **kwargs):
    """Instantiate the dataset registered under *name*.

    Positional and keyword arguments are forwarded to the dataset
    constructor.

    Raises:
        KeyError: if *name* is not a registered dataset.
    """
    # Membership test directly on the dict (redundant .keys() removed).
    if name not in __factory:
        raise KeyError("Unknown datasets: {}".format(name))
    return __factory[name](*args, **kwargs)
| StarcoderdataPython |
233161 | import torch
from transformers import BertModel, BertTokenizer
from bert_ner.aux import bioes_classes, clean_tuples
from bert_ner.aux import create_data_from_sentences, batchify_sentences
from bert_ner.model import NERModel
def clean_labels(output_labels):
    """Strip the BIOES prefix ("B-", "I-", "E-", "S-") from each label.

    The special labels 'OTHER' and '[CLS]' are passed through unchanged.
    """
    keep_as_is = ('OTHER', '[CLS]')
    cleaned = []
    for label in output_labels:
        cleaned.append(label if label in keep_as_is else label[2:])
    return cleaned
def join_tokens(words, labels):
    """Merge WordPiece continuation tokens ('##xxx') into the previous word.

    A merged word keeps the label of its first piece; labels of
    continuation pieces are dropped.  Returns (merged_words, merged_labels).
    """
    merged_words = []
    merged_labels = []
    for word, label in zip(words, labels):
        if word.startswith('##'):
            # Continuation piece: glue onto the previous word, discard label.
            merged_words[-1] = merged_words[-1] + word[2:]
        else:
            merged_words.append(word)
            merged_labels.append(label)
    return merged_words, merged_labels
class NERTagger:
    """BERT-based named-entity tagger.

    Loads a pretrained bert-base-uncased backbone (shared at class level,
    so it is loaded once per process) plus a fine-tuned NER head from
    *filename*.  tag()/tag_list() return lists of (text, label) tuples.
    Requires a CUDA device: the model and every batch are moved with .cuda().
    """

    _MODEL = (BertModel, BertTokenizer, 'bert-base-uncased')
    _model_class, _tokenizer_class, _pretrained_weights = _MODEL
    _tokenizer = _tokenizer_class.from_pretrained(_pretrained_weights)
    _language_model = _model_class.from_pretrained(_pretrained_weights)

    def __init__(self, filename, num_batches=100):
        # NOTE(review): _num_batches is stored but tag_list below uses a
        # hard-coded batch size of 10 -- confirm which is intended.
        self._num_batches = num_batches
        self._model = NERModel(self._language_model, nout=len(bioes_classes))
        checkpoint = torch.load(filename)
        self._model.load_state_dict(checkpoint['model_state_dict'])
        self._model.cuda()

    def tag(self, text):
        """Tag a single string; returns its entity tuple list."""
        return self.tag_list([text])[0]

    def tag_list(self, text_list):
        """Tag a list of strings; returns one entity tuple list per text."""
        sentences = create_data_from_sentences(text_list, self._tokenizer)
        batches = batchify_sentences(sentences, 10)
        entity_list = []
        for batch in batches:
            entity_list += self._predict(batch)
        return entity_list

    def _predict(self, batch):
        # Run the head on one batch and decode per-token label indices back
        # to BIOES names; position 0 ([CLS]) is dropped from both streams.
        outputs = self._model(batch.cuda())
        out_list = []
        for inp, output in zip(batch, outputs):
            words = [self._tokenizer.convert_ids_to_tokens([i])[0] for i in list(inp)[1:]]
            output_labels = [torch.argmax(vector) for vector in output[1:]]
            output_labels = [bioes_classes[int(l)] for l in output_labels]
            output_labels = clean_labels(output_labels)
            # Re-join WordPiece fragments, then collapse spans into
            # (text, label) tuples and keep only real entities.
            words, output_labels = join_tokens(words, output_labels)
            predicted_tuples = clean_tuples(words, output_labels)
            predicted_tuples = [item for item in predicted_tuples if item[1] != 'OTHER']
            out_list.append(predicted_tuples)
        return out_list
| StarcoderdataPython |
99996 | <gh_stars>10-100
"""PyTest configuration module."""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import base64
import json
import os
import zlib
import numpy as np
import pytest
from scenepic import Color
def _asset(name):
if name.endswith(".json"):
path = os.path.join(os.path.dirname(__file__), "assets", name)
if os.path.exists(path):
return path
return os.path.join(os.path.dirname(__file__), "..", "ci", "assets", name)
def _decode_and_decompress(buffer: bytes) -> np.ndarray:
buffer = base64.b64decode(buffer)
buffer = zlib.decompress(buffer)
return np.frombuffer(buffer, dtype=np.float32)
def _assert_buffer_equal(actual, expected):
    """Compare two encoded float32 buffers numerically (almost-equal),
    since bit-exact string comparison is too strict for float data."""
    actual = _decode_and_decompress(actual)
    expected = _decode_and_decompress(expected)
    np.testing.assert_array_almost_equal(actual, expected)
def _assert_dict_equal(actual, expected):
    """Recursively compare two dicts: same key sets, then per-key values.

    Keys known to hold base64+zlib float buffers are compared numerically;
    everything else is compared structurally via _assert_item_equal.
    """
    actual_keys = sorted(actual.keys())
    expected_keys = sorted(expected.keys())
    assert actual_keys == expected_keys
    for key in actual_keys:
        if key in ("VertexBuffer", "Transform", "LineBuffer", "R", "T", "Projection"):
            _assert_buffer_equal(actual[key], expected[key])
        else:
            _assert_item_equal(actual[key], expected[key])
def _assert_list_equal(actual, expected):
    """Pairwise compare two sequences element by element.

    NOTE(review): zip() stops at the shorter sequence, so a length
    mismatch passes silently here -- confirm whether an explicit length
    assertion is wanted.
    """
    for actual_i, expected_i in zip(actual, expected):
        _assert_item_equal(actual_i, expected_i)
def _assert_item_equal(actual, expected):
    """Type-checked recursive equality: dispatch on the expected type
    (list/dict recurse; anything else uses plain ==)."""
    assert isinstance(actual, type(expected))
    if isinstance(expected, list):
        _assert_list_equal(actual, expected)
    elif isinstance(expected, dict):
        _assert_dict_equal(actual, expected)
    else:
        assert actual == expected
def _assert_json_equal(actual, expected_path):
    """Assert that the json string provided is equal to the asset.

    *expected_path* is the asset name without the ".json" suffix; both
    sides are parsed and compared structurally (buffers numerically).
    """
    expected_path = _asset(expected_path + ".json")
    with open(expected_path) as file:
        expected = json.load(file)

    actual = json.loads(actual)
    _assert_item_equal(actual, expected)
@pytest.fixture(scope="module")
def color():
    """A fixed RGB Color shared by the module's tests."""
    return Color(0.83863144, 0.39671423, 0.77389568)


@pytest.fixture(scope="module")
def asset():
    # Expose the module-level asset resolver to tests as a fixture.
    return _asset


@pytest.fixture(scope="module")
def assert_json_equal():
    # Expose the structural JSON comparator to tests as a fixture.
    return _assert_json_equal
| StarcoderdataPython |
5141750 | from django.conf import settings
from django.dispatch import Signal
from django.http import HttpResponseRedirect
from ..base_client import FrameworkIntegration, RemoteApp
from ..requests_client import OAuth1Session, OAuth2Session
from deming.models import OauthClient
token_update = Signal()
class DjangoIntegration(FrameworkIntegration):
    """Authlib framework integration for Django.

    Bridges authlib's generic client machinery to Django: token updates
    are broadcast via the module-level *token_update* signal, and client
    credentials are loaded from the OauthClient model rather than from
    settings.
    """
    oauth1_client_cls = OAuth1Session
    oauth2_client_cls = OAuth2Session

    def update_token(self, token, refresh_token=None, access_token=None):
        # Notify listeners (e.g. persistence code) that a token changed.
        token_update.send(
            sender=self.__class__,
            name=self.name,
            token=token,
            refresh_token=refresh_token,
            access_token=access_token,
        )

    def generate_access_token_params(self, request_token_url, request):
        # OAuth1 flow (request_token_url set): forward all query params.
        if request_token_url:
            return request.GET.dict()
        # OAuth2 flow: extract only code/state, from GET or POST
        # depending on the callback method.
        if request.method == 'GET':
            params = {
                'code': request.GET.get('code'),
                'state': request.GET.get('state'),
            }
        else:
            params = {
                'code': request.POST.get('code'),
                'state': request.POST.get('state'),
            }
        return params

    @staticmethod
    def load_config(oauth, name, params):
        """
        Method to load config from database
        Parameters:
            oauth: Instance authlib, passed internally by the package
            name: Name used inside register method of oauth
            params: Contains oauth parameters name inside tuple
        Returns:
            config: Configuration required by the package to load client information
        """
        # NOTE(review): OauthClient.objects.get raises DoesNotExist for an
        # unknown name, and `config.get(name)` returns None if the stored
        # name differs from *name* -- confirm both behaviours are intended.
        auth_client = OauthClient.objects.get(name=name)
        config = {
            auth_client.name : {
                'client_id': auth_client.oauth_client_id,
                'client_secret': auth_client.oauth_client_secret
            }
        }
        if config:
            return config.get(name)
class DjangoRemoteApp(RemoteApp):
    """RemoteApp specialization returning Django HTTP responses."""

    def authorize_redirect(self, request, redirect_uri=None, **kwargs):
        """Create a HTTP Redirect for Authorization Endpoint.

        :param request: HTTP request instance from Django view.
        :param redirect_uri: Callback or redirect URI for authorization.
        :param kwargs: Extra parameters to include.
        :return: A HTTP redirect response.
        """
        rv = self.create_authorization_url(redirect_uri, **kwargs)
        # Persist state/nonce so authorize_access_token can validate later.
        self.save_authorize_data(request, redirect_uri=redirect_uri, **rv)
        return HttpResponseRedirect(rv['url'])

    def authorize_access_token(self, request, **kwargs):
        """Fetch access token in one step.

        :param request: HTTP request instance from Django view.
        :return: A token dict.
        """
        params = self.retrieve_access_token_params(request)
        params.update(kwargs)
        return self.fetch_access_token(**params)

    def parse_id_token(self, request, token, claims_options=None, leeway=120):
        # Thin wrapper over the base implementation; leeway is the allowed
        # clock skew in seconds when validating the ID token.
        return self._parse_id_token(request, token, claims_options, leeway)
| StarcoderdataPython |
9625360 | """Finds the best option for the next waypoint."""
import geopandas as gpd
import shapely.geometry as sp
from matplotlib import pyplot as plt
from path_finding.path_finder import _find_intersection_to_destination
from path_finding.path_finder import _generate_waypoint_choices
from path_finding.path_finder import _get_best_next_waypoint
from path_finding.path_finder import _get_waypoint
from path_finding.path_finder import getPolygon
from shapely.geometry import Point
# Scenario endpoints as (lon, lat) points near IJmuiden, NL.
origin = Point(4.661788405110263, 52.404488319225315)
destination = Point(4.679982508895097, 52.412416699424895)
# Obstacle polygon(s) around the origin; plot the boundary in red.
border = getPolygon(origin, 3)
boundary = gpd.GeoSeries(border.exterior if isinstance(border, sp.Polygon) else
                         [x.exterior for x in border.geoms])
ax = boundary.plot(color='red')
# Origin (green) and destination (blue) markers.
opoint = gpd.GeoSeries(origin)
dpoint = gpd.GeoSeries(destination)
ax = dpoint.plot(color='blue', facecolor='none', ax=ax)
ax = opoint.plot(color='green', facecolor='none', ax=ax)
# Extra reference box drawn in green for visual context.
extra_box = sp.box(4.668466196669055, 52.407664958949056, 4.6704661966690555,
                   52.40966495894905)
ax = gpd.GeoSeries(extra_box.exterior).plot(color='green', ax=ax)
# First intersection toward the destination, then each candidate waypoint
# (green) with its onward path (purple).
intersection = _get_waypoint(origin, destination)
for point in _generate_waypoint_choices(origin, intersection):
    ax = gpd.GeoSeries(point).plot(color='green', ax=ax)
    path = _find_intersection_to_destination(point, destination)
    ax = gpd.GeoSeries(sp.LineString([point, path])).plot(color='purple',
                                                          ax=ax)
# Best next waypoint (purple) and the segment reaching it (yellow).
next_ = _get_best_next_waypoint(origin, destination)
ax = gpd.GeoSeries(next_).plot(color='purple', ax=ax)
ax = gpd.GeoSeries(sp.LineString([intersection, next_]))\
    .plot(color='yellow', ax=ax)
# Second hop: best waypoint from the first one.
next_two = _get_best_next_waypoint(next_, destination)
ax = gpd.GeoSeries(next_two).plot(color='purple', ax=ax)
ax = gpd.GeoSeries(sp.LineString([next_, next_two]))\
    .plot(color='yellow', ax=ax)
# Direct origin->first-intersection segment, then show the figure.
intersection_line = sp.LineString([origin, intersection])
intersection_line_series = gpd.GeoSeries(intersection_line)
ax = intersection_line_series.plot(color='yellow', ax=ax)
plt.show()
| StarcoderdataPython |
11311026 | <gh_stars>1-10
from distutils.core import setup
# Packaging metadata for the blissops image-manipulation library.
# NOTE(review): download_url pins tag v_01 -- keep it in sync with
# `version` when releasing.
setup(
    name = 'blissops',
    packages = ['blissops'],
    version = '0.1',
    license='MIT',
    description = 'Simple BytesIO based image manipulation library. No hard work and no good results.',
    author = 'Liam (ir-3) H.',
    author_email = '<EMAIL>',
    url = 'https://github.com/blisspy/bliss-ops',
    download_url = 'https://github.com/blisspy/bliss-ops/archive/v_01.tar.gz',
    keywords = ['imageops', 'simple', 'Bad'],
    install_requires=[
        'wand',
        'numpy',
        'scipy',
        'scikit-image'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
| StarcoderdataPython |
1831673 | from flask import Blueprint
from flask_restful import Api
from . import resources
# "users" blueprint exposing the user collection and per-user item
# endpoints via Flask-RESTful.
bp = Blueprint("users", __name__)
api = Api(bp)
api.add_resource(resources.Users, "/users")
api.add_resource(resources.UsersId, "/users/<user_id>")
| StarcoderdataPython |
3598705 | from thesis import config
from thesis.experiments import pg_time_train_iter_cc
from thesis.runner import runner
# Smoke script: build the pg_time_train_iter_cc experiment config, create
# a runner over the scratch data dir, and run a single training step on a
# sampled memory batch.
conf = pg_time_train_iter_cc.make_conf("pippo")
conf, *_ = pg_time_train_iter_cc.doconfs(conf, config.data_dir)
run = runner.build_runner(conf, config.scratch_data_dir)
ag = run.agent
ag.train(ag.sample_memory())
| StarcoderdataPython |
3379085 | import argparse
from cocojson.tools import split_from_file
def main():
    """CLI entry point: split a COCO json into named sets by ratio."""
    ap = argparse.ArgumentParser()
    ap.add_argument('cocojson', help='Path to coco.json to chop up', type=str)
    ap.add_argument('--ratios', help='List of ratios to split by', type=float, required=True, nargs='+')
    ap.add_argument('--names', help='List of names of the splits. Must be same length as ratios.', type=str, nargs='+')
    ap.add_argument('--shuffle', help='Flag to shuffle images before splitting. Defaults to False.', action='store_true')
    args = ap.parse_args()
    # Enforce the contract stated in the --names help text instead of
    # failing (or misbehaving) later inside split_from_file.
    if args.names is not None and len(args.names) != len(args.ratios):
        ap.error('--names must have the same number of entries as --ratios')
    split_from_file(args.cocojson, args.ratios, names=args.names, do_shuffle=args.shuffle)
if __name__ == '__main__':
main() | StarcoderdataPython |
5076574 | <reponame>Dineth-De-Silva/CSV
import os
class csv:
    """Tiny ad-hoc CSV helper (NOT the stdlib ``csv`` module, whose name
    this class shadows -- kept for interface compatibility).

    write() stores rows as ``"a, b, "`` lines (note the trailing
    separator); read() parses that format back; close() deletes the file.
    """

    def __init__(self, FileName):
        # Base name without extension; ".csv" is appended on every operation.
        self.FileName = FileName

    def write(self, Data):
        """Write *Data* to <FileName>.csv.

        *Data* may be a plain string (one line), a list of strings (one
        per line) or a list of lists (comma-separated rows).  The file is
        now closed deterministically via a context manager.
        """
        with open(self.FileName + ".csv", "w") as File:
            if isinstance(Data, list):
                for Element in Data:
                    if isinstance(Element, list):
                        for Subelement in Element:
                            File.write(Subelement + ", ")
                        File.write("\n")
                    else:
                        File.write(Element + "\n")
            else:
                File.write(Data + "\n")

    def read(self):
        """Read <FileName>.csv back into the structure write() produced.

        Rows containing commas come back as lists of fields; other rows
        as bare strings.  Fixes the original parser, which left the
        separator space on every field after the first
        ("a, b" -> ["a", " b"]) and so broke the write->read round-trip.
        """
        with open(self.FileName + ".csv", "r") as File:
            Lines = File.readlines()
        Return = []
        for Line in Lines:
            Line = Line.rstrip("\n")
            if "," in Line:
                # write() emits "field, " per field, so the row ends with a
                # separator; drop the resulting empty trailing field.
                Fields = Line.split(", ")
                if Fields and Fields[-1] == "":
                    Fields.pop()
                Return.append(Fields)
            else:
                Return.append(Line)
        return Return

    def close(self):
        """Delete the backing file (despite the name, this removes it)."""
        os.remove(self.FileName + ".csv")
| StarcoderdataPython |
40645 | import django
# Now this is ugly.
# The django.db.backend.features that exist changes per version and per db :/
# Each branch names the backend "features" flag(s) that must be true for
# this app's JSON-aggregation code paths on that exact Django version.
if django.VERSION[:2] == (2, 2):
    has_sufficient_json_support = ('has_jsonb_agg',)
if django.VERSION[:2] == (3, 2):
    # This version of EasyDMP is not using Django's native JSONField
    # implementation, but the deprecated postgres-specific field. When no
    # longer supporting 2.2 this can be "has_native_json_field"
    has_sufficient_json_support = ('is_postgresql_10',)
if django.VERSION[:2] == (4, 0):
    has_sufficient_json_support = ('has_native_json_field',)
# NOTE(review): on any other Django version (e.g. 3.0, 3.1, 4.1+) the name
# is never bound and importers get a NameError -- confirm whether an
# explicit failure (or a >= check for 4.x) is intended.
| StarcoderdataPython |
9664730 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Model(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # NOTE: the class name intentionally shadows the imported base Model
    # (swagger_server.models.base_model_.Model); the base is resolved at
    # class-creation time, so this is safe generator boilerplate.

    def __init__(self, model_id: int=None, model_version: str=None, bal_accuracy: float=None, is_active: bool=None, date_added: str=None, rule_id: int=None):  # noqa: E501
        """Model - a model defined in Swagger

        :param model_id: The model_id of this Model.  # noqa: E501
        :type model_id: int
        :param model_version: The model_version of this Model.  # noqa: E501
        :type model_version: str
        :param bal_accuracy: The bal_accuracy of this Model.  # noqa: E501
        :type bal_accuracy: float
        :param is_active: The is_active of this Model.  # noqa: E501
        :type is_active: bool
        :param date_added: The date_added of this Model.  # noqa: E501
        :type date_added: str
        :param rule_id: The rule_id of this Model.  # noqa: E501
        :type rule_id: int
        """
        self.swagger_types = {
            'model_id': int,
            'model_version': str,
            'bal_accuracy': float,
            'is_active': bool,
            'date_added': str,
            'rule_id': int
        }

        # JSON field names exposed by the API for each attribute.
        self.attribute_map = {
            'model_id': 'modelID',
            'model_version': 'modelVersion',
            'bal_accuracy': 'balAccuracy',
            'is_active': 'isActive',
            'date_added': 'dateAdded',
            'rule_id': 'ruleId'
        }
        self._model_id = model_id
        self._model_version = model_version
        self._bal_accuracy = bal_accuracy
        self._is_active = is_active
        self._date_added = date_added
        self._rule_id = rule_id

    @classmethod
    def from_dict(cls, dikt) -> 'Model':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The model of this Model.  # noqa: E501
        :rtype: Model
        """
        return util.deserialize_model(dikt, cls)

    @property
    def model_id(self) -> int:
        """Gets the model_id of this Model.

        Unique ID of the model  # noqa: E501

        :return: The model_id of this Model.
        :rtype: int
        """
        return self._model_id

    @model_id.setter
    def model_id(self, model_id: int):
        """Sets the model_id of this Model.

        Unique ID of the model  # noqa: E501

        :param model_id: The model_id of this Model.
        :type model_id: int
        """
        # NOTE(review): unlike the other setters, no None-check here --
        # presumably the id is server-assigned and optional; confirm.
        self._model_id = model_id

    @property
    def model_version(self) -> str:
        """Gets the model_version of this Model.

        Version of the model  # noqa: E501

        :return: The model_version of this Model.
        :rtype: str
        """
        return self._model_version

    @model_version.setter
    def model_version(self, model_version: str):
        """Sets the model_version of this Model.

        Version of the model  # noqa: E501

        :param model_version: The model_version of this Model.
        :type model_version: str
        """
        if model_version is None:
            raise ValueError("Invalid value for `model_version`, must not be `None`")  # noqa: E501

        self._model_version = model_version

    @property
    def bal_accuracy(self) -> float:
        """Gets the bal_accuracy of this Model.

        Percentage Accuracy of the model  # noqa: E501

        :return: The bal_accuracy of this Model.
        :rtype: float
        """
        return self._bal_accuracy

    @bal_accuracy.setter
    def bal_accuracy(self, bal_accuracy: float):
        """Sets the bal_accuracy of this Model.

        Percentage Accuracy of the model  # noqa: E501

        :param bal_accuracy: The bal_accuracy of this Model.
        :type bal_accuracy: float
        """
        if bal_accuracy is None:
            raise ValueError("Invalid value for `bal_accuracy`, must not be `None`")  # noqa: E501

        self._bal_accuracy = bal_accuracy

    @property
    def is_active(self) -> bool:
        """Gets the is_active of this Model.

        is the current model in use for a given rule  # noqa: E501

        :return: The is_active of this Model.
        :rtype: bool
        """
        return self._is_active

    @is_active.setter
    def is_active(self, is_active: bool):
        """Sets the is_active of this Model.

        is the current model in use for a given rule  # noqa: E501

        :param is_active: The is_active of this Model.
        :type is_active: bool
        """
        if is_active is None:
            raise ValueError("Invalid value for `is_active`, must not be `None`")  # noqa: E501

        self._is_active = is_active

    @property
    def date_added(self) -> str:
        """Gets the date_added of this Model.

        Date added to the database  # noqa: E501

        :return: The date_added of this Model.
        :rtype: str
        """
        return self._date_added

    @date_added.setter
    def date_added(self, date_added: str):
        """Sets the date_added of this Model.

        Date added to the database  # noqa: E501

        :param date_added: The date_added of this Model.
        :type date_added: str
        """
        if date_added is None:
            raise ValueError("Invalid value for `date_added`, must not be `None`")  # noqa: E501

        self._date_added = date_added

    @property
    def rule_id(self) -> int:
        """Gets the rule_id of this Model.

        The Rule ID for the corresponding rule in the rules table  # noqa: E501

        :return: The rule_id of this Model.
        :rtype: int
        """
        return self._rule_id

    @rule_id.setter
    def rule_id(self, rule_id: int):
        """Sets the rule_id of this Model.

        The Rule ID for the corresponding rule in the rules table  # noqa: E501

        :param rule_id: The rule_id of this Model.
        :type rule_id: int
        """
        if rule_id is None:
            raise ValueError("Invalid value for `rule_id`, must not be `None`")  # noqa: E501

        self._rule_id = rule_id
| StarcoderdataPython |
3573122 | from django.db import models
from django.contrib.auth import get_user_model
class Friend(models.Model):
    """This is a model to build relationships between users.

    Acts as the explicit through-model of the asymmetric follower graph:
    one row means ``user_from`` follows ``user_to``.
    """
    # the user doing the following
    user_from = models.ForeignKey(
        get_user_model(),
        related_name='rel_from_set',
        on_delete=models.CASCADE)
    # the user being followed
    user_to = models.ForeignKey(
        get_user_model(),
        related_name='rel_to_set',
        on_delete=models.CASCADE)
    # time stamp associated with the action
    created_at = models.DateTimeField(auto_now=True)

    class Meta:
        # Newest relationships first; a pair can only follow once.
        ordering = ('-created_at',)
        unique_together = ('user_from', 'user_to')

    def __str__(self):
        return '{} follows {}'.format(
            self.user_from.username,
            self.user_to.username)
# adds following field to user dynamically
# Monkey-patches a ManyToManyField onto AUTH_USER_MODEL so that
# user.following / user.followers traverse the Friend through-model.
# symmetrical=False: following someone does not imply a follow-back.
get_user_model().add_to_class('following', models.ManyToManyField(
    'self', through=Friend,
    related_name='followers',
    symmetrical=False
))
| StarcoderdataPython |
11227814 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""OCP-on-Azure Report Serializers."""
from rest_framework import serializers
import api.report.azure.serializers as azureser
import api.report.ocp.serializers as ocpser
from api.report.serializers import validate_field
class OCPAzureGroupBySerializer(azureser.AzureGroupBySerializer, ocpser.GroupBySerializer):
    """Serializer for handling query parameter group_by."""

    # Fields valid as group-by keys for the combined report: Azure scoping
    # fields plus the OCP project/cluster/node dimensions.
    _opfields = (
        "subscription_guid",
        "resource_location",
        "instance_type",
        "service_name",
        "project",
        "cluster",
        "node",
    )
class OCPAzureOrderBySerializer(azureser.AzureOrderBySerializer, ocpser.OrderBySerializer):
    """Serializer for handling query parameter order_by."""

    # Pure mixin combination: all fields come from the two parent classes.
    pass
class OCPAzureFilterSerializer(azureser.AzureFilterSerializer, ocpser.FilterSerializer):
    """Serializer for handling query parameter filter."""

    # Pure mixin combination: all fields come from the two parent classes.
    pass
class OCPAzureQueryParamSerializer(azureser.AzureQueryParamSerializer):
    """Serializer for handling OCP-on-Azure query parameters."""

    def __init__(self, *args, **kwargs):
        """Set up the serializer and register the OCP-on-Azure tagged fields."""
        super().__init__(*args, **kwargs)
        self._init_tagged_fields(
            filter=OCPAzureFilterSerializer, group_by=OCPAzureGroupBySerializer, order_by=OCPAzureOrderBySerializer
        )

    def validate_group_by(self, value):
        """Check that group_by parameters are valid OCP-on-Azure fields.

        Args:
            value (Dict): group_by data to validate
        Returns:
            (Dict): the validated data
        Raises:
            (ValidationError): if a group_by field is invalid
        """
        validate_field(self, "group_by", OCPAzureGroupBySerializer, value, tag_keys=self.tag_keys)
        return value

    def validate_order_by(self, value):
        """Check that order_by parameters are valid OCP-on-Azure fields.

        Args:
            value (Dict): order_by data to validate
        Returns:
            (Dict): the validated data
        Raises:
            (ValidationError): if an order_by field is invalid
        """
        super().validate_order_by(value)
        validate_field(self, "order_by", OCPAzureOrderBySerializer, value)
        return value

    def validate_filter(self, value):
        """Check that filter parameters are valid OCP-on-Azure fields.

        Args:
            value (Dict): filter data to validate
        Returns:
            (Dict): the validated data
        Raises:
            (ValidationError): if a filter field is invalid
        """
        validate_field(self, "filter", OCPAzureFilterSerializer, value, tag_keys=self.tag_keys)
        return value

    def validate_delta(self, value):
        """Validate the delta parameter against the request path.

        On cost endpoints the only valid delta is "cost_total" (with
        "cost" accepted as an alias); elsewhere it is "usage".
        """
        request = self.context.get("request")
        costs_endpoint = bool(request) and "costs" in request.path
        valid_delta = "cost_total" if costs_endpoint else "usage"
        # "cost" is translated to the canonical delta on cost endpoints only.
        if costs_endpoint and value == "cost":
            return valid_delta
        if value == valid_delta:
            return value
        raise serializers.ValidationError({"delta": f'"{value}" is not a valid choice.'})
| StarcoderdataPython |
3499907 | # Copyright (c) 2018 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. _behavior:
Behavior related classes and functions.
Behaviors represent a complex task which requires Vector's
internal logic to determine how long it will take. This
may include combinations of animation, path planning or
other functionality. Examples include drive_on_charger,
set_lift_height, etc.
For commands such as go_to_pose, drive_on_charger and dock_with_cube,
Vector uses path planning, which refers to the problem of
navigating the robot from point A to B without collisions. Vector
loads known obstacles from his map, creates a path to navigate
around those objects, then starts following the path. If a new obstacle
is found while following the path, a new plan may be created.
The :class:`BehaviorComponent` class in this module contains
functions for all the behaviors.
"""
__all__ = ["MAX_HEAD_ANGLE", "MIN_HEAD_ANGLE",
"MAX_LIFT_HEIGHT", "MAX_LIFT_HEIGHT_MM", "MIN_LIFT_HEIGHT", "MIN_LIFT_HEIGHT_MM",
"BehaviorComponent", "ReserveBehaviorControl"]
from . import connection, faces, objects, util
from .messaging import protocol
from .exceptions import VectorException
# Constants
#: The minimum angle the robot's head can be set to.
MIN_HEAD_ANGLE = util.degrees(-22.0)
#: The maximum angle the robot's head can be set to
MAX_HEAD_ANGLE = util.degrees(45.0)
#: The lowest height-above-ground that lift can be moved to, in millimeters.
MIN_LIFT_HEIGHT_MM = 32.0
#: The lowest height-above-ground that lift can be moved to
MIN_LIFT_HEIGHT = util.distance_mm(MIN_LIFT_HEIGHT_MM)
#: The largest height-above-ground that lift can be moved to, in millimeters.
MAX_LIFT_HEIGHT_MM = 92.0
#: The largest height-above-ground that lift can be moved to
MAX_LIFT_HEIGHT = util.distance_mm(MAX_LIFT_HEIGHT_MM)
class BehaviorComponent(util.Component):
    """Run behaviors on Vector"""
    # Next action id tag to hand out; cycles through the SDK-reserved range.
    _next_action_id = protocol.FIRST_SDK_TAG
    @classmethod
    def _get_next_action_id(cls):
        """Return the current SDK action id and advance the counter.

        Ids wrap within [FIRST_SDK_TAG, LAST_SDK_TAG] so concurrent SDK
        actions receive distinct tags.
        """
        # Post increment _current_action_id (and loop within the SDK_TAG range)
        next_action_id = cls._next_action_id
        if cls._next_action_id == protocol.LAST_SDK_TAG:
            cls._next_action_id = protocol.FIRST_SDK_TAG
        else:
            cls._next_action_id += 1
        return next_action_id
@connection.on_connection_thread()
async def _abort_action(self, action_id):
cancel_action_request = protocol.CancelActionByIdTagRequest(id_tag=action_id)
return await self.grpc_interface.CancelActionByIdTag(cancel_action_request)
@connection.on_connection_thread()
async def _abort_behavior(self):
cancel_behavior_request = protocol.CancelBehaviorRequest()
return await self.grpc_interface.CancelBehavior(cancel_behavior_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_BEHAVIOR)
async def drive_off_charger(self) -> protocol.DriveOffChargerResponse:
"""Drive Vector off the charger
If Vector is on the charger, drives him off the charger.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.drive_off_charger()
Example of cancelling the :meth:`drive_off_charger` behavior:
.. testcode::
import anki_vector
import time
with anki_vector.AsyncRobot() as robot:
drive_off_future = robot.behavior.drive_off_charger()
time.sleep(3.0)
drive_off_future.cancel()
"""
drive_off_charger_request = protocol.DriveOffChargerRequest()
return await self.grpc_interface.DriveOffCharger(drive_off_charger_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_BEHAVIOR)
async def drive_on_charger(self) -> protocol.DriveOnChargerResponse:
"""Drive Vector onto the charger
Vector will attempt to find the charger and, if successful, he will
back onto it and start charging.
Vector's charger has a visual marker so that the robot can locate it
for self-docking.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.drive_on_charger()
Example of cancelling the :meth:`drive_on_charger` behavior:
.. testcode::
import anki_vector
import time
with anki_vector.AsyncRobot() as robot:
drive_on_future = robot.behavior.drive_on_charger()
time.sleep(3.0)
drive_on_future.cancel()
"""
drive_on_charger_request = protocol.DriveOnChargerRequest()
return await self.grpc_interface.DriveOnCharger(drive_on_charger_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_BEHAVIOR)
async def find_faces(self) -> protocol.FindFacesResponse:
"""Look around for faces
Turn in place and move head to look for faces
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.find_faces()
Example of cancelling the :meth:`find_faces` behavior:
.. testcode::
import anki_vector
import time
with anki_vector.AsyncRobot() as robot:
find_faces_future = robot.behavior.find_faces()
time.sleep(3.0)
find_faces_future.cancel()
"""
find_faces_request = protocol.FindFacesRequest()
return await self.grpc_interface.FindFaces(find_faces_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_BEHAVIOR)
async def look_around_in_place(self) -> protocol.LookAroundInPlaceResponse:
"""Look around in place
Turn in place and move head to see what's around Vector
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.look_around_in_place()
Example of cancelling the :meth:`look_around_in_place` behavior:
.. testcode::
import anki_vector
import time
with anki_vector.AsyncRobot() as robot:
look_around_in_place_future = robot.behavior.look_around_in_place()
time.sleep(3.0)
look_around_in_place_future.cancel()
"""
look_around_in_place_request = protocol.LookAroundInPlaceRequest()
return await self.grpc_interface.LookAroundInPlace(look_around_in_place_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_BEHAVIOR)
async def roll_visible_cube(self) -> protocol.RollBlockResponse:
"""Roll a cube that is currently known to the robot
This behavior will move into position as necessary based on relative
distance and orientation.
Vector needs to see the block for this to succeed.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.roll_visible_cube()
Example of cancelling the :meth:`roll_visible_cube` behavior:
.. testcode::
import anki_vector
import time
with anki_vector.AsyncRobot() as robot:
roll_visible_cube_future = robot.behavior.roll_visible_cube()
time.sleep(3.0)
roll_visible_cube_future.cancel()
"""
roll_block_request = protocol.RollBlockRequest()
return await self.grpc_interface.RollBlock(roll_block_request)
# TODO Make this cancellable with is_cancellable
@connection.on_connection_thread()
async def say_text(self, text: str, use_vector_voice: bool = True, duration_scalar: float = 1.0) -> protocol.SayTextResponse:
"""Make Vector speak text.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.say_text("Hello World")
:param text: The words for Vector to say.
:param use_vector_voice: Whether to use Vector's robot voice
(otherwise, he uses a generic human male voice).
:param duration_scalar: Adjust the relative duration of the
generated text to speech audio.
:return: object that provides the status and utterance state
"""
say_text_request = protocol.SayTextRequest(text=text,
use_vector_voice=use_vector_voice,
duration_scalar=duration_scalar)
return await self.conn.grpc_interface.SayText(say_text_request)
# TODO Make this cancellable with is_cancellable?
@connection.on_connection_thread()
async def set_eye_color(self, hue: float, saturation: float) -> protocol.SetEyeColorResponse:
"""Set Vector's eye color.
Eye color settings examples:
| Teal: Set hue to 0.42 and saturation to 1.00.
| Orange: Set hue to 0.05 and saturation to 0.95.
| Yellow: Set hue to 0.11 and saturation to 1.00.
| Lime: Set hue to 0.21 and saturation to 1.00.
| Sapphire: Set hue to 0.57 and saturation to 1.00.
| Purple: Set hue to 0.83 and saturation to 0.76.
.. testcode::
import anki_vector
import time
with anki_vector.Robot() as robot:
print("Set Vector's eye color to purple...")
robot.behavior.set_eye_color(0.83, 0.76)
time.sleep(5)
:param hue: The hue to use for Vector's eyes.
:param saturation: The saturation to use for Vector's eyes.
"""
eye_color_request = protocol.SetEyeColorRequest(hue=hue, saturation=saturation)
return await self.conn.grpc_interface.SetEyeColor(eye_color_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def go_to_pose(self,
pose: util.Pose,
relative_to_robot: bool = False,
num_retries: int = 0,
_action_id: int = None) -> protocol.GoToPoseResponse:
"""Tells Vector to drive to the specified pose and orientation.
In navigating to the requested pose, Vector will use path planning.
If relative_to_robot is set to True, the given pose will assume the
robot's pose as its origin.
Since the robot understands position by monitoring its tread movement,
it does not understand movement in the z axis. This means that the only
applicable elements of pose in this situation are position.x position.y
and rotation.angle_z.
Note that actions that use the wheels cannot be performed at the same time,
otherwise you may see a TRACKS_LOCKED error. Methods that use the wheels include
:meth:`go_to_pose`, :meth:`dock_with_cube`, :meth:`turn_in_place`, :meth:`drive_straight`, and :meth:`pickup_object`.
:param pose: The destination pose.
:param relative_to_robot: Whether the given pose is relative to
the robot's pose.
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import degrees, Angle, Pose
with anki_vector.Robot() as robot:
pose = Pose(x=50, y=0, z=0, angle_z=Angle(degrees=0))
robot.behavior.go_to_pose(pose)
Example of cancelling the :meth:`go_to_pose` behavior:
.. testcode::
import anki_vector
from anki_vector.util import degrees, Angle, Pose
import time
with anki_vector.AsyncRobot() as robot:
pose = Pose(x=50, y=0, z=0, angle_z=Angle(degrees=0))
pose_future = robot.behavior.go_to_pose(pose)
time.sleep(3.0)
pose_future.cancel()
"""
if relative_to_robot and self.robot.pose:
pose = self.robot.pose.define_pose_relative_this(pose)
go_to_pose_request = protocol.GoToPoseRequest(x_mm=pose.position.x,
y_mm=pose.position.y,
rad=pose.rotation.angle_z.radians,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.GoToPose(go_to_pose_request)
# TODO alignment_type coming out ugly in the docs without real values
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def dock_with_cube(self,
target_object: objects.LightCube,
approach_angle: util.Angle = None,
alignment_type: protocol.AlignmentType = protocol.ALIGNMENT_TYPE_LIFT_PLATE,
distance_from_marker: util.Distance = None,
num_retries: int = 0,
_action_id: int = None) -> protocol.DockWithCubeResponse:
"""Tells Vector to dock with a light cube, optionally using a given approach angle and distance.
While docking with the cube, Vector will use path planning.
Note that actions that use the wheels cannot be performed at the same time,
otherwise you may see a TRACKS_LOCKED error. Methods that use the wheels include
:meth:`go_to_pose`, :meth:`dock_with_cube`, :meth:`turn_in_place`, :meth:`drive_straight`, and :meth:`pickup_object`.
:param target_object: The LightCube object to dock with.
:param approach_angle: Angle to approach the dock with.
:param alignment_type: Which part of the robot to align with the object.
:param distance_from_marker: How far from the object to approach (0 to dock)
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.world.connect_cube()
if robot.world.connected_light_cube:
robot.behavior.dock_with_cube(robot.world.connected_light_cube)
Example of cancelling the :meth:`dock_with_cube` behavior:
.. testcode::
import anki_vector
from anki_vector.util import degrees
import time
with anki_vector.AsyncRobot() as robot:
# If necessary, move Vector's Head and Lift down
robot.behavior.set_head_angle(degrees(-5.0))
robot.behavior.set_lift_height(0.0)
robot.world.connect_cube()
time.sleep(10.0)
dock_future = robot.behavior.dock_with_cube(
robot.world.connected_light_cube,
num_retries=3)
time.sleep(3.0)
dock_future.cancel()
robot.world.disconnect_cube()
"""
if target_object is None:
raise VectorException("Must supply a target_object to dock_with_cube")
dock_request = protocol.DockWithCubeRequest(object_id=target_object.object_id,
alignment_type=alignment_type,
id_tag=_action_id,
num_retries=num_retries)
if approach_angle is not None:
dock_request.use_approach_angle = True
dock_request.use_pre_dock_pose = True
dock_request.approach_angle_rad = approach_angle.radians
if distance_from_marker is not None:
dock_request.distance_from_marker_mm = distance_from_marker.distance_mm
return await self.grpc_interface.DockWithCube(dock_request)
# Movement actions
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def drive_straight(self,
distance: util.Distance,
speed: util.Speed,
should_play_anim: bool = True,
num_retries: int = 0,
_action_id: int = None) -> protocol.DriveStraightResponse:
"""Tells Vector to drive in a straight line.
Vector will drive for the specified distance (forwards or backwards)
Vector must be off of the charger for this movement action.
Note that actions that use the wheels cannot be performed at the same time,
otherwise you may see a TRACKS_LOCKED error. Methods that use the wheels include
:meth:`go_to_pose`, :meth:`dock_with_cube`, :meth:`turn_in_place`, :meth:`drive_straight`, and :meth:`pickup_object`.
:param distance: The distance to drive
(>0 for forwards, <0 for backwards)
:param speed: The speed to drive at
(should always be >0, the abs(speed) is used internally)
:param should_play_anim: Whether to play idle animations whilst driving
(tilt head, hum, animated eyes, etc.)
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import distance_mm, speed_mmps
with anki_vector.Robot() as robot:
robot.behavior.drive_straight(distance_mm(200), speed_mmps(100))
Example of cancelling the :meth:`drive_straight` behavior:
.. testcode::
import anki_vector
from anki_vector.util import distance_mm, speed_mmps
import time
with anki_vector.AsyncRobot() as robot:
drive_future = robot.behavior.drive_straight(distance_mm(300), speed_mmps(50))
time.sleep(2.0)
drive_future.cancel()
"""
drive_straight_request = protocol.DriveStraightRequest(speed_mmps=speed.speed_mmps,
dist_mm=distance.distance_mm,
should_play_animation=should_play_anim,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.DriveStraight(drive_straight_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def turn_in_place(self,
angle: util.Angle,
speed: util.Angle = util.Angle(0.0),
accel: util.Angle = util.Angle(0.0),
angle_tolerance: util.Angle = util.Angle(0.0),
is_absolute: bool = 0,
num_retries: int = 0,
_action_id: int = None) -> protocol.TurnInPlaceResponse:
"""Turn the robot around its current position.
Vector must be off of the charger for this movement action.
Note that actions that use the wheels cannot be performed at the same time,
otherwise you may see a TRACKS_LOCKED error. Methods that use the wheels include
:meth:`go_to_pose`, :meth:`dock_with_cube`, :meth:`turn_in_place`, :meth:`drive_straight`, and :meth:`pickup_object`.
:param angle: The angle to turn. Positive
values turn to the left, negative values to the right.
:param speed: Angular turn speed (per second).
:param accel: Acceleration of angular turn
(per second squared).
:param angle_tolerance: angular tolerance
to consider the action complete (this is clamped to a minimum
of 2 degrees internally).
:param is_absolute: True to turn to a specific angle, False to
turn relative to the current pose.
:param num_retries: Number of times to reattempt the turn in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import degrees
with anki_vector.Robot() as robot:
robot.behavior.turn_in_place(degrees(90))
Example of cancelling the :meth:`turn_in_place` behavior:
.. testcode::
import anki_vector
from anki_vector.util import degrees
import time
with anki_vector.AsyncRobot() as robot:
turn_future = robot.behavior.turn_in_place(degrees(360))
time.sleep(0.5)
turn_future.cancel()
"""
turn_in_place_request = protocol.TurnInPlaceRequest(angle_rad=angle.radians,
speed_rad_per_sec=speed.radians,
accel_rad_per_sec2=accel.radians,
tol_rad=angle_tolerance.radians,
is_absolute=is_absolute,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.TurnInPlace(turn_in_place_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def set_head_angle(self,
angle: util.Angle,
accel: float = 10.0,
max_speed: float = 10.0,
duration: float = 0.0,
num_retries: int = 0,
_action_id: int = None) -> protocol.SetHeadAngleResponse:
"""Tell Vector's head to move to a given angle.
:param angle: Desired angle for Vector's head.
(:const:`MIN_HEAD_ANGLE` to :const:`MAX_HEAD_ANGLE`).
(we clamp it to this range internally).
:param accel: Acceleration of Vector's head in radians per second squared.
:param max_speed: Maximum speed of Vector's head in radians per second.
:param duration: Time for Vector's head to move in seconds. A value
of zero will make Vector try to do it as quickly as possible.
:param num_retries: Number of times to reattempt the action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import degrees
from anki_vector.behavior import MIN_HEAD_ANGLE, MAX_HEAD_ANGLE
with anki_vector.Robot() as robot:
# move head from minimum to maximum angle
robot.behavior.set_head_angle(MIN_HEAD_ANGLE)
robot.behavior.set_head_angle(MAX_HEAD_ANGLE)
# move head to middle
robot.behavior.set_head_angle(degrees(35.0))
Example of cancelling the :meth:`set_head_angle` behavior:
.. testcode::
import anki_vector
from anki_vector.behavior import MIN_HEAD_ANGLE, MAX_HEAD_ANGLE
import time
with anki_vector.AsyncRobot() as robot:
# move head from minimum to maximum angle
robot.behavior.set_head_angle(MIN_HEAD_ANGLE)
time.sleep(1.0)
robot.behavior.set_head_angle(MAX_HEAD_ANGLE)
time.sleep(1.0)
# move head to middle
head_future = robot.behavior.set_head_angle(MIN_HEAD_ANGLE)
head_future.cancel()
"""
if angle < MIN_HEAD_ANGLE:
self.logger.warning("head angle %s too small, should be in %f..%f range - clamping",
angle.degrees, MIN_HEAD_ANGLE.degrees, MAX_HEAD_ANGLE.degrees)
angle = MIN_HEAD_ANGLE
elif angle > MAX_HEAD_ANGLE:
self.logger.warning("head angle %s too large, should be in %f..%f range - clamping",
angle.degrees, MIN_HEAD_ANGLE.degrees, MAX_HEAD_ANGLE.degrees)
angle = MAX_HEAD_ANGLE
set_head_angle_request = protocol.SetHeadAngleRequest(angle_rad=angle.radians,
max_speed_rad_per_sec=max_speed,
accel_rad_per_sec2=accel,
duration_sec=duration,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.SetHeadAngle(set_head_angle_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def set_lift_height(self,
height: float,
accel: float = 10.0,
max_speed: float = 10.0,
duration: float = 0.0,
num_retries: int = 0,
_action_id: int = None) -> protocol.SetLiftHeightResponse:
"""Tell Vector's lift to move to a given height.
:param height: desired height for Vector's lift 0.0 (bottom) to
1.0 (top) (we clamp it to this range internally).
:param accel: Acceleration of Vector's lift in radians per
second squared.
:param max_speed: Maximum speed of Vector's lift in radians per second.
:param duration: Time for Vector's lift to move in seconds. A value
of zero will make Vector try to do it as quickly as possible.
:param num_retries: Number of times to reattempt the action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.behavior.set_lift_height(1.0)
robot.behavior.set_lift_height(0.0)
Example of cancelling the :meth:`set_lift_height` behavior:
.. testcode::
import anki_vector
from anki_vector.behavior import MIN_LIFT_HEIGHT_MM, MAX_LIFT_HEIGHT_MM
import time
with anki_vector.AsyncRobot() as robot:
robot.behavior.set_lift_height(1.0)
time.sleep(1.0)
lift_future = robot.behavior.set_lift_height(0.0)
time.sleep(1.0)
lift_future = robot.behavior.set_lift_height(1.0)
lift_future.cancel()
"""
if height < 0.0:
self.logger.warning("lift height %s too small, should be in 0..1 range - clamping", height)
height = MIN_LIFT_HEIGHT_MM
elif height > 1.0:
self.logger.warning("lift height %s too large, should be in 0..1 range - clamping", height)
height = MAX_LIFT_HEIGHT_MM
else:
height = MIN_LIFT_HEIGHT_MM + (height * (MAX_LIFT_HEIGHT_MM - MIN_LIFT_HEIGHT_MM))
set_lift_height_request = protocol.SetLiftHeightRequest(height_mm=height,
max_speed_rad_per_sec=max_speed,
accel_rad_per_sec2=accel,
duration_sec=duration,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.SetLiftHeight(set_lift_height_request)
    @connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
    async def turn_towards_face(self,
                                face: faces.Face,
                                num_retries: int = 0,
                                _action_id: int = None) -> protocol.TurnTowardsFaceResponse:
        """Tells Vector to turn towards this face.

        :param face: The face Vector will turn towards (a
            :class:`anki_vector.faces.Face`; its ``face_id`` is sent to the
            robot). The maximum turn is 180 degrees.
        :param num_retries: Number of times to reattempt the action in case of a failure.

        Returns:
            A response from the robot with status information sent when this request successfully completes or fails.

        .. NOTE(review): the examples below pass the integer ``1``, but the
           implementation reads ``face.face_id``, so a bare int would raise
           AttributeError. The examples look stale; a ``faces.Face`` object
           should be passed instead — confirm against callers.

        .. testcode::

            import anki_vector

            with anki_vector.Robot() as robot:
                robot.behavior.turn_towards_face(1)

        Example of cancelling the :meth:`turn_towards_face` behavior:

        .. testcode::

            import anki_vector

            with anki_vector.Robot() as robot:
                turn_towards_face_future = robot.behavior.turn_towards_face(1)
                turn_towards_face_future.cancel()
        """
        turn_towards_face_request = protocol.TurnTowardsFaceRequest(face_id=face.face_id,
                                                                    max_turn_angle_rad=util.degrees(180).radians,
                                                                    id_tag=_action_id,
                                                                    num_retries=num_retries)
        return await self.grpc_interface.TurnTowardsFace(turn_towards_face_request)
    @connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
    async def go_to_object(self,
                           target_object: objects.LightCube,
                           distance_from_object: util.Distance,
                           num_retries: int = 0,
                           _action_id: int = None) -> protocol.GoToObjectResponse:
        """Tells Vector to drive to his Cube.

        :param target_object: The destination object. CustomObject instances are not supported.
        :param distance_from_object: The distance from the object to stop
            (a :class:`anki_vector.util.Distance`; its ``distance_mm`` is
            sent to the robot). This is the distance between the origins.
            For instance, the distance from the robot's origin
            (between Vector's two front wheels) to the cube's origin (at the center of the cube) is ~40mm.
        :param num_retries: Number of times to reattempt action in case of a failure.

        Returns:
            A response from the robot with status information sent when this request successfully completes or fails.

        .. testcode::

            import anki_vector
            from anki_vector.util import distance_mm

            with anki_vector.Robot() as robot:
                robot.world.connect_cube()
                if robot.world.connected_light_cube:
                    robot.behavior.go_to_object(robot.world.connected_light_cube, distance_mm(70.0))
        """
        if target_object is None:
            raise VectorException("Must supply a target_object of type LightCube to go_to_object")
        go_to_object_request = protocol.GoToObjectRequest(object_id=target_object.object_id,
                                                          distance_from_object_origin_mm=distance_from_object.distance_mm,
                                                          use_pre_dock_pose=False,
                                                          id_tag=_action_id,
                                                          num_retries=num_retries)
        return await self.grpc_interface.GoToObject(go_to_object_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def roll_cube(self,
target_object: objects.LightCube,
approach_angle: util.Angle = None,
num_retries: int = 0,
_action_id: int = None) -> protocol.RollObjectResponse:
"""Tells Vector to roll a specified cube object.
:param target_object: The cube to roll.
:param approach_angle: The angle to approach the cube from. For example, 180 degrees will cause Vector to drive
past the cube and approach it from behind.
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import distance_mm
with anki_vector.Robot() as robot:
robot.world.connect_cube()
if robot.world.connected_light_cube:
robot.behavior.roll_cube(robot.world.connected_light_cube)
"""
if target_object is None:
raise VectorException("Must supply a target_object of type LightCube to roll_cube")
if approach_angle is None:
use_approach_angle = False
approach_angle = util.degrees(0)
else:
use_approach_angle = True
approach_angle = approach_angle
roll_object_request = protocol.RollObjectRequest(object_id=target_object.object_id,
approach_angle_rad=approach_angle.radians,
use_approach_angle=use_approach_angle,
use_pre_dock_pose=use_approach_angle,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.RollObject(roll_object_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def pop_a_wheelie(self,
target_object: objects.LightCube,
approach_angle: util.Angle = None,
num_retries: int = 0,
_action_id: int = None) -> protocol.PopAWheelieResponse:
"""Tells Vector to "pop a wheelie" using his light cube.
:param target_object: The cube to push down on with Vector's lift, to start the wheelie.
:param approach_angle: The angle to approach the cube from. For example, 180 degrees will cause Vector to drive
past the cube and approach it from behind.
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
from anki_vector.util import distance_mm
with anki_vector.Robot() as robot:
robot.world.connect_cube()
if robot.world.connected_light_cube:
robot.behavior.pop_a_wheelie(robot.world.connected_light_cube)
"""
if target_object is None:
raise VectorException("Must supply a target_object of type LightCube to pop_a_wheelie")
if approach_angle is None:
use_approach_angle = False
approach_angle = util.degrees(0)
else:
use_approach_angle = True
approach_angle = approach_angle
pop_a_wheelie_request = protocol.PopAWheelieRequest(object_id=target_object.object_id,
approach_angle_rad=approach_angle.radians,
use_approach_angle=use_approach_angle,
use_pre_dock_pose=use_approach_angle,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.PopAWheelie(pop_a_wheelie_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def pickup_object(self,
target_object: objects.LightCube,
use_pre_dock_pose: bool = True,
num_retries: int = 0,
_action_id: int = None) -> protocol.PickupObjectResponse:
"""Instruct the robot to pick up his LightCube.
While picking up the cube, Vector will use path planning.
Note that actions that use the wheels cannot be performed at the same time,
otherwise you may see a TRACKS_LOCKED error. Methods that use the wheels include
:meth:`go_to_pose`, :meth:`dock_with_cube`, :meth:`turn_in_place`, :meth:`drive_straight`, and :meth:`pickup_object`.
:param target_object: The LightCube object to dock with.
:param use_pre_dock_pose: Whether or not to try to immediately pick
up an object or first position the robot next to the object.
:param num_retries: Number of times to reattempt action in case of a failure.
Returns:
A response from the robot with status information sent when this request successfully completes or fails.
.. testcode::
import anki_vector
with anki_vector.Robot() as robot:
robot.world.connect_cube()
if robot.world.connected_light_cube:
robot.behavior.pickup_object(robot.world.connected_light_cube)
"""
if target_object is None:
raise VectorException("Must supply a target_object to dock_with_cube")
pickup_object_request = protocol.PickupObjectRequest(object_id=target_object.object_id,
use_pre_dock_pose=use_pre_dock_pose,
id_tag=_action_id,
num_retries=num_retries)
return await self.grpc_interface.PickupObject(pickup_object_request)
@connection.on_connection_thread(is_cancellable=connection.CancelType.CANCELLABLE_ACTION)
async def place_object_on_ground_here(self,
                                      num_retries: int = 0,
                                      _action_id: int = None) -> protocol.PlaceObjectOnGroundHereResponse:
    """Ask Vector to place the object he is carrying on the ground at the current location.

    :param num_retries: Number of times to reattempt action in case of a failure.

    Returns:
        A response from the robot with status information sent when this request successfully completes or fails.

    .. testcode::

        import anki_vector
        with anki_vector.Robot() as robot:
            robot.world.connect_cube()
            if robot.world.connected_light_cube:
                robot.behavior.pickup_object(robot.world.connected_light_cube)
                robot.behavior.place_object_on_ground_here()
    """
    request = protocol.PlaceObjectOnGroundHereRequest(id_tag=_action_id,
                                                      num_retries=num_retries)
    return await self.grpc_interface.PlaceObjectOnGroundHere(request)
class ReserveBehaviorControl():
    """A ReserveBehaviorControl object can be used to suppress the ordinary idle behaviors of
    the Robot and keep Vector still between SDK control instances. Care must be taken when
    blocking background behaviors, as this may make Vector appear non-responsive.

    This class is most easily used via a built-in SDK script, and can be called on the command-line
    via the executable module :class:`anki_vector.reserve_control`:

    .. code-block:: bash

        python3 -m anki_vector.reserve_control

    As long as the script is running, background behaviors will not activate, keeping Vector
    still while other SDK scripts may take control. Highest-level behaviors like returning to
    the charger due to low battery will still activate.

    System-specific shortcuts calling this executable module can be found in the examples/scripts
    folder. These scripts can be double-clicked to easily reserve behavior control for the current
    SDK default robot.

    If there is a need to keep background behaviors from activating in a single script, the class
    may be used to reserve behavior control while in scope:

    .. code-block:: python

        import anki_vector
        from anki_vector import behavior

        with behavior.ReserveBehaviorControl():

            # At this point, Vector will remain still, even without
            # a Robot instance being in scope.

            # take control of the robot as usual
            with anki_vector.Robot() as robot:
                robot.anim.play_animation("anim_turn_left_01")

        # Robot will not perform idle behaviors until the script completes

    :param serial: Vector's serial number. The robot's serial number (ex. 00e20100) is located on
                   the underside of Vector, or accessible from Vector's debug screen. Used to
                   identify which Vector configuration to load.
    :param ip: Vector's IP address. (optional)
    :param config: A custom :class:`dict` to override values in Vector's configuration. (optional)
                   Example: :code:`{"cert": "/path/to/file.cert", "name": "Vector-XXXX", "guid": "<secret_key>"}`
                   where :code:`cert` is the certificate to identify Vector, :code:`name` is the
                   name on Vector's face when his backpack is double-clicked on the charger, and
                   :code:`guid` is the authorization token that identifies the SDK user.
                   Note: Never share your authentication credentials with anyone.
    :param behavior_activation_timeout: The time to wait for control of the robot before failing.
    """

    def __init__(self,
                 serial: str = None,
                 ip: str = None,
                 config: dict = None,
                 behavior_activation_timeout: int = 10):
        config = config if config is not None else {}
        self.logger = util.get_class_logger(__name__, self)
        # Values from the on-disk configuration for `serial`, with any
        # caller-supplied `config` entries taking precedence (later dict wins).
        config = {**util.read_configuration(serial, name=None, logger=self.logger), **config}
        self._name = config["name"]
        # An explicit `ip` argument overrides the configured address.
        self._ip = ip if ip is not None else config["ip"]
        self._cert_file = config["cert"]
        self._guid = config["guid"]
        # Default gRPC port; may be overridden by the configuration file.
        self._port = "443"
        if 'port' in config:
            self._port = config["port"]
        if self._name is None or self._ip is None or self._cert_file is None or self._guid is None:
            raise ValueError("The Robot object requires a serial and for Vector to be logged in (using the app then running the anki_vector.configure executable submodule).\n"
                             "You may also provide the values necessary for connection through the config parameter. ex: "
                             '{"name":"Vector-XXXX", "ip":"XX.XX.XX.XX", "cert":"/path/to/cert_file", "guid":"<secret_key>"}')
        # RESERVE_CONTROL keeps idle behaviors suppressed without taking full control.
        self._conn = connection.Connection(self._name, ':'.join([self._ip, self._port]), self._cert_file, self._guid,
                                           behavior_control_level=connection.ControlPriorityLevel.RESERVE_CONTROL)
        self._behavior_activation_timeout = behavior_activation_timeout

    def __enter__(self):
        # Open the connection (and thereby reserve control) on scope entry.
        self._conn.connect(self._behavior_activation_timeout)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the reservation when leaving the `with` block.
        self._conn.close()
| StarcoderdataPython |
1847900 | # -*- coding: utf-8 -*-
from builtins import str
from flask import render_template,g
from flask_mail import Message
from cineapp import mail, db
from cineapp.models import User
from threading import Thread
from cineapp import app
import html2text, time, json, traceback
def send_async_email(flask_app, message):
    """Deliver *message* from a worker thread so the web request never blocks.

    Flask-Mail needs an application context, which is thread-local, so one is
    pushed explicitly inside the worker thread.
    """
    with flask_app.app_context():
        mail.send(message)
def send_email(subject, sender, recipients, text_body):
    """Build a plain-text Flask-Mail message and hand it to a background thread.

    :param subject: mail subject line
    :param sender: From address
    :param recipients: list of destination addresses
    :param text_body: plain-text body
    """
    message = Message(subject, sender=sender, recipients=recipients)
    message.body = text_body
    Thread(target=send_async_email, args=[app, message]).start()
# Function which sends notifications to users when a show is added
def add_show_notification(show):
    """Mail every subscribed user that *show* was added by the logged-in user.

    Users who disabled "notif_show_add" are skipped, as is the author himself
    when he disabled "notif_own_activity".

    :param show: the show that was just added (passed to the mail template)
    :return: 0 when every notification was sent, 1 if at least one failed.
             (Fixed: previously the function bailed out on the FIRST failure,
             leaving remaining users un-notified, and returned None on success.)
    """
    had_error = False
    for cur_user in User.query.all():
        # Check if cur_user is the logged user who added the show, in order
        # to adapt the mail wording and honour his own-activity preference.
        send_own_activity_mail = True
        if cur_user.id == g.user.id:
            you_user = True
            if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
                send_own_activity_mail = False
        else:
            you_user = False
        # Send the mail if we have to
        if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and send_own_activity_mail == True:
            try:
                send_email('[Cineapp] - %s' % g.messages["email_title_add"], app.config['MAIL_SENDER'], [cur_user.email],
                           render_template('add_show_notification.txt', dest_user=cur_user, add_user=g.user, show=show, you_user=you_user))
            except Exception as e:
                # Log and keep going so one bad address does not block the others.
                app.logger.error("Impossible d'envoyer le mail d'ajout: %s", e)
                had_error = True
    return 1 if had_error else 0
# Function which sends notifications to users when a mark is added/updated
def mark_show_notification(mark, notif_type):
    """Mail every subscribed user about a mark event on a show.

    :param mark: the Mark object; its HTML comment is converted to plain text.
    :param notif_type: "add", "update" or "homework" — selects the subject
                       line. Unknown types are logged and skipped.
    :return: 0 when every notification was sent, 1 if at least one failed.
             (Fixed: an early ``return 0`` inside the loop made the function
             stop after mailing only the first eligible user.)
    """
    users = User.query.filter_by().all()
    # Convert the HTML content to text in order to have a nice display in the mail
    html_converter = html2text.HTML2Text()
    mark.comment = html_converter.handle(mark.comment).strip()
    had_error = False
    for cur_user in users:
        # Check if cur_user is the logged user who marked the show, in order
        # to adapt the mail wording and honour his own-activity preference.
        send_own_activity_mail = True
        if cur_user.id == g.user.id:
            you_user = True
            if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
                send_own_activity_mail = False
        else:
            you_user = False
        # Send the mail if we have to
        if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and send_own_activity_mail == True:
            try:
                if notif_type == "add":
                    subject = '[Cineapp] - %s' % g.messages["email_title_mark"]
                elif notif_type == "update":
                    subject = '[Cineapp] - Note mise à jour'
                elif notif_type == "homework":
                    subject = '[Cineapp] - Devoir rempli'
                else:
                    app.logger.error("Type de notification inconnu: %s", notif_type)
                    continue
                send_email(subject, app.config['MAIL_SENDER'], [cur_user.email],
                           render_template('mark_show_notification.txt', dest_user=cur_user, add_user=g.user, mark=mark, you_user=you_user, notif_type=notif_type))
            except Exception as e:
                # Log and continue with the remaining users.
                app.logger.error("Impossible d'envoyer le mail: %s", e)
                had_error = True
    return 1 if had_error else 0
# Function which sends notification to the user who received an homework.
# Only the user who has to handle the homework gets a mail.
def add_homework_notification(mark):
    """Mail the homework recipient that a homework was assigned to him.

    :param mark: mark carrying the homework (user, homework_who_user, show)
    :return: 0 when sent, 1 when sending failed, 2 when the recipient has
             homework notifications disabled.
    """
    prefs = mark.user.notifications
    if prefs == None or prefs["notif_homework_add"] != True:
        # The recipient does not want to be notified
        return 2
    try:
        send_email('[Cineapp] - Attribution d\'un devoir', app.config['MAIL_SENDER'],[ mark.user.email ],
                   render_template('add_homework_notification.txt', dest_user=mark.user, homework_who=mark.homework_who_user, show=mark.show))
    except Exception as e:
        # We couldn't send the mail
        app.logger.error("Impossible d\'envoyer la notification de devoir : %s", e)
        app.logger.error("%s" % traceback.print_exc())
        return 1
    return 0
# Function which sends notification when an homework has been cancelled
def delete_homework_notification(mark):
    """Mail the homework's recipient that the homework was cancelled.

    :param mark: mark carrying the homework (user, homework_who_user, show)
    :return: 0 when sent, 1 when sending failed, 2 when the recipient has
             homework notifications disabled.
    """
    # Check if notifications are enabled for the user
    if mark.user.notifications != None and mark.user.notifications["notif_homework_add"] == True:
        try:
            # NOTE(review): template name '_homework_notification.txt' looks
            # truncated (compare 'add_homework_notification.txt' above) —
            # confirm the file actually exists under templates/.
            send_email('[Cineapp] - Annulation d\'un devoir', app.config['MAIL_SENDER'],[ mark.user.email ],
                       render_template('_homework_notification.txt', dest_user=mark.user, homework_who=mark.homework_who_user, show=mark.show))
            return 0
        except Exception as e:
            # Log the failure instead of silently swallowing it (was a bare
            # `except:`), consistent with add_homework_notification.
            app.logger.error("Impossible d'envoyer la notification d'annulation de devoir : %s", e)
            return 1
    else:
        # Display a message that the user doesn't want to be notified
        return 2
# Function which sends notification to users when a show has been updated
def update_show_notification(notif):
    """Mail every subscribed user that a show was updated in the database.

    :param notif: update payload forwarded to the mail template
    """
    for cur_user in User.query.filter_by().all():
        # Honour the author's own-activity preference and adapt the wording.
        skip_own_activity = False
        you_user = cur_user.id == g.user.id
        if you_user:
            if cur_user.notifications != None and cur_user.notifications["notif_own_activity"] == False:
                skip_own_activity = True
        if cur_user.notifications != None and cur_user.notifications["notif_show_add"] == True and not skip_own_activity:
            send_email('[Cineapp] - %s' % g.messages["email_title_update"] , app.config['MAIL_SENDER'],[ cur_user.email ] ,
                       render_template('update_show_notification.txt', dest_user=cur_user, add_user=g.user,notif=notif,you_user=you_user))
# Function which sends notification to users when a comment has been posted on a mark
def mark_comment_notification(mark_comment, notif_type):
    """Mail subscribed users when a comment on a mark is added, edited or deleted.

    :param mark_comment: the comment (carries its author and the target mark)
    :param notif_type: "add_mark_comment", "edit_mark_comment" or
                       "delete_mark_comment". Unknown types are logged and
                       skipped (fixed: they previously raised NameError
                       because mail_title was left unbound).
    """
    users = User.query.filter_by().all()
    # True when the author commented on his own mark (changes the mail wording)
    own_mark_user = mark_comment.user.id == mark_comment.mark.user.id
    for cur_user in users:
        send_own_activity_mail = True
        # Check if the logged user posted the comment
        if cur_user.id == g.user.id:
            you_user = True
            if cur_user.notifications != None and "notif_own_activity" in cur_user.notifications and cur_user.notifications["notif_own_activity"] == False:
                send_own_activity_mail = False
        else:
            you_user = False
        # Check if the comment refers to a mark owned by this recipient
        you_dest_user = cur_user.id == mark_comment.mark.user.id
        # Send the mail if we have to
        if cur_user.notifications != None and "notif_comment_add" in cur_user.notifications and cur_user.notifications["notif_comment_add"] == True and send_own_activity_mail == True:
            # Pick the subject and template for this notification type
            if notif_type == "add_mark_comment":
                mail_title = "Ajout d'un commentaire"
                notif_template = "mark_comment_notification.txt"
            elif notif_type == "edit_mark_comment":
                mail_title = "Modification d'un commentaire"
                notif_template = "mark_update_comment_notification.txt"
            elif notif_type == "delete_mark_comment":
                mail_title = "Suppression d'un commentaire"
                notif_template = "mark_delete_comment_notification.txt"
            else:
                app.logger.error("Type de notification inconnu: %s", notif_type)
                continue
            send_email('[Cineapp] - ' + mail_title, app.config['MAIL_SENDER'], [cur_user.email],
                       render_template(notif_template, dest_user=cur_user, mark_comment=mark_comment, you_user=you_user, you_dest_user=you_dest_user, own_mark_user=own_mark_user))
# Function which sends notification to users when the favorite/star status has been updated for a show
def favorite_update_notification(favorite_show, notif_type):
    """Mail subscribed users that a show's favorite/star status changed.

    :param favorite_show: the show whose favorite status changed
    :param notif_type: "add" or "delete". Unknown types are logged and
                       skipped (fixed: they previously raised NameError
                       because mail_title was left unbound).
    """
    users = User.query.filter_by().all()
    for cur_user in users:
        send_own_activity_mail = True
        # Check if the logged user triggered the change
        if cur_user.id == g.user.id:
            you_user = True
            if cur_user.notifications != None and "notif_own_activity" in cur_user.notifications and cur_user.notifications["notif_own_activity"] == False:
                send_own_activity_mail = False
        else:
            you_user = False
        # Send the mail if we have to
        if cur_user.notifications != None and "notif_favorite_update" in cur_user.notifications and cur_user.notifications["notif_favorite_update"] == True and send_own_activity_mail == True:
            # Pick the subject for this notification type
            if notif_type == "add":
                mail_title = g.messages["email_title_favorite_add"]
            elif notif_type == "delete":
                mail_title = g.messages["email_title_favorite_delete"]
            else:
                app.logger.error("Type de notification inconnu: %s", notif_type)
                continue
            notif_template = "favorite_update_notification.txt"
            send_email('[Cineapp] - ' + mail_title, app.config['MAIL_SENDER'], [cur_user.email],
                       render_template(notif_template, dest_user=cur_user, favorite_show=favorite_show, you_user=you_user, notif_type=notif_type))
# Function that sends a notification when a user is named on the chat
def chat_message_notification(message, user):
    """Mail *user* when he is quoted on the chat, if he opted in."""
    prefs = user.notifications
    if prefs == None or "notif_chat_message" not in prefs or prefs["notif_chat_message"] != True:
        return
    app.logger.info("Sending mail for chat quote to %s " % user.email)
    send_email('[Cineapp] - Message depuis le chat' , app.config['MAIL_SENDER'],[ user.email ] ,
               render_template('chat_message_notification.txt', dest_user=user, message=message))
| StarcoderdataPython |
7010 | <filename>util.py
import numpy as np
import pandas as pd
from skimage import io
import skimage.measure as measure
import os
from lpg_pca_impl import denoise
def getNoisedImage(originalImage, variance):
    """Return a copy of *originalImage* with additive Gaussian noise.

    The noise field is RMS-normalised to 1 and then scaled by *variance*
    (so *variance* effectively acts as the noise standard deviation).
    The RNG is re-seeded, making the result deterministic per input.
    """
    np.random.seed(42)
    raw_noise = np.random.normal(size=originalImage.shape)
    unit_noise = raw_noise / np.sqrt(np.power(raw_noise, 2).mean())
    return originalImage + variance * unit_noise
def clip(img):
    """Clamp all pixel values of *img* into [0, 1] and return a new array.

    Uses np.clip instead of the previous chained np.minimum/np.maximum,
    which also avoids allocating throwaway ones/zeros arrays.
    """
    return np.clip(img, 0.0, 1.0)
def readImg(path):
    """Load the image at *path* as grayscale float64 values in [0, 1]."""
    gray = io.imread(path, as_gray=True)
    return gray.astype('float64') / 255.0
def showImg(img, name):
    """Display *img* (clipped to [0, 1]) after printing its *name*."""
    print(name)
    as_bytes = (clip(img) * 255.0).astype('uint8')
    io.imshow(as_bytes)
def saveImg(img, path):
    """Write *img* to *path*, clipped to [0, 1] and scaled to uint8."""
    as_bytes = (clip(img) * 255.0).astype('uint8')
    io.imsave(path, as_bytes)
def compare_psnr(img1, img2):
    # Peak signal-to-noise ratio between two images (thin skimage wrapper).
    # NOTE(review): skimage.measure.compare_psnr was deprecated in 0.16 and
    # removed in 0.18 (moved to skimage.metrics.peak_signal_noise_ratio) —
    # presumably an older scikit-image is pinned; confirm.
    return measure.compare_psnr(img1, img2)
def compare_ssim(img1, img2):
    # Structural similarity index between two images (thin skimage wrapper).
    # NOTE(review): skimage.measure.compare_ssim was deprecated in 0.16 and
    # removed in 0.18 (moved to skimage.metrics.structural_similarity) —
    # presumably an older scikit-image is pinned; confirm.
    return measure.compare_ssim(img1, img2)
def generate_images(img_name='mri'):
    """Produce noised and denoised variants of images/<img_name>.png.

    For each noise level (10, 20, 30, 40 on the 0-255 scale) this writes
    '<name>_noised_<sigma>.png' and '<name>_denoised_<sigma>.png' into the
    'experiments' folder and prints PSNR/SSIM figures. Side effects only.
    """
    out_dir = 'experiments'
    for sigma in [10, 20, 30, 40]:
        original = readImg(os.path.join('images', img_name + '.png'))
        # Pixel values are in [0, 1], so rescale sigma accordingly
        noised = getNoisedImage(original, sigma / 255.0)
        noised_name = f'{img_name}_noised_{sigma}.png'
        saveImg(noised, os.path.join(out_dir, noised_name))
        print(noised_name + ' started.')
        denoised = denoise(noised, sigma)
        denoised_name = f'{img_name}_denoised_{sigma}.png'
        saveImg(denoised, os.path.join(out_dir, denoised_name))
        print(denoised_name + ' finished.')
        print("noised PSNR: " + str(compare_psnr(original, noised)) + ", SSIM: " + str(compare_ssim(original, noised)))
        print("denoised PSNR: " + str(compare_psnr(original, denoised)) + ", SSIM: " + str(compare_ssim(original, denoised)))
def generate_latex_tables():
    # Build a LaTeX file ('tables.tex') with one PSNR(SSIM) comparison table
    # per image, from the benchmark results in data.csv. Uses a throwaway
    # 'temp' directory for pandas' intermediate .tex output.
    df = pd.read_csv('data.csv')
    df = df.round(2)
    image_texts = np.array([])
    temp_directory = os.path.join(os.path.dirname(__file__), 'temp')
    if not os.path.exists(temp_directory):
        os.makedirs(temp_directory)
    for image_name in list(set(df['image_name'])):
        image_df = df[df['image_name'] == image_name]
        # Merge each method's PSNR and SSIM columns into "PSNR(SSIM)" strings.
        # NOTE(review): assigning into a filtered slice triggers pandas'
        # SettingWithCopyWarning; works here but consider .copy() — confirm.
        image_df['denoise_lpg_pca'] = image_df['denoise_psnr_lpg_pca'].map(str) + '(' + image_df['denoise_ssim_lpg_pca'].map(str) + ')'
        image_df['denoise_mf'] = image_df['denoise_psnr_mf'].map(str) + '(' + image_df['denoise_ssim_mf'].map(str) + ')'
        image_df['denoise_nlm'] = image_df['denoise_psnr_nlm'].map(str) + '(' + image_df['denoise_ssim_nlm'].map(str) + ')'
        image_df['denoise_bm3d'] = image_df['denoise_psnr_bm3d'].map(str) + '(' + image_df['denoise_ssim_bm3d'].map(str) + ')'
        image_df = image_df[['sigma', 'denoise_lpg_pca', 'denoise_mf', 'denoise_nlm', 'denoise_bm3d']]
        image_df['sigma'] = image_df['sigma'].map(int)
        image_df.columns = ['sigma', 'LPG-PCA', 'MF', "NLM", 'BM3D']
        # Let pandas render the table, then post-process the .tex text:
        path = os.path.join(temp_directory, image_name + '.tex')
        image_df.to_latex(path, index=False, column_format='lrrrr')
        with open(path, 'r') as file:
            image_text = file.read()
        # Inject the image name as a header row and wrap in a half-width minipage
        image_text = image_text.replace(' ', '').replace(r'\toprule', r'\toprule &&' + image_name + r'\\ \midrule')
        image_text = r'\noindent\begin{minipage}{.5\linewidth}' + '\n' + image_text + '\n' + r'\end{minipage}'
        image_text = image_text.replace('\n\n', '\n').replace('sigma&', '$\\sigma$&')
        image_texts = np.append(image_texts, image_text)
        os.remove(path)
    result = '\n'.join(image_texts)
    filename = 'tables.tex'
    with open(filename, "w+") as file:
        file.write(result)
    # Remove the temp directory only if nothing else was left behind
    if(len(os.listdir(temp_directory))) == 0:
        os.rmdir(temp_directory)
| StarcoderdataPython |
6468862 | <reponame>jnthn/intellij-community
from b import f
def g():
    # Thin indirection wrapper: delegates to f imported from module b.
    return f()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.