content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
def uncentime(jours):
    """Total saved after `jours` days when the deposit starts at 0.01
    and doubles every day (classic "penny doubling" puzzle)."""
    montants = [0.01]
    while len(montants) < jours:
        montants.append(montants[-1] * 2)
    return sum(montants)


print(uncentime(30))
|
nilq/baby-python
|
python
|
# Reads a count N from stdin, then prints N lines of "a a+1 a+2 PUM",
# advancing a by 4 per line (i.e. every fourth number is replaced by "PUM").
a=1
for i in range(int(input())):
    print(f"{a} {a+1} {a+2} PUM")
    a+=4
|
nilq/baby-python
|
python
|
import pandas as pd
class Security():
    """Container for the price/volume history of one financial security.

    Exposes the OHLCV columns of a pandas DataFrame as attributes,
    together with the security's display name and ticker symbol.
    """

    def __init__(self, dataframe, security_name, ticker):
        """
        Arguments
        ---------
        dataframe       Pandas DataFrame whose columns may include
                        'Open', 'High', 'Low', 'Close', 'Volume';
                        index values are datetime-like.
        security_name   String with the security name.
        ticker          String of the ticker for the security.
        """
        self.open = dataframe['Open']
        self.high = dataframe['High']
        self.low = dataframe['Low']
        self.close = dataframe['Close']
        self.volume = dataframe['Volume']
        self.dates = pd.to_datetime(dataframe.index.values)
        self.name = security_name
        self.ticker = ticker
|
nilq/baby-python
|
python
|
# Smoke-test script for intentBox: loads the intent definitions under
# test/intents and dumps what each engine (adapt, padatious, fuzzy)
# parsed out of them.
from pprint import pprint
from intentBox import IntentAssistant
i = IntentAssistant()
i.load_folder("test/intents")
print("\nADAPT:")
pprint(i.adapt_intents)
print("\nPADATIOUS:")
pprint(i.padatious_intents)
print("\nFUZZY MATCH:")
pprint(i.fuzzy_intents)
|
nilq/baby-python
|
python
|
"""
1013 : Contact
URL : https://www.acmicpc.net/problem/1013
Input :
3
10010111
011000100110001
0110001011001
Output :
NO
NO
YES
"""
def test(s):
if len(s) == 0:
return True
if s.startswith('10'):
i = 2
if i >= len(s):
return False
if s[i] != '0':
return False
while i < len(s) and s[i] == '0':
i += 1
if i >= len(s):
return False
if s[i] != '1':
return False
success = False
while i < len(s) and s[i] == '1' and not success:
i += 1
success = test(s[i:])
return test(s[i:])
elif s.startswith('01'):
return test(s[2:])
return False
# Driver: read the number of test cases, then classify each signal line.
t = int(input())
for i in range(t):
    s = input()
    if test(s):
        print('YES')
    else:
        print('NO')
|
nilq/baby-python
|
python
|
"""
Simple API implementation within a single GET request.
Generally wrap these in a docker container and could deploy to cluster.
I use cherryPy because it was so simple to run, but heard Flask was pretty
good. So I decided to give it a try.
Wanted to try out connecting pymongo to flask, but was spending too much time
trying to connect to the server.
[Flask API](https://flask.palletsprojects.com/en/1.1.x/)
"""
from flask import Flask, jsonify, request
from fuzzywuzzy import fuzz
# Placed code into other files, I find the larger the py file, the harder
# it is to support.
from calculate_distance import (calc_latlong_distance, find_max_latitude,
find_max_longitude, find_min_latitude,
find_min_longitude)
from data_connection import dataconn
# Flask app plus a data connection preloaded from a local JSON dump
# (stands in for a real database for this exercise).
application = Flask(__name__)
dbconn = dataconn(json_quickload="data_fulldump.json")
def _find_rentals_nearby(latitude, longitude, dist_range):
    """
    #### Input:
    - Coordinates of the center of the search area.
    - Range distance extending outside of the center.
    #### Output:
    - Dict of rental ids using the distance as the key.
    - Sorted list of keys in asc order.
    #### Assumption:
    - range is the max dist, the user is willing to go.
    #### Ideas:
    Maybe we can cache some of these calculations, or reorganize the data
    into neighborhoods so we only scan nearby areas instead of every row.
    """
    rentals_by_range = {}
    # make sure they are explicitly floats.
    latitude = float(latitude)
    longitude = float(longitude)
    # Build a bounding box around the search circle so most rows can be
    # discarded cheaply before the exact distance calculation.  Assumes
    # find_max/min_* return the box enclosing the radius-`dist_range`
    # circle -- TODO confirm against calculate_distance's docs.
    max_lat = find_max_latitude(latitude, longitude, dist_range)
    max_lng = find_max_longitude(latitude, longitude, dist_range)
    min_lat = find_min_latitude(latitude, longitude, dist_range)
    min_lng = find_min_longitude(latitude, longitude, dist_range)
    for row in dbconn.get_data_row_iter():
        r_lat = float(row['latitude'])
        r_lng = float(row['longitude'])
        # BUGFIX: this test previously used `or`, which passed every row
        # sharing either the latitude OR the longitude band and defeated
        # the pre-filter.  A point is inside the box only when BOTH
        # coordinates are in range.  Final results are unchanged because
        # the exact distance check below still applies.
        if (min_lat <= r_lat <= max_lat) and (min_lng <= r_lng <= max_lng):
            dist = calc_latlong_distance([r_lat, r_lng], [latitude, longitude])
            if dist <= dist_range:
                # Bucket ids by distance; sorting the keys afterwards
                # yields closest-to-farthest order.
                rentals_by_range.setdefault(dist, []).append(row["id"])
    return rentals_by_range, sorted(rentals_by_range.keys())
def _check_request_fields(dict_datarequest, lst_requiredfields):
"""
#### Input:
- Data request from the HTTP call (Just passed the whole thing in)
- List of expected keys.
#### Desc:
Use set subtraction to figure out if we are missing any fields.
"""
reqfields = set(lst_requiredfields) - set(dict_datarequest.keys())
if len(reqfields) == 0:
return True
else:
return False
def _fuzzy_match(query_str, row_desc):
    """Score how well `query_str` matches `row_desc` (0-100).

    Averages four fuzzywuzzy ratios -- partial, plain, token-sort and
    token-set -- into one score; combining them compensates for human
    misspellings and word-order differences in the query.

    #### Ideas:
    Could load data into elastic search for more flexible search functions.
    """
    scores = (
        fuzz.partial_ratio(query_str, row_desc),
        fuzz.ratio(query_str, row_desc),
        fuzz.token_sort_ratio(query_str, row_desc),
        fuzz.token_set_ratio(query_str, row_desc),
    )
    return sum(scores) / 4
def _fuzzy_search_query(query_str, lst_nearby_rentals):
    """Fuzzy-match `query_str` against the names of nearby rentals.

    #### Input:
    - query string to search
    - dict of rental-id lists keyed by distance (subset of the data set)
    #### Output:
    - dict mapping fuzzy score -> list of document ids (scores <= 60 dropped)
    - score keys sorted in descending order
    """
    fuzz_scores = {}
    for doc_ids in lst_nearby_rentals.values():
        for doc_id in doc_ids:
            name = dbconn.get_data_row_by_id(doc_id)["name"]
            score = _fuzzy_match(query_str, name)
            if score > 60:
                fuzz_scores.setdefault(score, []).append(doc_id)
    # Fine for small result sets; revisit the eager sort if the number of
    # distinct scores ever gets large.
    return fuzz_scores, sorted(fuzz_scores, reverse=True)
def _nearby_landmarks(query_str, coordinates, distance):
    """
    TODO: implement function.
    Could use the query to determine certain landmarks and find a location
    nearby it. Could find rentals between the search location and the
    landmark. Search could still be constrained by distance.
    """
    # Intentionally a stub: callers must not rely on any return value yet.
    pass
def _find_nearby_helper(data):
    """
    #### Input:
    - Data dictionary from Flask request
    #### Output:
    - List of rentals, ordered by relevance to query than distance.
    - Includes the data row information to the rental as well.
    #### Ideas:
    Search optimization: maybe search only 100m (maybe a percentage range?)
    around the target then do another search for locations 100m-1000m. Could
    create an interable object to fetch chunks. No point in fetch all
    locations at once if user can't digest all the information at once.
    """
    lst_nearby_rentals, nearby_keys_sorted = _find_rentals_nearby(
        data['latitude'], data['longitude'], data['distance'])
    lst_relevence_scores = None
    relv_keys_sorted = None
    if "query" in data:
        # Sort the search based on query relevance
        # Will sort by relevance score. (closest to 100)
        # Cap match at 60%, if anything lower, don't include.
        # Don't need to pass in sorted range, since it is already close by
        lst_relevence_scores, relv_keys_sorted = _fuzzy_search_query(
            data["query"], lst_nearby_rentals)
    # TODO: Lots of other data we can filter by, but will focus on getting
    # it up and running. Reduce the list by the filters passed in.
    lst_filtered_ids = None
    if "filter" in data:
        pass
    # Build the return list based on the parameters above.
    # search_info maps doc id -> metadata; doc_id_list records first-seen
    # priority order (relevance hits first, then pure distance hits).
    search_info = {}
    doc_id_list = []
    # How do we balance relevance and distance?
    # For now will just prioritize relevance over distance.
    # Only process it if we have the data for it.
    if lst_relevence_scores is not None:
        for r_key in relv_keys_sorted:
            for d_id in lst_relevence_scores[r_key]:
                search_info[d_id] = {
                    "rental_id": d_id,
                    "relevance": r_key
                }
                doc_id_list.append(d_id)
    # Same as above, only process if we have the data.
    if lst_filtered_ids is not None:
        pass
    # Always process the nearby locations list.
    for n_key in nearby_keys_sorted:
        for d_id in lst_nearby_rentals[n_key]:
            if d_id not in search_info:
                search_info[d_id] = {
                    "rental_id": d_id,
                    "distance": n_key
                }
            else:
                # Already ranked by relevance; just attach its distance.
                search_info[d_id]["distance"] = n_key
            # There will be duplicates, but we'll take care of that.
            doc_id_list.append(d_id)
    rebuild_search_list = []
    for d_id in doc_id_list:
        # Lookup should be instant since it is just looking for existence.
        # Since we are popping the keys out, there will be no duplicates
        # in the results.
        if d_id in search_info:
            popped_data = search_info.pop(d_id)
            rebuild_search_list.append({
                "searchinfo": popped_data,
                "rentalinfo": dbconn.get_data_row_by_id(d_id)
            })
    return rebuild_search_list
# FLASK API Routes
@application.route('/')
def index():
    """Health-check root route: confirms the API is up."""
    # BUGFIX: corrected the user-facing typo "Serach" -> "Search".
    return jsonify(status=True, message='Rental Search API.')
@application.route('/findnearby', methods=['GET'])
def find_nearby():
    """GET /findnearby: search rentals near a coordinate.

    Required JSON body fields: latitude, longitude, distance; optional:
    query (fuzzy-matched against rental names).  Responds 400 when a
    required field is missing.
    """
    data = request.get_json(force=True)
    # Have a list of required fields and return error if not met.
    lst_reqfields = ["latitude", "longitude", "distance"]
    if _check_request_fields(data, lst_reqfields) is False:
        return jsonify(
            status=False, message='Missing required search fields.'), 400
    rebuild_search_list = _find_nearby_helper(data)
    return jsonify(status=True, data=rebuild_search_list)
    # TODO: return an error status if no results.
@application.route('/addrental', methods=['PUT'])
def add_rental():
    """PUT /addrental: persist a new rental row; 400 when the write fails."""
    # Added a route to add rentals.
    data = request.get_json(force=True)
    write_success, message = dbconn.write_data_row(data)
    # NOTE(review): commit() runs even when the write failed -- confirm
    # dataconn.commit() is a safe no-op in that case.
    dbconn.commit()
    if write_success:
        return jsonify(status=True, data=message)
    else:
        return jsonify(status=False, message=message), 400
|
nilq/baby-python
|
python
|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `deterministic.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import chex
from distrax._src.distributions import deterministic
from distrax._src.utils import equivalence
import jax.numpy as jnp
import numpy as np
RTOL = 1e-3
class DeterministicTest(equivalence.EquivalenceTest, parameterized.TestCase):
  """Tests for `distrax.Deterministic`.

  Relies on the `equivalence.EquivalenceTest` helpers (`_test_attribute`,
  `_test_sample_shape`, `_test_with_two_distributions`, ...) which compare
  the distrax distribution against its TFP counterpart.
  """

  def setUp(self):
    # pylint: disable=too-many-function-args
    super().setUp(deterministic.Deterministic)
    self.assertion_fn = lambda x, y: np.testing.assert_allclose(x, y, rtol=RTOL)

  def test_loc(self):
    dist_params = {'loc': [0.1, 0.5, 1.5]}
    dist = self.distrax_cls(**dist_params)
    self.assertion_fn(dist.loc, dist_params['loc'])

  @parameterized.named_parameters(
      ('None', None),
      ('0.0', 0.0),
      ('0.1', 0.1))
  def test_atol(self, atol):
    # A scalar atol is broadcast against loc; None defaults to zeros.
    dist_params = {'loc': np.asarray([0.1, 0.5, 1.5]), 'atol': atol}
    dist = self.distrax_cls(**dist_params)
    broadcasted_atol = np.zeros((3,)) if atol is None else atol * np.ones((3,))
    self.assertion_fn(dist.atol, broadcasted_atol)

  @parameterized.named_parameters(
      ('None', None),
      ('0.0', 0.0),
      ('0.1', 0.1))
  def test_rtol(self, rtol):
    # Same broadcasting contract as atol.
    dist_params = {'loc': np.asarray([0.1, 0.5, 1.5]), 'rtol': rtol}
    dist = self.distrax_cls(**dist_params)
    broadcasted_rtol = np.zeros((3,)) if rtol is None else rtol * np.ones((3,))
    self.assertion_fn(dist.rtol, broadcasted_rtol)

  @parameterized.named_parameters(
      ('atol_None_rtol_None', None, None),
      ('atol_0.1_rtol_None', 0.1, None),
      ('atol_None_rtol_0.1', None, 0.1),
      ('atol_0.05_rtol_0.1', 0.05, 0.1))
  def test_slack(self, atol, rtol):
    # slack is expected to combine as atol + rtol * |loc|.
    loc = np.asarray([0.1, 0.5, 1.5])
    target_value = (0 if atol is None else atol) + (
        0 if rtol is None else rtol) * np.abs(loc)
    dist_params = {'loc': loc, 'rtol': rtol, 'atol': atol}
    dist = self.distrax_cls(**dist_params)
    self.assertion_fn(dist.slack, target_value)

  def test_invalid_parameters(self):
    # atol/rtol must broadcast against loc; these shapes cannot.
    self._test_raises_error(
        dist_kwargs={'loc': 2., 'atol': np.array([0.1, 0.2])})
    self._test_raises_error(
        dist_kwargs={'loc': 2., 'rtol': np.array([0.1, 0.2])})

  @parameterized.named_parameters(
      ('1d', np.asarray([0., 1.])),
      ('2d', np.zeros((2, 3))),
  )
  def test_event_shape(self, loc):
    # Deterministic is a scalar-event distribution: event shape is ().
    dist_params = {'loc': loc}
    super()._test_event_shape((), dist_params)

  @chex.all_variants
  @parameterized.named_parameters(
      ('1d, no shape', [0., 1.], ()),
      ('1d, int shape', [0., 1.], 1),
      ('1d, 1-tuple shape', [0., 1.], (1,)),
      ('1d, 2-tuple shape', [0., 1.], (2, 2)),
      ('2d, no shape', np.zeros((2, 3)), ()),
      ('2d, int shape', np.zeros((2, 3)), 1),
      ('2d, 1-tuple shape', np.zeros((2, 3)), (1,)),
      ('2d, 2-tuple shape', np.zeros((2, 3)), (5, 4)),
  )
  def test_sample_shape(self, loc, sample_shape):
    dist_params = {'loc': np.asarray(loc)}
    super()._test_sample_shape(
        dist_args=(),
        dist_kwargs=dist_params,
        sample_shape=sample_shape)

  @chex.all_variants
  @parameterized.named_parameters(
      ('int32', jnp.int32),
      ('int64', jnp.int64),
      ('float32', jnp.float32),
      ('float64', jnp.float64))
  def test_sample_dtype(self, dtype):
    # Samples must inherit the dtype of loc.
    dist = self.distrax_cls(loc=jnp.zeros((), dtype=dtype))
    samples = self.variant(dist.sample)(seed=self.key)
    self.assertEqual(samples.dtype, dist.dtype)
    chex.assert_type(samples, dtype)

  @chex.all_variants
  @parameterized.named_parameters(
      ('1d, no shape', [0., 1.], ()),
      ('1d, int shape', [0., 1.], 1),
      ('1d, 1-tuple shape', [0., 1.], (1,)),
      ('1d, 2-tuple shape', [0., 1.], (2, 2)),
      ('2d, no shape', np.zeros((2, 3)), ()),
      ('2d, int shape', np.zeros((2, 3)), 1),
      ('2d, 1-tuple shape', np.zeros((2, 3)), (1,)),
      ('2d, 2-tuple shape', np.zeros((2, 3)), (5, 4)),
  )
  def test_sample_and_log_prob(self, loc, sample_shape):
    dist_params = {'loc': np.asarray(loc)}
    super()._test_sample_and_log_prob(
        dist_args=(),
        dist_kwargs=dist_params,
        sample_shape=sample_shape,
        assertion_fn=self.assertion_fn)

  @chex.all_variants
  @parameterized.named_parameters(
      ('log_prob', 'log_prob'),
      ('prob', 'prob'),
      ('cdf', 'cdf'),
      ('log_cdf', 'log_cdf'),
  )
  def test_method_with_inputs_at_loc(self, function_string):
    # Evaluate the densities exactly at loc.
    loc = np.asarray([0.1, -0.9, 5.1])
    dist_params = {'loc': loc}
    inputs = np.repeat(loc[None, :], 10, axis=0)
    super()._test_attribute(
        attribute_string=function_string,
        dist_kwargs=dist_params,
        call_args=(inputs,),
        assertion_fn=self.assertion_fn)

  @chex.all_variants
  @parameterized.named_parameters(
      ('log_prob', 'log_prob'),
      ('prob', 'prob'),
      ('cdf', 'cdf'),
      ('log_cdf', 'log_cdf'),
  )
  def test_method_with_inputs_at_random_inputs(self, function_string):
    # Evaluate the densities at points scattered around loc.
    loc = np.asarray([0.1, -0.9, 5.1])
    dist_params = {'loc': loc}
    inputs = 0.1 * np.random.normal(size=(10,) + (len(loc),))
    super()._test_attribute(
        attribute_string=function_string,
        dist_kwargs=dist_params,
        call_args=(inputs,),
        assertion_fn=self.assertion_fn)

  @chex.all_variants
  @parameterized.named_parameters(
      ('log_prob_stddev0', 'log_prob', 0.0, 0.05, 0.1),
      ('log_prob_stddev0.05', 'log_prob', 0.05, 0.05, 0.1),
      ('log_prob_stddev0.1', 'log_prob', 0.1, 0.05, 0.1),
      ('prob_stddev0', 'prob', 0.0, 0.05, 0.1),
      ('prob_stddev0.05', 'prob', 0.05, 0.05, 0.1),
      ('prob_stddev0.1', 'prob', 0.1, 0.05, 0.1),
      ('cdf_stddev0', 'cdf', 0.0, 0.05, 0.1),
      ('cdf_stddev0.05', 'cdf', 0.05, 0.05, 0.1),
      ('cdf_stddev0.1', 'cdf', 0.1, 0.05, 0.1),
      ('log_cdf_stddev0', 'log_cdf', 0.0, 0.05, 0.1),
      ('log_cdf_stddev0.05', 'log_cdf', 0.05, 0.05, 0.1),
      ('log_cdf_stddev0.1', 'log_cdf', 0.1, 0.05, 0.1),
  )
  def test_method_with_inputs_and_slack(self, function_string, inputs_stddev,
                                        atol, rtol):
    # Inputs are perturbed by `inputs_stddev`; atol/rtol define the slack
    # within which they still count as equal to loc.
    loc = np.asarray([[4., -1., 0.], [0.5, 0.1, -8.]])
    dist_params = {'loc': loc, 'atol': atol, 'rtol': rtol}
    inputs = loc[None, ...] + inputs_stddev * np.random.normal(
        size=(20,) + loc.shape)
    super()._test_attribute(
        attribute_string=function_string,
        dist_kwargs=dist_params,
        call_args=(inputs,),
        assertion_fn=self.assertion_fn)

  @chex.all_variants(with_pmap=False)
  @parameterized.named_parameters(
      ('entropy', [0., 1.], 'entropy'),
      ('mean', [0., 1.], 'mean'),
      ('mode', [0., 1.], 'mode'),
      ('variance', [0., 1.], 'variance'),
      ('variance from rank-2 params', np.ones((2, 3)), 'variance'),
      ('stddev', [-1.], 'stddev'),
      ('stddev from rank-2 params', -np.ones((2, 3)), 'stddev'),
  )
  def test_method(self, distr_params, function_string):
    super()._test_attribute(
        attribute_string=function_string,
        dist_kwargs={'loc': np.asarray(distr_params)},
        assertion_fn=self.assertion_fn)

  @chex.all_variants(with_pmap=False)
  @parameterized.named_parameters(
      ('kl distrax_to_distrax', 'kl_divergence', 'distrax_to_distrax'),
      ('kl distrax_to_tfp', 'kl_divergence', 'distrax_to_tfp'),
      ('kl tfp_to_distrax', 'kl_divergence', 'tfp_to_distrax'),
      ('cross-ent distrax_to_distrax', 'cross_entropy', 'distrax_to_distrax'),
      ('cross-ent distrax_to_tfp', 'cross_entropy', 'distrax_to_tfp'),
      ('cross-ent tfp_to_distrax', 'cross_entropy', 'tfp_to_distrax'))
  def test_with_two_distributions(self, function_string, mode_string):
    # loc2 broadcasts against loc1 (batch of 2 vs batch of 1).
    loc1 = np.random.randn(3)
    loc2 = np.stack([loc1, np.random.randn(3)], axis=0)
    super()._test_with_two_distributions(
        attribute_string=function_string,
        mode_string=mode_string,
        dist1_kwargs={
            'loc': loc1,
        },
        dist2_kwargs={
            'loc': loc2,
        },
        assertion_fn=self.assertion_fn)

  def test_jittable(self):
    super()._test_jittable((np.array([0., 4., -1., 4.]),))

  @parameterized.named_parameters(
      ('single element', 2),
      ('range', slice(-1)),
      ('range_2', (slice(None), slice(-1))),
      ('ellipsis', (Ellipsis, -1)),
  )
  def test_slice(self, slice_):
    # Slicing the distribution must slice all three parameters alike.
    loc = jnp.array(np.random.randn(3, 4, 5))
    atol = jnp.array(np.random.randn(3, 4, 5))
    rtol = jnp.array(np.random.randn(3, 4, 5))
    dist = self.distrax_cls(loc=loc, atol=atol, rtol=rtol)
    self.assertion_fn(dist[slice_].loc, loc[slice_])
    self.assertion_fn(dist[slice_].atol, atol[slice_])
    self.assertion_fn(dist[slice_].rtol, rtol[slice_])

  def test_slice_different_parameterization(self):
    # When atol/rtol have fewer batch dims than loc, slicing applies to
    # loc only and leaves the broadcastable atol/rtol untouched.
    loc = jnp.array(np.random.randn(3, 4, 5))
    atol = jnp.array(np.random.randn(4, 5))
    rtol = jnp.array(np.random.randn(4, 5))
    dist = self.distrax_cls(loc=loc, atol=atol, rtol=rtol)
    self.assertion_fn(dist[0].loc, loc[0])
    self.assertion_fn(dist[0].atol, atol)  # Not slicing atol.
    self.assertion_fn(dist[0].rtol, rtol)  # Not slicing rtol.


if __name__ == '__main__':
  absltest.main()
|
nilq/baby-python
|
python
|
import enum
from EoraReader import EoraReader
from PrimaryInputs import PrimaryInputs
from DomesticTransactions import DomesticTransactions
from os import listdir
from os.path import isfile, join
import pandas as pd
class CountryTableSegment(enum.Enum):
    """Selects which segment of an Eora country table to load."""
    # NOTE(review): "DomesticTransations" is a typo for
    # "DomesticTransactions", kept as-is because callers reference the
    # member by this name.
    DomesticTransations = 1
    PrimaryInputs = 2
class CountryTable(EoraReader):
    """Reads one or more Eora country-table files into a single DataFrame.

    `segment` selects the parser (DomesticTransactions vs PrimaryInputs);
    `file_path` may be a single file or a directory, in which case every
    file directly inside it is processed.
    """

    def __init__(self, segment, file_path):
        self.segment = segment
        self.table_class = []  # one parser instance per input file
        self.files = self.__get_files(file_path)
        self.__process_files(segment, file_path)

    def __get_files(self, path):
        """Return [path] if it is a file, else all files in the directory."""
        if not isfile(path):
            return [join(path, f) for f in listdir(path) if isfile(join(path, f))]
        return [path]

    def __process_files(self, segment, file_path):
        """Instantiate the segment-appropriate parser for each file."""
        for file in self.files:
            if segment == CountryTableSegment.DomesticTransations:
                self.table_class.append(DomesticTransactions(file))
            else:
                self.table_class.append(PrimaryInputs(file))

    def get_dataset(self, extended=False):
        """Concatenate every file's dataset into one DataFrame.

        The result is also cached on `self.df`.
        """
        # BUGFIX: DataFrame.append() was removed in pandas 2.0 (and was
        # O(n^2) when chained); gather the frames and concatenate once.
        frames = [t_class.get_dataset(extended) for t_class in self.table_class]
        df = pd.concat(frames) if frames else pd.DataFrame()
        self.df = df
        return df

    def append(self, country_table_part):
        """Merge another CountryTable's parsers into this one.

        BUGFIX: previously used list.append(), which nested the other
        table's whole parser list as a single element and would break
        get_dataset(); extend() adds the parsers individually.
        """
        return self.table_class.extend(country_table_part.table_class)
|
nilq/baby-python
|
python
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.builtin import FixSizedTypesCodec
from hazelcast.protocol.client_message import OutboundMessage, REQUEST_HEADER_SIZE, create_initial_buffer, RESPONSE_HEADER_SIZE
from hazelcast.protocol.builtin import StringCodec
from hazelcast.protocol.builtin import EntryListUUIDLongCodec
# hex: 0x1D0200
_REQUEST_MESSAGE_TYPE = 1901056
# hex: 0x1D0201
_RESPONSE_MESSAGE_TYPE = 1901057
_REQUEST_DELTA_OFFSET = REQUEST_HEADER_SIZE
_REQUEST_GET_BEFORE_UPDATE_OFFSET = _REQUEST_DELTA_OFFSET + LONG_SIZE_IN_BYTES
_REQUEST_TARGET_REPLICA_UUID_OFFSET = _REQUEST_GET_BEFORE_UPDATE_OFFSET + BOOLEAN_SIZE_IN_BYTES
_REQUEST_INITIAL_FRAME_SIZE = _REQUEST_TARGET_REPLICA_UUID_OFFSET + UUID_SIZE_IN_BYTES
_RESPONSE_VALUE_OFFSET = RESPONSE_HEADER_SIZE
_RESPONSE_REPLICA_COUNT_OFFSET = _RESPONSE_VALUE_OFFSET + LONG_SIZE_IN_BYTES
def encode_request(name, delta, get_before_update, replica_timestamps, target_replica_uuid):
    """Encode a request message of type 0x1D0200.

    Fixed-size fields (delta, get_before_update, target_replica_uuid) are
    written into the initial frame at their precomputed offsets; the
    variable-size fields (name, replica_timestamps) follow as additional
    frames.  The encode order below is part of the wire protocol -- do
    not reorder.  Generated protocol code: keep edits mechanical.
    """
    buf = create_initial_buffer(_REQUEST_INITIAL_FRAME_SIZE, _REQUEST_MESSAGE_TYPE)
    FixSizedTypesCodec.encode_long(buf, _REQUEST_DELTA_OFFSET, delta)
    FixSizedTypesCodec.encode_boolean(buf, _REQUEST_GET_BEFORE_UPDATE_OFFSET, get_before_update)
    FixSizedTypesCodec.encode_uuid(buf, _REQUEST_TARGET_REPLICA_UUID_OFFSET, target_replica_uuid)
    StringCodec.encode(buf, name)
    EntryListUUIDLongCodec.encode(buf, replica_timestamps, True)
    return OutboundMessage(buf, False)
def decode_response(msg):
    """Decode a response message of type 0x1D0201.

    Reads the fixed-size fields ("value" long, "replica_count" int) from
    the initial frame at their precomputed offsets, then the variable-size
    "replica_timestamps" entry list from the remaining frames.
    """
    initial_frame = msg.next_frame()
    response = dict()
    response["value"] = FixSizedTypesCodec.decode_long(initial_frame.buf, _RESPONSE_VALUE_OFFSET)
    response["replica_count"] = FixSizedTypesCodec.decode_int(initial_frame.buf, _RESPONSE_REPLICA_COUNT_OFFSET)
    response["replica_timestamps"] = EntryListUUIDLongCodec.decode(msg)
    return response
|
nilq/baby-python
|
python
|
"""
@author: Jim
@project: DataBrain
@file: exceptions.py
@time: 2020/10/4 17:03
@desc:
"""
class StopException(BaseException):
    """Raised to stop the crawler.

    (Translated from the original Chinese docstring "停止爬" = "stop
    crawling".)  NOTE(review): derives from BaseException rather than
    Exception, presumably so generic `except Exception` handlers cannot
    swallow it -- confirm before changing the base class.
    """
    pass
|
nilq/baby-python
|
python
|
import matplotlib
matplotlib.use('Agg')
import numpy as np, scipy as sci, os, pylab as pyl, sys, smtplib
from trippy import pill
from glob import glob
from astropy.io import fits
from astropy.wcs import WCS
from datetime import datetime, timedelta
# Send email.
def sendemail(from_addr, to_addr_list, cc_addr_list,
              subject, message,
              login, password,
              smtpserver='smtp.gmail.com:587'):
    """Send `message` through an authenticated (STARTTLS) SMTP server.

    Returns smtplib's dict of refused recipients (empty on full success).
    """
    header_lines = [
        'From: %s' % from_addr,
        'To: %s' % ','.join(to_addr_list),
        'Cc: %s' % ','.join(cc_addr_list),
        'Subject: %s' % subject,
    ]
    full_message = '\n'.join(header_lines) + '\n\n' + message
    server = smtplib.SMTP(smtpserver)
    server.starttls()
    server.login(login, password)
    problems = server.sendmail(from_addr, to_addr_list, full_message)
    server.quit()
    return problems
if __name__ == '__main__':
    # If there is no script created yesterday, then stop running the program.
    yesterday = (datetime.now() - timedelta(days = 1)).strftime("%Y%m%d")
    if [i for i in glob("/LWTanaly/*") if yesterday in i] == []:
        sys.exit()
    else:
        pass
    stkPaths_all = glob("/LWTanaly/{}/neo_stack/*_stack.fits".format(yesterday))
    try:
        # One entry per NEO: stack filenames look like "<neoName>-..._stack.fits".
        neoNames = list(set([path.split('/')[-1].split('-')[0] for path in stkPaths_all]))
        for neoName in neoNames:
            stkPaths_neo = np.sort([path for path in stkPaths_all if neoName in path])
            with open("/LWTanaly/{}/neo_stack/neoPosition_{}.dat".format(yesterday, neoName), 'r') as file:
                positions = file.readlines()
            # Only the first two stacks/positions are measured per NEO --
            # TODO confirm this limit is intentional.
            for stkPath, position in zip(stkPaths_neo[:2], positions[:2]):
                with fits.open(stkPath) as hdu:
                    stkData = hdu[0].data
                    stkHeader = hdu[0].header
                    zp = float(stkHeader['S-ZEROPT'])
                    obsTime = stkHeader['S-MIDATE']
                    expTime = np.round(stkHeader['S-EXPTIM'])
                    # position line format: "x,y,FWHM" (pixels).
                    tsfFWHM = float(position.split(',')[2])
                    motion = float(glob("/LWTdata/LWT_{}/lulinLWT/others/*ephem*".format(yesterday))[0].split('_')[-1].split('.')[0]) # "/hr
                    # The length of stacked NEO is estimated to be 2 times the FWHM.
                    exptime = tsfFWHM*2 / (motion/3600)
                    neoX = float(position.split(',')[0])
                    neoY = float(position.split(',')[1])
                    # Calculate the moving direction of the NEO.
                    with open("/LWTdata/LWT_{0}/lulinLWT/{0}.txt".format(yesterday), 'r') as file:
                        lines = file.readlines()
                    ephes = [line for line in lines if '|' in line][0].split('|')
                    # The beginning position.
                    ephemRA = [float([i for i in ephes[0].split(' ') if '.' in i][0])]
                    ephemDec = [float([i for i in ephes[0].split(' ') if '.' in i][1])]
                    # The last position.
                    ephemRA.append(float(ephes[-1].split(' ')[1]))
                    ephemDec.append(float(ephes[-1].split(' ')[2].split('\n')[0]))
                    # RA hours -> degrees (x15) before projecting to pixels.
                    ephemX, ephemY = WCS(stkPath).wcs_world2pix(np.asarray(ephemRA)*15, np.asarray(ephemDec), 1)
                    minXidx = ephemX.tolist().index(np.min(ephemX))
                    maxXidx = ephemX.tolist().index(np.max(ephemX))
                    # Quadrant-dependent position angle of motion.
                    # NOTE(review): np.arctan returns RADIANS but is mixed
                    # with the degree constants 180/360 here -- verify the
                    # intended units before relying on `direction`.
                    if (ephemY[minXidx] > ephemY[maxXidx]) and (minXidx == 1):
                        direction = 180 - np.arctan(abs(ephemY[0]-ephemY[1])/abs(ephemX[0]-ephemX[1]))
                    elif (ephemY[minXidx] > ephemY[maxXidx]) and (minXidx == 0):
                        direction = 360 - np.arctan(abs(ephemY[0]-ephemY[1])/abs(ephemX[0]-ephemX[1]))
                    elif (ephemY[minXidx] < ephemY[maxXidx]) and (minXidx == 1):
                        direction = 180 + np.arctan(abs(ephemY[0]-ephemY[1])/abs(ephemX[0]-ephemX[1]))
                    elif (ephemY[minXidx] < ephemY[maxXidx]) and (minXidx == 0):
                        direction = np.arctan(abs(ephemY[0]-ephemY[1])/abs(ephemX[0]-ephemX[1]))
                    direction = np.round(direction, 2)
                    # Do the pill aperture photometry.
                    phot = pill.pillPhot(stkData, repFact=10)
                    phot(neoX, neoY, radius=tsfFWHM*1.4, l=(exptime/3600.)*motion/1.22, a=direction, skyRadius=4*tsfFWHM, width=6*tsfFWHM, zpt=zp, exptime=exptime
                         , enableBGSelection=True, display=True, backupMode="smart", trimBGHighPix=3.)
                    neoMag = np.round((-2.5)*np.log10(phot.sourceFlux) + zp, 1)
                    # Append one astrometry/photometry line per stack.
                    with open("/LWTanaly/{}/ADESelement.dat".format(yesterday), 'a') as file:
                        neoRA, neoDec = WCS(stkPath).wcs_pix2world(neoX, neoY, 1)
                        neoRA = np.round(float(neoRA)/15, 2)
                        neoDec = np.round(float(neoDec), 1)
                        file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(neoName, obsTime, neoRA, neoDec, neoMag, expTime))
        # Create the ACP report in ADES format.
        os.system("/home/z94624/.conda/envs/py27/bin/python /home/z94624/Desktop/autoMPCreport.py /LWTanaly/{0}/{0}_IANCU-LWT_smoBEE.xml".format(yesterday))
        # Submit the report to the MPC.
        os.system("/anaconda3/bin/curl https://minorplanetcenter.net/submit_xml -F 'ack=Observations of {0} at {1}' -F 'ac2=lwt@gm.astro.ncu.edu.tw' -F 'obj_type=NEOCP' -F 'source=</LWTanaly/{1}/{1}_IANCU-LWT_smoBEE.xml'".format(neoNames, yesterday))
    # If there are errors occur, email users of the error message.
    except Exception as e:
        # NOTE(review): empty password committed here; credentials should
        # come from an environment variable or secret store.
        sendemail(from_addr = 'lwt@gm.astro.ncu.edu.tw',
                  to_addr_list = ['smoBEE@astro.ncu.edu.tw'],
                  cc_addr_list = [],
                  subject = '[ERROR] Phot_Report.py ({})'.format(datetime.now().strftime("%Y-%b-%d %H:%M:%S")),
                  message = "Error on line {}: [{}] {}".format(sys.exc_info()[-1].tb_lineno, type(e).__name__, e),
                  login = 'lwt@gm.astro.ncu.edu.tw',
                  password = '')
|
nilq/baby-python
|
python
|
import collections
import dataclasses
import gc
import multiprocessing
import os
import traceback
from multiprocessing import Lock, Pipe, Pool, Process, Value
from typing import Any, Callable, Dict, Iterable, List, Tuple
from .exceptions import (UnidentifiedMessageReceivedError,
WorkerHasNoUserFunctionError, WorkerIsDeadError)
from .messaging import (DataPayload, WorkerStatus, UserFuncException, SigClose, UserFunc,
WorkerError, StatusRequest)
import time
@dataclasses.dataclass
class WorkerProcess:
    '''Basic worker meant to be run in a process.

    Blocks on `pipe` and dispatches on the received message type:
    DataPayload -> run the installed user function and send back the
    result; UserFunc -> install it; StatusRequest -> send back `status`;
    SigClose (or a broken pipe) -> exit the process.
    '''
    # NOTE(review): multiprocessing.Pipe is a factory function, not a
    # connection type; annotation kept as-is to avoid a code change.
    pipe: multiprocessing.Pipe
    userfunc: UserFunc = None
    gcollect: bool = False
    verbose: bool = False
    logging: bool = False
    status: WorkerStatus = dataclasses.field(default_factory=WorkerStatus)

    @property
    def pid(self):
        # Pid of the process this worker is actually running in.
        return os.getpid()

    def __repr__(self):
        return f'{self.__class__.__name__}[{self.pid}]'

    def __call__(self):
        '''Call when opening the process.
        '''
        # main receive/send loop
        while True:
            # wait to receive data
            try:
                if self.logging: start = time.time()
                payload = self.recv()
                if self.logging: self.status.time_waiting += time.time() - start
            except (EOFError, BrokenPipeError):
                # Parent end of the pipe is gone; nothing left to do.
                exit(1)
            # kill worker
            if isinstance(payload, SigClose):
                exit(1)
            # process received data payload
            elif isinstance(payload, DataPayload):
                self.execute_and_send(payload)
            # load new function
            elif isinstance(payload, UserFunc):
                self.userfunc = payload
            # return status of worker
            elif isinstance(payload, StatusRequest):
                self.status.update_uptime()
                self.send(self.status)
            else:
                self.send(WorkerError(UnidentifiedMessageReceivedError()))

    def execute_and_send(self, payload: DataPayload):
        '''Execute the provide function on the payload (modifies in-place), and return it.
        '''
        # check if worker has a user function
        if self.userfunc is None:
            self.send(WorkerError(WorkerHasNoUserFunctionError()))
            return
        # update pid and apply userfunc
        payload.pid = self.pid
        # try to execute function and raise any errors
        try:
            if self.logging: start = time.time()
            payload.data = self.userfunc.execute(payload.data)
            if self.logging:
                self.status.time_working += time.time() - start
                self.status.jobs_finished += 1
        except BaseException as e:
            # Ship the exception back to the parent; also print the
            # traceback here since it is lost when pickled across the pipe.
            self.send(UserFuncException(e))
            traceback.print_exc()
            return
        # send result back to WorkerResource
        self.send(payload)
        # garbage collect if needed
        if self.gcollect:
            gc.collect()

    ############## Basic Send/Receive ##############
    def recv(self):
        # Blocking receive from the parent end of the pipe.
        if self.verbose: print(f'{self} waiting to receive')
        payload = self.pipe.recv()
        if self.verbose: print(f'{self} received: {payload}')
        return payload

    def send(self, data: Any):
        # Send any message object back to the parent.
        if self.verbose: print(f'{self} sending: {data}')
        return self.pipe.send(data)
|
nilq/baby-python
|
python
|
from collections import defaultdict
import numpy as np
import torch
class Filter(object):
    """Base class for activity-cube filters.

    Subclasses override __call__ to transform an
    `activity_spec.CubeActivities` object; the base implementation is the
    identity filter.
    """

    def __init__(self):
        pass

    def __call__(self, cube_acts, **kwargs):
        """Return `cube_acts` unchanged.

        Subclasses may accept additional keyword parameters if needed.
        """
        return cube_acts

    def __repr__(self):
        return f'{self.__module__}.{self.__class__.__name__}'
class OverlapCubeMerger(Filter):
'''
Merge overlap cubes into non-overlap ones.
'''
    def __init__(self, cube_length: int = 64, stride: int = 16):
        """`cube_length` must be an exact multiple of `stride` (enforced
        below), so overlapping cubes differ by whole stride steps."""
        self.stride = stride
        self.cube_length = cube_length
        # Number of stride-offset cubes that can overlap one time point.
        self.n_overlaps = self.cube_length // self.stride
        assert self.n_overlaps * self.stride == self.cube_length, \
            'Cube length must be divisible by stride'
def __call__(self, cube_acts):
if len(cube_acts) == 0:
return cube_acts
columns = cube_acts.columns
grouped_acts = self._group_acts(cube_acts.cubes.numpy(), columns)
splited_acts = self._split_acts(grouped_acts, columns)
merged_acts_list = []
for acts in splited_acts:
if acts.shape[0] == 1:
merged_acts_list.append(acts)
continue
interpolated_acts = self._interpolate_acts(acts, columns)
intervaled_acts = self._interval_acts(interpolated_acts, columns)
merged_acts = self._merge_acts(intervaled_acts, columns)
merged_acts_list.append(merged_acts)
merged_acts = np.concatenate(merged_acts_list, axis=0)
merged_cube_acts = cube_acts.__class__(
torch.as_tensor(merged_acts), cube_acts.video_name,
cube_acts.type_names)
return merged_cube_acts
def _group_acts(self, cube_acts, columns):
grouped_acts = defaultdict(list)
for cube in cube_acts:
act_id = int(round(cube[columns.id]))
act_type = int(round(cube[columns.type]))
grouped_acts[(act_type, act_id)].append(cube)
grouped_acts = [np.stack(acts) for acts in grouped_acts.values()]
return grouped_acts
def _split_acts(self, grouped_acts, columns):
splited_acts = []
for acts in grouped_acts:
time_order = np.argsort(acts[:, columns.t0])
acts = acts[time_order]
times = acts[:, columns.t0]
split_points = np.where(
times[1:] - times[:-1] >= self.cube_length)[0]
prev, split = 0, 0
for split in split_points + 1:
splited_acts.append(acts[prev:split])
prev = split
splited_acts.append(acts[split:])
return splited_acts
def _interpolate_acts(self, acts, columns):
interpolated_acts = [acts[0]]
for act in acts[1:]:
prev_act = interpolated_acts[-1]
inter_slots = int(round(
act[columns.t0] - prev_act[columns.t0])) // self.stride
assert inter_slots > 0
if inter_slots == 1:
interpolated_acts.append(act)
continue
box_delta_stride = act[columns.x0:columns.y1 + 1] - \
prev_act[columns.x0:columns.y1 + 1] / inter_slots
for inter_i in range(1, inter_slots):
inter_act = prev_act.copy()
inter_act[[columns.t0, columns.t1]] += \
self.stride * inter_i
inter_act[columns.x0:columns.y1 + 1] += \
box_delta_stride * inter_i
interpolated_acts.append(inter_act)
interpolated_acts.append(act)
interpolated_acts = np.stack(interpolated_acts)
return interpolated_acts
def _interval_acts(self, interpolated_acts, columns):
overlaped_acts = np.zeros((
self.n_overlaps, interpolated_acts.shape[0] + self.n_overlaps - 1,
interpolated_acts.shape[1]), dtype=np.float32)
for i_row in range(self.n_overlaps):
start = i_row
end = start + interpolated_acts.shape[0]
overlaped_acts[i_row, :start] = interpolated_acts[0:1]
overlaped_acts[i_row, :start, columns.t0:columns.t1 + 1] -= \
np.arange(i_row, 0, -1)[:, None] * self.stride
overlaped_acts[i_row, start: end] = interpolated_acts
overlaped_acts[i_row, end:] = interpolated_acts[-1:]
overlaped_acts[i_row, end:, columns.t0:columns.t1 + 1] += \
np.arange(1, self.n_overlaps - i_row)[:, None] * self.stride
intervaled_acts = overlaped_acts[0].copy()
intervaled_acts[:, columns.score] = overlaped_acts[
..., columns.score].mean(axis=0)
max_columns = [columns.t0, columns.x0, columns.y0]
min_columns = [columns.t1, columns.x1, columns.y1]
intervaled_acts[:, max_columns] = overlaped_acts[
..., max_columns].max(axis=0)
intervaled_acts[:, min_columns] = overlaped_acts[
..., min_columns].min(axis=0)
return intervaled_acts
def _merge_acts(self, intervaled_acts, columns):
length = intervaled_acts.shape[0] - self.n_overlaps + 1
connected_acts = np.stack([intervaled_acts[start:start + length]
for start in range(self.n_overlaps)])
scores = connected_acts[:, :, columns.score].mean(axis=0)
max_index = scores.argmax(axis=0)
selected_acts = connected_acts[
:, max_index % self.n_overlaps::self.n_overlaps]
merged_acts = selected_acts[0].copy()
merged_acts[:, columns.score] = scores[
max_index % self.n_overlaps::self.n_overlaps]
max_columns = [columns.t1, columns.x1, columns.y1]
min_columns = [columns.t0, columns.x0, columns.y0]
merged_acts[:, max_columns] = selected_acts[
..., max_columns].max(axis=0)
merged_acts[:, min_columns] = selected_acts[
..., min_columns].min(axis=0)
return merged_acts
|
nilq/baby-python
|
python
|
import argparse
import numpy as np
import time
import torch
import json
import torch.nn as nn
import torch.nn.functional as FN
import cv2
import random
from tqdm import tqdm
from solver import Solver
from removalmodels.models import Generator, Discriminator
from removalmodels.models import GeneratorDiff, GeneratorDiffWithInp, GeneratorDiffAndMask, GeneratorDiffAndMask_V2, VGGLoss
from os.path import basename, exists, join, splitext
from os import makedirs
from torch.autograd import Variable
from utils.data_loader_stargan import get_dataset
from torch.backends import cudnn
from utils.utils import show
from skimage.measure import compare_ssim, compare_psnr
class ParamObject(object):
    """Expose a (possibly nested) dictionary as an attribute-accessible object."""

    def __init__(self, adict):
        """Recursively convert *adict* entries into attributes; nested
        dictionaries become nested ParamObject instances."""
        self.__dict__.update(adict)
        for key, value in adict.items():
            if isinstance(value, dict):
                setattr(self, key, ParamObject(value))

    def __getitem__(self, key):
        # Dict-style access mirrors attribute access.
        return self.__dict__[key]

    def values(self):
        """Return the stored attribute values (dict-like view)."""
        return self.__dict__.values()

    def itemsAsDict(self):
        """Return a plain-dict copy of the stored attributes."""
        return dict(self.__dict__)
def get_sk_image(img):
    """Convert a 1 x C x H x W torch image in [-1, 1] into an
    H x W x 3 numpy array scaled to [0, 255]."""
    if img.shape[1] == 1:
        # Replicate a single channel three times to fake RGB.
        img = img[:, [0, 0, 0], ::]
    arr = img.data.cpu().numpy().transpose(0, 2, 3, 1)
    arr = np.clip(arr, -1, 1)
    return 255 * ((arr[0, ::] + 1) / 2)
def VOCap(rec, prec):
    """Per-class VOC-style average precision.

    rec, prec: (num_points, num_classes) arrays of recall / precision values
    ordered by descending score.
    Returns a (num_classes,) array of AP values.
    """
    nc = rec.shape[1]
    # Pad with (rec=0, prec=0) at the start and (rec=1, prec=0) at the end.
    mrec = np.concatenate([np.zeros((1, nc)), rec, np.ones((1, nc))], axis=0)
    mprec = np.concatenate([np.zeros((1, nc)), prec, np.zeros((1, nc))], axis=0)
    # Make precision monotonically non-increasing (precision envelope).
    for i in reversed(np.arange(mprec.shape[0] - 1)):
        mprec[i, :] = np.maximum(mprec[i, :], mprec[i + 1, :])
    #-------------------------------------------------------
    # Now do the step wise integration
    # Original matlab code is
    #-------------------------------------------------------
    # i=find(mrec(2:end)~=mrec(1:end-1))+1;
    # ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
    # Here we use boolean indexing of numpy instead of find
    steps = (mrec[1:, :] != mrec[:-1, :])
    ap = np.zeros(nc)
    # `range` (was `xrange`) keeps this working on both Python 2 and 3.
    for i in range(nc):
        ap[i] = sum((mrec[1:, :][steps[:, i], i] - mrec[:-1, :][steps[:, i], i]) * mprec[1:, ][steps[:, i], i])
    return ap
def computeAP(allSc, allLb):
    """Per-class average precision from raw scores and binary labels.

    allSc, allLb: (num_samples, num_classes) arrays of scores / {0,1} labels.
    Returns a (num_classes,) array of AP values (delegates to VOCap).
    """
    # Rank samples by descending score independently for each class.
    si = (-allSc).argsort(axis=0)
    cid = np.arange(allLb.shape[1])
    tp = allLb[si[:,cid],cid] > 0.
    fp = allLb[si[:,cid],cid] == 0.
    # Cumulative true/false positives down each ranked list.
    tp = tp.cumsum(axis=0).astype(np.float32)
    fp = fp.cumsum(axis=0).astype(np.float32)
    # Small epsilons guard against division by zero for absent classes.
    rec = (tp+1e-8)/((allLb>0.)+1e-8).sum(axis=0).astype(np.float32)
    prec = (tp+1e-8)/ (tp+ fp+1e-8)
    ap = VOCap(rec,prec)
    return ap
def gen_samples(params):
    """Evaluate object-removal models on a dataset split.

    Prints per-class tables of classifier accuracy/F1/AP, removal success,
    false-removal rate, perceptual (VGG) distance, PSNR/SSIM and, optionally,
    mask segmentation accuracy; can dump per-image results to JSON.

    params: dict of the argparse options assembled in __main__.
    NOTE(review): this file is Python 2 (print statements, xrange).
    """
    # For fast training
    #cudnn.benchmark = True
    gpu_id = 0
    use_cuda = params['cuda']
    b_sz = params['batch_size']
    # Optionally share one generator checkpoint across all evaluated models.
    if params['use_same_g']:
        if len(params['use_same_g']) == 1:
            gCV = torch.load(params['use_same_g'][0])
    solvers = []
    configs = []
    for i, mfile in enumerate(params['model']):
        model = torch.load(mfile)
        configs.append(model['arch'])
        configs[-1]['pretrained_model'] = mfile
        configs[-1]['load_encoder'] = 1
        configs[-1]['load_discriminator'] = 0 if params['evaluating_discr'] is not None else 1
        # Only the first solver carries the external evaluating discriminator.
        if i==0:
            configs[i]['onlypretrained_discr'] = params['evaluating_discr']
        else:
            configs[i]['onlypretrained_discr'] = None
        if params['withExtMask'] and params['mask_size']!= 32:
            configs[-1]['lowres_mask'] = 0
            configs[-1]['load_encoder'] = 0
        else:
            params['mask_size'] = 32
        solvers.append(Solver(None, None, ParamObject(configs[-1]), mode='test' if i > 0 else 'eval', pretrainedcv=model))
        solvers[-1].G.eval()
        if configs[-1]['train_boxreconst'] >0:
            solvers[-1].E.eval()
        if params['use_same_g']:
            solvers[-1].no_inpainter = 0
            solvers[-1].load_pretrained_generator(gCV)
            print 'loaded generator again'
    solvers[0].D.eval()
    solvers[0].D_cls.eval()
    dataset = get_dataset('', '', params['image_size'], params['image_size'], params['dataset'], params['split'],
                          select_attrs=configs[0]['selected_attrs'], datafile=params['datafile'], bboxLoader=1,
                          bbox_size = params['box_size'], randomrotate = params['randomrotate'],
                          randomscale=params['randomscale'], max_object_size=params['max_object_size'],
                          use_gt_mask = configs[0]['use_gtmask_inp'], onlyrandBoxes= params['extmask_type'] == 'randbox',
                          square_resize=configs[0].get('square_resize',0) if params['square_resize_override'] < 0 else params['square_resize_override'], filter_by_mincooccur= params['filter_by_mincooccur'],
                          only_indiv_occur = params['only_indiv_occur'])
    #gt_mask_data = get_dataset('','', params['mask_size'], params['mask_size'], params['dataset'], params['split'],
    #                           select_attrs=configs[0]['selected_attrs'], bboxLoader=0, loadMasks = True)
    #data_iter = DataLoader(targ_split, batch_size=b_sz, shuffle=True, num_workers=8)
    targ_split = dataset #train if params['split'] == 'train' else valid if params['split'] == 'val' else test
    data_iter = np.random.permutation(len(targ_split))
    # Restrict evaluation to images that also have ground-truth masks.
    if params['computeSegAccuracy']:
        gt_mask_data = get_dataset('','', params['mask_size'], params['mask_size'],
                                   params['dataset'],
                                   params['split'], select_attrs=configs[0]['selected_attrs'], bboxLoader=0, loadMasks = True)
        commonIds = set(gt_mask_data.valid_ids).intersection(set(dataset.valid_ids))
        commonIndexes = [i for i in xrange(len(dataset.valid_ids)) if dataset.valid_ids[i] in commonIds]
        data_iter = commonIndexes
    # Similarly restrict to images covered by the external mask source.
    if params['withExtMask'] and (params['extmask_type'] == 'mask'):
        ext_mask_data = get_dataset('','', params['mask_size'], params['mask_size'],
                                    params['dataset'] if params['extMask_source']=='gt' else params['extMask_source'],
                                    params['split'], select_attrs=configs[0]['selected_attrs'], bboxLoader=0, loadMasks = True)
        curr_valid_ids = [dataset.valid_ids[i] for i in data_iter]
        commonIds = set(ext_mask_data.valid_ids).intersection(set(curr_valid_ids))
        commonIndexes = [i for i in xrange(len(dataset.valid_ids)) if dataset.valid_ids[i] in commonIds]
        data_iter = commonIndexes
    if params['nImages'] > -1:
        data_iter = data_iter[:params['nImages']]
    print('-----------------------------------------')
    print('%s'%(' | '.join(targ_split.selected_attrs)))
    print('-----------------------------------------')
    flatten = lambda l: [item for sublist in l for item in sublist]
    selected_attrs = configs[0]['selected_attrs']
    if params['showreconst'] and len(params['names'])>0:
        params['names'] = flatten([[nm,nm+'-R'] for nm in params['names']])
    #discriminator.load_state_dict(cv['discriminator_state_dict'])
    c_idx = 0
    np.set_printoptions(precision=2)
    padimg = np.zeros((params['image_size'],5,3),dtype=np.uint8)
    padimg[:,:,:] = 128
    vggLoss = VGGLoss(network='squeeze')
    cimg_cnt = 0
    # Per-class accumulators, indexed by class id in selected_attrs.
    perclass_removeSucc = np.zeros((len(selected_attrs)))
    perclass_confusion = np.zeros((len(selected_attrs), len(selected_attrs)))
    perclass_classScoreDrop = np.zeros((len(selected_attrs), len(selected_attrs)))
    perclass_cooccurence = np.zeros((len(selected_attrs), len(selected_attrs))) + 1e-6
    perclass_vgg = np.zeros((len(selected_attrs)))
    perclass_ssim = np.zeros((len(selected_attrs)))
    perclass_psnr = np.zeros((len(selected_attrs)))
    perclass_tp = np.zeros((len(selected_attrs)))
    perclass_fp = np.zeros((len(selected_attrs)))
    perclass_fn = np.zeros((len(selected_attrs)))
    perclass_acc = np.zeros((len(selected_attrs)))
    perclass_counts = np.zeros((len(selected_attrs))) + 1e-6
    perclass_int = np.zeros((len(selected_attrs)))
    perclass_union = np.zeros((len(selected_attrs)))
    perclass_gtsize = np.zeros((len(selected_attrs)))
    perclass_predsize = np.zeros((len(selected_attrs)))
    perclass_segacc = np.zeros((len(selected_attrs)))
    perclass_msz = np.zeros((len(selected_attrs)))
    # Per-class classifier decision thresholds (zero = raw sign of the logit).
    #perclass_th = Variable(torch.FloatTensor(np.array([0., 0.5380775, -0.49303985, -0.48941165, 2.8394265, -0.37880898, 1.0709367, 1.6613332, -1.5602279, 1.2631614, 2.4104881, -0.29175103, -0.6607682, -0.2128999, -1.286599, -2.24577, -0.4130093, -1.0535073, 0.038890466, -0.6808476]))).cuda()
    perclass_th = Variable(torch.FloatTensor(np.zeros((len(selected_attrs))))).cuda()
    perImageRes = {'images':{}, 'overall':{}}
    total_count = 0.
    if params['computeAP']:
        allScores = []
        allGT = []
        allEditedSc = []
    if params['dilateMask']:
        # Square all-ones kernel used to dilate predicted masks.
        dilateWeight = torch.ones((1,1,params['dilateMask'],params['dilateMask']))
        dilateWeight = Variable(dilateWeight,requires_grad=False).cuda()
    else:
        dilateWeight = None
    all_masks = []
    all_imgidAndCls = []
    # ---------------- main evaluation loop over images ----------------
    for i in tqdm(xrange(len(data_iter))):
        #for i in tqdm(xrange(2)):
        idx = data_iter[i]
        x, real_label, boxImg, boxlabel, mask, bbox, curCls = targ_split[idx]
        cocoid = targ_split.getcocoid(idx)
        nnz_cls = real_label.nonzero()
        z_cls = (1-real_label).nonzero()
        z_cls = z_cls[:,0] if len(z_cls.size()) > 1 else z_cls
        # Add the batch dimension to every tensor.
        x = x[None,::]; boxImg = boxImg[None,::]; mask = mask[None,::]; boxlabel = boxlabel[None,::]; real_label = real_label[None,::]
        x, boxImg, mask, boxlabel = solvers[0].to_var(x, volatile=True), solvers[0].to_var(boxImg, volatile=True), solvers[0].to_var(mask, volatile=True), solvers[0].to_var(boxlabel, volatile=True)
        real_label = solvers[0].to_var(real_label, volatile=True)
        # Classifier scores on the untouched image.
        _, out_cls_real = solvers[0].classify(x)
        out_cls_real = out_cls_real[0]# Remove the singleton dimension
        pred_real_label = (out_cls_real > perclass_th)
        total_count += 1
        #;import ipdb; ipdb.set_trace()
        if params['computeAP']:
            allScores.append(out_cls_real[None,:])
            allGT.append(real_label)
        removeScores = out_cls_real.clone()
        perclass_acc[(pred_real_label.float() == real_label)[0,:].data.cpu().numpy().astype(np.bool)] += 1.
        if len(z_cls):
            perclass_fp[z_cls.numpy()] += pred_real_label.data.cpu()[z_cls]
        if len(nnz_cls):
            nnz_cls = nnz_cls[:,0]
            perclass_tp[nnz_cls.numpy()] += pred_real_label.data.cpu()[nnz_cls]
            perclass_fn[nnz_cls.numpy()] += 1-pred_real_label.data.cpu()[nnz_cls]
            perImageRes['images'][cocoid] = {'perclass': {}}
            if params['dump_cls_results']:
                perImageRes['images'][cocoid]['real_label'] = nnz_cls.tolist()
                perImageRes['images'][cocoid]['real_scores'] = out_cls_real.data.cpu().tolist()
            if not params['eval_only_discr']:
                # Try to remove each present class in turn.
                for cid in nnz_cls:
                    if configs[0]['use_gtmask_inp']:
                        mask = solvers[0].to_var(targ_split.getGTMaskInp(idx, configs[0]['selected_attrs'][cid])[None,::], volatile=True)
                    if params['withExtMask']:
                        if params['extmask_type'] == 'mask':
                            mask = solvers[0].to_var(ext_mask_data.getbyIdAndclass(cocoid,configs[0]['selected_attrs'][cid])[None,::], volatile=True)
                        elif params['extmask_type'] == 'box':
                            mask = solvers[0].to_var(dataset.getGTMaskInp(idx,configs[0]['selected_attrs'][cid], mask_type=2)[None,::],volatile=True)
                        elif params['extmask_type'] == 'randbox':
                            # Nothing to do here, mask is already set to random boxes
                            None
                    if params['computeSegAccuracy']:
                        gtMask = gt_mask_data.getbyIdAndclass(cocoid,configs[0]['selected_attrs'][cid]).cuda()
                    mask_target = torch.zeros_like(real_label)
                    fake_label = real_label.clone()
                    fake_label[0,cid] = 0.
                    mask_target[0,cid] = 1
                    # Generate the edited image and its removal mask.
                    fake_x, mask_out = solvers[0].forward_generator(x, imagelabel = mask_target, mask_threshold=params['mask_threshold'], onlyMasks=False, mask=mask, withGTMask=params['withExtMask'], dilate = dilateWeight)
                    _, out_cls_fake = solvers[0].classify(fake_x)
                    out_cls_fake = out_cls_fake[0]# Remove the singleton dimension
                    mask_out = mask_out.data[0,::]
                    if params['dump_mask']:
                        all_masks.append(mask_out.cpu().numpy())
                        all_imgidAndCls.append((cocoid,selected_attrs[cid]))
                    perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]] = {}
                    if params['computeSegAccuracy']:
                        union = torch.clamp((gtMask + mask_out),max=1.0).sum()
                        intersection = (gtMask * mask_out).sum()
                        img_iou = (intersection/(union+1e-6))
                        img_acc = (gtMask == mask_out).float().mean()
                        img_recall = ((intersection/(gtMask.sum()+1e-6)))
                        img_precision = (intersection/(mask_out.sum()+1e-6))
                        perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]].update({'iou': img_iou, 'rec':img_recall, 'prec': img_precision, 'acc': img_acc})
                        perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]]['gtSize'] = gtMask.mean()
                        perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]]['predSize'] = mask_out.mean()
                        # Compute metrics now
                        perclass_counts[cid] += 1
                        perclass_int[cid] += intersection
                        perclass_union[cid] += union
                        perclass_gtsize[cid] += gtMask.sum()
                        perclass_predsize[cid] += mask_out.sum()
                        perclass_segacc[cid] += img_acc
                    if params['dump_cls_results']:
                        perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]]['remove_scores'] = out_cls_fake.data.cpu().tolist()
                        perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]]['diff'] = out_cls_real.data[cid] - out_cls_fake.data[cid]
                    # Removal succeeds when the edited image no longer
                    # scores above the class threshold.
                    remove_succ = float((out_cls_fake.data[cid] < perclass_th[cid]))# and (out_cls_real[cid]>0.))
                    perclass_removeSucc[cid] += remove_succ
                    vL = vggLoss(fake_x, x).data[0]
                    perclass_vgg[cid] += 100.*vL
                    fake_x_sk = get_sk_image(fake_x)
                    x_sk = get_sk_image(x)
                    pSNR = compare_psnr(fake_x_sk,x_sk,data_range = 255.)
                    ssim = compare_ssim(fake_x_sk,x_sk,data_range = 255., multichannel=True)
                    msz = mask_out.mean()
                    if msz > 0.:
                        perclass_ssim[cid] += ssim
                        perclass_psnr[cid] += pSNR
                    if params['computeAP']:
                        removeScores[cid] = out_cls_fake[cid]
                    #---------------------------------------------------------------
                    # These are classes not trying to be removed;
                    # correctly detect on real image and not detected on fake image
                    # This are collateral damage. Count these
                    #---------------------------------------------------------------
                    false_remove = fake_label.byte()*(out_cls_fake<perclass_th)*(out_cls_real>perclass_th)
                    perclass_cooccurence[cid, nnz_cls.numpy()] += 1.
                    perclass_confusion[cid,false_remove.data.cpu().numpy().astype(np.bool)[0,:]] += 1
                    perImageRes['images'][cocoid]['perclass'][selected_attrs[cid]].update({'remove_succ':remove_succ, 'false_remove': float(false_remove.data.cpu().float().numpy().sum()), 'perceptual': 100.*vL})
                if params['computeAP']:
                    allEditedSc.append(removeScores[None,:])
                # Aggregate this image's per-class entries into one record.
                perImageRes['images'][cocoid]['overall'] = {}
                perImageRes['images'][cocoid]['overall']['remove_succ'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['remove_succ'] for cls in perImageRes['images'][cocoid]['perclass']])
                perImageRes['images'][cocoid]['overall']['false_remove'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['false_remove'] for cls in perImageRes['images'][cocoid]['perclass']])
                perImageRes['images'][cocoid]['overall']['perceptual'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['perceptual'] for cls in perImageRes['images'][cocoid]['perclass']])
                perImageRes['images'][cocoid]['overall']['diff'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['diff'] for cls in perImageRes['images'][cocoid]['perclass']])
                if params['computeSegAccuracy']:
                    perImageRes['images'][cocoid]['overall']['iou'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['iou'] for cls in perImageRes['images'][cocoid]['perclass']])
                    perImageRes['images'][cocoid]['overall']['acc'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['acc'] for cls in perImageRes['images'][cocoid]['perclass']])
                    perImageRes['images'][cocoid]['overall']['prec'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['prec'] for cls in perImageRes['images'][cocoid]['perclass']])
                    perImageRes['images'][cocoid]['overall']['rec'] = np.mean([perImageRes['images'][cocoid]['perclass'][cls]['rec'] for cls in perImageRes['images'][cocoid]['perclass']])
        elif params['dump_cls_results']:
            # No positive classes: still record the raw classifier output.
            perImageRes['images'][cocoid] = {'perclass': {}}
            perImageRes['images'][cocoid]['real_label'] = nnz_cls.tolist()
            perImageRes['images'][cocoid]['real_scores'] = out_cls_real.data.cpu().tolist()
    if params['dump_mask']:
        np.savez('allMasks.npz', masks=np.concatenate(all_masks).astype(np.uint8), idAndClass=np.stack(all_imgidAndCls))
    if params['computeAP']:
        allScores = torch.cat(allScores,dim=0).data.cpu().numpy()
        allGT= torch.cat(allGT,dim=0).data.cpu().numpy()
        apR = computeAP(allScores, allGT)
        if not params['eval_only_discr']:
            allEditedSc= torch.cat(allEditedSc,dim=0).data.cpu().numpy()
            apEdited = computeAP(allEditedSc, allGT)
    #for i in xrange(len(selected_attrs)):
    #    pr,rec,th = precision_recall_curve(allGTArr[:,i],allPredArr[:,i]);
    #    f1s = 2*(pr*rec)/(pr+rec); mf1idx = np.argmax(f1s);
    #    #print 'Max f1 = %.2f, th =%.2f'%(f1s[mf1idx], th[mf1idx]);
    #    allMf1s.append(f1s[mf1idx])
    #    allTh.append(th[mf1idx])
    # ------------- aggregate classification metrics -------------
    recall = perclass_tp/(perclass_tp+perclass_fn+1e-6)
    precision = perclass_tp/(perclass_tp+perclass_fp+1e-6)
    f1_score = 2.0* (recall*precision)/(recall+precision+1e-6)
    # Report only classes actually present in this split.
    present_classes = (perclass_tp+perclass_fn)>0.
    perclass_gt_counts = (perclass_tp+perclass_fn)
    apROverall = (perclass_gt_counts*apR).sum() / (perclass_gt_counts.sum())
    apR = apR[present_classes]
    recall = recall[present_classes]
    f1_score = f1_score[present_classes]
    precision = precision[present_classes]
    perclass_acc = perclass_acc[present_classes]
    present_attrs = [att for i, att in enumerate(targ_split.selected_attrs) if present_classes[i]]
    rec_overall = perclass_tp.sum()/ (perclass_tp.sum() + perclass_fn.sum() + 1e-6)
    prec_overall = perclass_tp.sum()/ (perclass_tp.sum() + perclass_fp.sum() + 1e-6)
    f1_score_overall = 2.0* (rec_overall*prec_overall)/(rec_overall+prec_overall+1e-6)
    print '------------------------------------------------------------'
    print '               Metrics have been computed                   '
    print '------------------------------------------------------------'
    print('Score: || %s |'%(' | '.join(['%6s'%att[:6] for att in ['Overall', 'OverCls']+present_attrs])))
    print('Acc  : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_acc/total_count).mean()]+[(perclass_acc/total_count).mean()]+list(perclass_acc/total_count)])))
    print('F1-sc: || %s |'%(' | '.join(['  %.2f' % sc for sc in [f1_score_overall]+[f1_score.mean()]+list(f1_score)])))
    print('recal: || %s |'%(' | '.join(['  %.2f' % sc for sc in [rec_overall]+[recall.mean()]+list(recall)])))
    print('prec : || %s |'%(' | '.join(['  %.2f' % sc for sc in [prec_overall]+[precision.mean()]+list(precision)])))
    if params['computeAP']:
        print('AP   : || %s |'%(' | '.join(['  %.2f' % sc for sc in [apROverall]+[apR.mean()]+list(apR)])))
    print('Count: || %s |'%(' | '.join(['  %4.0f' % sc for sc in [perclass_gt_counts.mean()]+[perclass_gt_counts.mean()]+list(perclass_gt_counts[present_classes])])))
    if not params['eval_only_discr']:
        print('R-suc: || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_removeSucc.sum()/perclass_cooccurence.diagonal().sum())]+[(perclass_removeSucc/perclass_cooccurence.diagonal()).mean()]+list(perclass_removeSucc/perclass_cooccurence.diagonal())])))
        print('R-fal: || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_confusion.sum()/(perclass_cooccurence.sum() - perclass_cooccurence.diagonal().sum()))]+[(perclass_confusion.sum(axis=1)/(perclass_cooccurence.sum(axis=1) - perclass_cooccurence.diagonal())).mean()]+list((perclass_confusion/perclass_cooccurence).sum(axis=1)/(perclass_cooccurence.shape[0]-1))])))
        print('Percp: || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_vgg.sum()/perclass_cooccurence.diagonal().sum())]+[(perclass_vgg/perclass_cooccurence.diagonal()).mean()]+list(perclass_vgg/perclass_cooccurence.diagonal())])))
        print('pSNR : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_psnr.sum()/perclass_cooccurence.diagonal().sum())]+[(perclass_psnr/perclass_cooccurence.diagonal()).mean()]+list(perclass_psnr/perclass_cooccurence.diagonal())])))
        print('ssim : || %s |'%(' | '.join(['  %.3f' % sc for sc in [(perclass_ssim.sum()/perclass_cooccurence.diagonal().sum())]+[(perclass_ssim/perclass_cooccurence.diagonal()).mean()]+list(perclass_ssim/perclass_cooccurence.diagonal())])))
        if params['computeAP']:
            print('R-AP : || %s |'%(' | '.join(['  %.2f' % sc for sc in [apEdited.mean()]+[apEdited.mean()]+list(apEdited)])))
        if params['computeSegAccuracy']:
            print('mIou : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_int.sum()/(perclass_union+1e-6).sum())]+[(perclass_int/(perclass_union+1e-6)).mean()]+list(perclass_int/(perclass_union+1e-6))])))
            print('mRec : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_int.sum()/(perclass_gtsize+1e-6).sum())]+[(perclass_int/(perclass_gtsize+1e-6)).mean()]+list(perclass_int/(perclass_gtsize+1e-6))])))
            print('mPrc : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_int.sum()/(perclass_predsize.sum()))]+[(perclass_int/(perclass_predsize+1e-6)).mean()]+list(perclass_int/(perclass_predsize+1e-6))])))
            print('mSzR : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_predsize.sum()/(perclass_gtsize.sum()))]+[(perclass_predsize/(perclass_gtsize+1e-6)).mean()]+list(perclass_predsize/(perclass_gtsize+1e-6))])))
            print('Acc  : || %s |'%(' | '.join(['  %.2f' % sc for sc in [(perclass_segacc.sum()/(perclass_counts.sum()))]+[(perclass_segacc/(perclass_counts+1e-6)).mean()]+list(perclass_segacc/(perclass_counts+1e-6))])))
            print('mSz  : || %s |'%(' | '.join(['  %.1f' % sc for sc in [(100.*(perclass_predsize.sum()/(params['mask_size']*params['mask_size']*perclass_counts).sum()))]+[(100.*(perclass_predsize/(params['mask_size']*params['mask_size']*perclass_counts+1e-6))).mean()]+list((100.*perclass_predsize)/(params['mask_size']*params['mask_size']*perclass_counts+1e-6))])))
        perImageRes['overall'] = {'iou': 0., 'rec': 0., 'prec':0., 'acc':0.}
        perImageRes['overall']['remove_succ'] =(perclass_removeSucc/perclass_cooccurence.diagonal()).mean()
        perImageRes['overall']['false_remove'] =(perclass_confusion/perclass_cooccurence).mean()
        perImageRes['overall']['perceptual'] =(perclass_vgg/perclass_cooccurence.diagonal()).mean()
        if params['computeSegAccuracy']:
            perImageRes['overall']['iou'] =(perclass_int/(perclass_union+1e-6)).mean()
            perImageRes['overall']['acc'] =(perclass_segacc/(perclass_counts+1e-6)).mean()
            perImageRes['overall']['prec'] =(perclass_int/(perclass_predsize+1e-6)).mean()
            perImageRes['overall']['psize'] =(perclass_predsize).mean()
            perImageRes['overall']['psize_rel'] =(perclass_predsize/(perclass_gtsize+1e-6)).mean()
            perImageRes['overall']['rec'] =(perclass_int/(perclass_gtsize+1e-6)).mean()
        if params['computeAP']:
            perImageRes['overall']['ap-orig'] = list(apR)
            perImageRes['overall']['ap-edit'] = list(apEdited)
    if params['dump_perimage_res']:
        json.dump(perImageRes, open(join(params['dump_perimage_res'], params['split']+'_'+ basename(params['model'][0]).split('.')[0]),'w'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--showdiff', type=int, default=0)
parser.add_argument('--showperceptionloss', type=int, default=0)
parser.add_argument('--showdeform', type=int, default=0)
parser.add_argument('--showmask', type=int, default=0)
#parser.add_argument('--showclassifier', type=int, default=0)
parser.add_argument('--showreconst', type=int, default=0)
parser.add_argument('--mask_threshold', type=float, default=0.3)
parser.add_argument('-d', '--dataset', dest='dataset', type=str, default='coco', help='dataset: celeb')
parser.add_argument('-m', '--model', type=str, default=[], nargs='+', help='checkpoint to resume training from')
parser.add_argument('-n', '--names', type=str, default=[], nargs='+', help='checkpoint to resume training from')
parser.add_argument('-b', '--batch_size', dest='batch_size', type=int, default=1, help='max batch size')
parser.add_argument('--sample_dump_dir', type=str, default='gen_samples', help='print every x iters')
parser.add_argument('--swap_attr', type=str, default='rand', help='which attribute to swap')
parser.add_argument('--split', type=str, default='val', help='which attribute to swap')
parser.add_argument('--nImages', type=int, default=-1)
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--max_object_size', type=float, default=0.3)
parser.add_argument('--filter_by_mincooccur', type=float, default=-1.)
parser.add_argument('--only_indiv_occur', type=float, default=0)
parser.add_argument('--square_resize_override',type=int, default=-1)
parser.add_argument('--dump_perimage_res', type=str, default=None, help='perImageResults')
parser.add_argument('--evaluating_discr', type=str, default=None)
parser.add_argument('--eval_only_discr', type=int, default=0)
parser.add_argument('--withExtMask', type=int, default=0)
parser.add_argument('--extmask_type', type=str, default='mask')
parser.add_argument('--computeSegAccuracy', type=int, default=0)
parser.add_argument('--dump_cls_results', type=int, default=0)
parser.add_argument('--extMask_source', type=str, default='gt')
parser.add_argument('--dilateMask', type=int, default=0)
parser.add_argument('--dump_mask', type=int, default=0)
parser.add_argument('--use_same_g', type=str, default=[], nargs='+', help='Evaluation scores to visualize')
# Deformations applied to mnist images;
parser.add_argument('--randomrotate', type=int, default=0)
parser.add_argument('--randomscale', type=float, nargs='+', default=[0.5,0.5])
parser.add_argument('--image_size', type=int, default=128)
parser.add_argument('--mask_size', type=int, default=32)
parser.add_argument('--scaleDisp', type=int, default=0)
parser.add_argument('--box_size', type=int, default=64)
parser.add_argument('--computeAP', type=int, default=1)
parser.add_argument('--datafile', type=str, default='datasetBoxAnn_80pcMaxObj.json')
parser.add_argument('--compute_deform_stats', type=int, default=0)
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
params['cuda'] = not args.no_cuda
print json.dumps(params, indent = 2)
gen_samples(params)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.contrib.admin import register
from .models import Organization, Employee, Position, Phone, PhoneType
from .forms import EmployeeAdminForm
@register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
    """Default admin interface for Organization; no customisation."""
    pass
@register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
    """Admin interface for Employee using the custom form from .forms."""
    # Custom form (validation/widgets defined in EmployeeAdminForm).
    form = EmployeeAdminForm
# Register the remaining models with the default ModelAdmin.
admin.site.register(Position)
admin.site.register(Phone)
admin.site.register(PhoneType)
|
nilq/baby-python
|
python
|
import os
import subprocess
from typing import Tuple
import pandas as pd
# Map raw crowdsourced sentiment strings to integer class labels:
# 0 = negative, 1 = positive, 2 = other/unusable (filtered out downstream).
LABEL_MAPPING = {
    "Negative": 0,
    "Positive": 1,
    "I can't tell": 2,
    "Neutral / author is just sharing information": 2,
    "Tweet not related to weather condition": 2,
}
def load_data() -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Download and split the crowdsourced weather-sentiment dataset.

    Returns (crowd_labels, df_train, df_dev, df_test):
    crowd_labels holds per-worker binary labels indexed by tweet_id, with
    labels withheld for all test tweets and for half of the dev and train
    tweets; the df_* frames hold tweet text (and gold sentiment for dev/test).

    Raises ValueError when not run from the crowdsourcing/ directory, and
    subprocess.CalledProcessError when the download script fails.
    """
    if os.path.basename(os.getcwd()) != "crowdsourcing":
        raise ValueError("Function must be called from crowdsourcing/ directory.")
    # Fetch the raw CSVs; surface the script's stderr before re-raising.
    try:
        subprocess.run(["bash", "download-data.sh"], check=True, stderr=subprocess.PIPE)
    except subprocess.CalledProcessError as e:
        print(e.stderr.decode())
        raise e
    gold_labels = pd.read_csv("data/weather-evaluated-agg-DFE.csv")
    gold_labels = gold_labels.set_index("tweet_id", drop=False)
    # Gold set: confidently judged tweets that are clearly Positive/Negative.
    labeled = gold_labels[
        (gold_labels["is_the_category_correct_for_this_tweet:confidence"] == 1)
        & (
            (gold_labels.sentiment == "Positive")
            | (gold_labels.sentiment == "Negative")
        )
    ]
    labeled = labeled.sample(frac=1, random_state=123)  # Shuffle data points.
    crowd_labels = pd.read_csv("data/weather-non-agg-DFE.csv")
    # Keep only the tweets with available ground truth.
    crowd_labels = crowd_labels.join(
        labeled, on=["tweet_id"], lsuffix=".raw", rsuffix=".gold", how="inner"
    )
    crowd_labels = crowd_labels[["tweet_id", "worker_id", "emotion"]]
    crowd_labels.emotion = crowd_labels.emotion.map(LABEL_MAPPING)
    crowd_labels = crowd_labels.rename(columns=dict(emotion="label"))
    crowd_labels = crowd_labels.set_index("tweet_id")
    # Drop crowd answers that are neither positive nor negative.
    crowd_labels = crowd_labels[crowd_labels["label"] != 2]
    # Dev split: first 50 shuffled gold tweets.
    df_dev = labeled[:50]
    df_dev = df_dev[["tweet_id", "tweet_text", "sentiment"]]
    df_dev.sentiment = df_dev.sentiment.map(LABEL_MAPPING).values
    # Remove half the labels
    crowd_labels = crowd_labels.drop(df_dev[: int(len(df_dev) / 2)].tweet_id)
    # Test split: next 50 gold tweets; withhold all their crowd labels.
    df_test = labeled[50:100]
    df_test = df_test[["tweet_id", "tweet_text", "sentiment"]]
    df_test.sentiment = df_test.sentiment.map(LABEL_MAPPING).values
    crowd_labels = crowd_labels.drop(df_test.tweet_id)
    # Train split: remaining tweets, text only (no gold sentiment column).
    df_train = labeled[100:][["tweet_id", "tweet_text"]]
    # Remove half the labels
    crowd_labels = crowd_labels.drop(df_train[: int(len(df_train) / 2)].tweet_id)
    return crowd_labels, df_train, df_dev, df_test
|
nilq/baby-python
|
python
|
import itertools
import random
import re
from dataclasses import dataclass
from enum import Enum
from typing import Dict, Iterable, Iterator, List, Tuple, cast
# Default side length of the square game board.
FIELD_SIZE = 10
class GameError(Exception):
    """Game rule violation: bad position, too few players, or use of a game
    that has not been started."""
class CellState(Enum):
    # State of a single board cell as recorded after attacks.
    UNKNOWN = "unknown"  # not yet attacked
    EMPTY = "empty"      # attacked, nothing there (a miss)
    WOUNDED = "wounded"  # hit, ship not yet sunk
    DEAD = "dead"        # hit, ship sunk
# Board column letters (Cyrillic а..к) and the accepted coordinate syntax:
# exactly one of those letters followed by a 1- or 2-digit number, e.g. "а1".
LETTERS = "абвгдежзик"
POSITION_PATTERN = r"[" + LETTERS + r"]{1}\d{1,2}"
@dataclass(eq=True, frozen=True)
class Player:
    # Immutable, hashable player record; players are compared by all fields.
    id: str
    name: str = "Nameless"
class Game:
    """Turn manager for a multiplayer battleship-style game.

    Tracks, for every player, the attackers' knowledge of that player's
    board (a flat size*size list of CellState) and whose turn it is.
    The game does not place ships itself; callers report shot results
    via :meth:`attack`.
    """

    def __init__(self, size: int = FIELD_SIZE) -> None:
        self.size = size
        self.players: List[Player] = []
        self.fields: Dict[str, List[CellState]] = {}
        self.started: bool = False

    def _init_field(self) -> List[CellState]:
        # Fresh flat board: every cell still unexplored.
        return [CellState.UNKNOWN for _ in range(self.size ** 2)]

    def _next_player_move(self) -> Tuple[Player, Player, int]:
        # Advance to the next (attacker, victim) pair; shot counter restarts at 1.
        p1, p2 = next(self.__next_iter)
        return p1, p2, 1

    def _same_player_move(self) -> Tuple[Player, Player, int]:
        # Same attacker keeps shooting; only the shot counter grows.
        return self.current_move[0], self.current_move[1], self.current_move[2] + 1

    def start(self, players: List[Player]) -> None:
        """Shuffle the players, create empty fields and set up the move order.

        Raises GameError when fewer than two players are given.
        """
        if len(players) < 2:
            raise GameError("Need at least two players")
        # Copy before shuffling so the caller's list is not reordered as a
        # side effect (the original shuffled the argument in place).
        self.players = list(players)
        random.shuffle(self.players)
        self.__next_iter: Iterable[Tuple[Player, Player]] = cast(
            Iterable[Tuple[Player, Player]],
            itertools.cycle(itertools.permutations(self.players, 2)),
        )
        # initiate first move
        self.current_move = self._next_player_move()
        for player in players:
            self.fields[player.id] = self._init_field()
        self.started = True

    def __iter__(self) -> Iterator[Tuple[Player, Player, int]]:
        return self

    def __next__(self) -> Tuple[Player, Player, int]:
        # Yields the *current* move; the turn only advances via attack().
        if not self.started:
            raise GameError("Can't iter over not started game")
        return self.current_move

    def _translate_position(self, position: str) -> int:
        """Convert a position such as "а1" into a flat field index.

        Raises GameError for anything that is not exactly one known letter
        followed by an in-range number.
        """
        position = position.strip().lower()
        # fullmatch (not match): the old prefix match let inputs such as
        # "а1x" through, and int("1x") then raised ValueError instead of
        # the expected GameError.
        if not re.fullmatch(POSITION_PATTERN, position):
            raise GameError("Wrong position!")
        letter = position[0]
        number = int(position[1:]) - 1
        if number < 0 or number >= self.size:
            raise GameError("Wrong position!")
        return LETTERS.index(letter) * self.size + number

    def attack(self, victim: Player, position: str, new_state: CellState) -> None:
        """Record the result of a shot at *victim* and advance the turn.

        A miss (EMPTY) passes the turn on; a hit (WOUNDED/DEAD) lets the
        same attacker shoot again with an incremented shot counter.
        """
        if not self.started:
            raise GameError("Can't attack in not started game")
        int_position = self._translate_position(position)
        self.fields[victim.id][int_position] = new_state
        if new_state is CellState.EMPTY:
            self.current_move = self._next_player_move()
        elif new_state is CellState.WOUNDED or new_state is CellState.DEAD:
            self.current_move = self._same_player_move()
        else:
            raise GameError(f"WTF? Unknown cell state: {new_state}")
|
nilq/baby-python
|
python
|
"""
Sams Teach Yourself Python in 24 Hours
by Katie Cunningham
Hour 4: Storing Text in Strings
Exercise:
1.
a) You're given a string that contains the body of an email.
If the email contains the word "emergency", print out
"Do you wnat to make this email urgent?"
If it contains the word "joke", print out
"Do you wnat to set this email as non-urgent?"
"""
#Hour 4: Storing Text in Strings
email_body = "This is an joke" #email type string
if 'Emeregency' in email_body:
importance = raw_input("Do you want to set this email as urgent? YES or NO: ")
if importance == "YES":
print ('Subject: Urgent! \n' + 'Message: ' + email_body.upper() + "!!!!!!!!!!!!") #urgent email
elif importance == "NO":
print ('Subject: Non-Urgent \n' + 'Message: ' + email_body.lower() + "!!!!!!!!!!!!") #non-urgent email
else:
print ('Subject: Regular \n' + 'Message: ' + email_body) #regular email
elif 'joke' in email_body:
importance = raw_input("Do you want to set this email as non-urgent? YES or NO: ")
if importance == "YES":
print ('Subject: Joke \n' + 'Message: ' + email_body.lower()) #non-urgent email
elif importance == "NO":
print ('Subject: Urgent \n' + 'Message: ' + email_body) #urgent email
else:
print ('Subject: Regular \n' + 'Message: ' + email_body) #regular email
|
nilq/baby-python
|
python
|
import sys
sys.path.append('../')  # make the parent directory importable (for dsm_creation)
import codecs
from docopt import docopt
from dsm_creation.common import *  # presumably provides load_pkl_files used below -- confirm
def main():
    """
    Weeds Precision - as described in:
    J. Weeds and D. Weir. 2003. A general framework for distributional similarity. In EMNLP.
    """
    # Get the arguments
    args = docopt("""Compute Weeds Precision for a list of (x, y) pairs and save their scores.
    Usage:
        weeds.py <testset_file> <dsm_prefix> <output_file>

        <testset_file> = a file containing term-pairs, labels and relations, each line in the form
                         of x\ty\tlabel\trelation
        <dsm_prefix> = the prefix for the pkl files for the vector space
        <output_file> = where to save the results: a tab separated file with x\ty\tlabel\trelation\tscore,
                        where the score is Weeds Precision (for y as the hypernym of x).
    """)

    testset_file = args['<testset_file>']
    dsm_prefix = args['<dsm_prefix>']
    output_file = args['<output_file>']

    # Load the term-pairs (one x\ty\tlabel\trelation tuple per line)
    with codecs.open(testset_file) as f_in:
        test_set = [tuple(line.strip().split('\t')) for line in f_in]

    # Load the vector space
    vector_space = load_pkl_files(dsm_prefix)
    target_index = { w : i for i, w in enumerate(vector_space.id2row) }
    cooc_mat = vector_space.cooccurrence_matrix

    # Compute the score for each term pair; terms missing from the space score 0.0
    with codecs.open(output_file, 'w', 'utf-8') as f_out:
        for (x, y, label, relation) in test_set:
            x_index, y_index = target_index.get(x, -1), target_index.get(y, -1)
            score = 0.0
            if x_index > -1 and y_index > -1:
                x_row, y_row = cooc_mat[x_index, :], cooc_mat[y_index, :]
                score = weeds_prec(x_row, y_row)
            # BUG FIX: the original used the Python-2 "print >>" statement and
            # omitted the relation column promised in the usage text above.
            f_out.write('\t'.join((x, y, label, relation, '%.5f' % score)) + '\n')
def weeds_prec(x_row, y_row):
    """
    WeedsPrec(x -> y) = (\sigma_c \in Fx,y w_x(c)) / (\sigma_c \in Fx w_x(c))
    Fx,y is the mutual contexts (non-zero entries) of x and y rows in the ppmi matrix
    w_x(c) is the weight of feature c in x's feature vector, i.e. ppmi(x, c)
    Fx is the row of x in the ppmi matrix
    :param x_row: x's row in the co-occurrence matrix
    :param y_row: y's row in the co-occurrence matrix
    :return: the WeedsPrec score (0.0 when x has no mass)
    """
    # Get the mutual contexts: use y as a binary vector and apply dot product with x:
    # If c is a mutual context, it is 1 in y_non_zero and the value ppmi(x, c) is added to the sum
    # Otherwise, if it is 0 in either x or y, it adds 0 to the sum.
    # NOTE(review): to_ones() appears to binarize y_row in place; callers here
    # pass a fresh row slice, but reusing y_row afterwards would see the
    # binarized values -- confirm against the matrix implementation.
    y_row.to_ones()
    numerator = x_row.multiply(y_row).sum() # dot-product
    # The sum of x's contexts (for ppmi) is the sum of x_row.
    denominator = x_row.sum()
    return 0.0 if denominator == 0 else numerator * (1.0 / denominator)
# CLI entry point
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
class renderizar(object):
    """Generates an HTML video page with subtitles and the HandTalk script."""

    def savarLegenda(self, legenda, local_legenda):
        """Write the subtitle text `legenda` to the file `local_legenda`.

        Returns True on success; I/O errors propagate to the caller.
        """
        # `with` closes the file itself -- the original also called
        # f.close() redundantly inside the block.
        with open(local_legenda, 'wt') as f:
            f.write(legenda)
        return True

    def criarArquivoHTML(self, titulo, local_video, local_legenda, token):
        """Fill model/template.html with the video data and write the page
        (plus a copy of handtalk.min.js) under video/<titulo>/.

        Returns (True, path_to_generated_html).
        """
        with open('model/template.html', 'rt') as f:
            modelo = f.read()
        # Inline HandTalk bootstrap injected into the template's {script} slot.
        JavaScript = 'var ht = new HT({\n'\
        'token: "'+token+'",\n' \
        'videoEnabled: true\n'\
        '});'
        modelo = modelo.format(titulo=titulo, video=local_video, legenda=local_legenda, script=JavaScript)
        video = 'video/'+titulo+'/video.html'
        with open(video, 'wt') as f:
            f.write(modelo)
        # Copy the HandTalk runtime next to the generated page.
        with open('model/handtalk.min.js', 'rt') as fr:
            with open('video/'+titulo+'/handtalk.min.js', 'wt') as fw:
                fw.write(fr.read())
        return True, video
|
nilq/baby-python
|
python
|
from utils import *

# Flatten every experiment option into "key=value" strings, preserving the
# dictionary's insertion order.
full_exps = [f"{key}={choice}" for key, choices in experiments.items() for choice in choices]
print(full_exps)
|
nilq/baby-python
|
python
|
from mock import patch
# Demonstrates that `from mock import patch` and the attribute accessed via
# `import mock` expose the same `patch.object` helper (both prints show it).
print(patch.object)
import mock
print(mock.patch.object)
|
nilq/baby-python
|
python
|
"""Generate a random key."""
import argparse
import os
from intervention_system.deploy import settings_key_path as default_keyfile_path
from intervention_system.util import crypto
def main(key_path):
    """Create a key via crypto.generate_box() and persist it with crypto.save_box()."""
    box = crypto.generate_box()
    crypto.save_box(box, key_path)

if __name__ == '__main__':
    # CLI: optional -k/--key overrides the default key file location.
    parser = argparse.ArgumentParser(description='Generate a key file.')
    parser.add_argument(
        '-k', '--key', type=str, default=default_keyfile_path,
        help='Path to save the key. Default: {}'.format(default_keyfile_path)
    )
    args = parser.parse_args()
    main(args.key)
|
nilq/baby-python
|
python
|
############################ USED LIBRARIES ############################
from tkinter import *
from tkinter import messagebox
import os, sys, time
import mysql.connector
import queries as q
import datetime as d
import functions as f
from constants import DEBUG, HOST, USER, PASSWORD, DATABASE
############################ BUTTON HANDLERS ############################
def btn1_handler():
    '''
    Bus coordinates\n
    Asks for a line id and shows the current coordinates of its buses.
    '''
    def btn1A_handler():
        # Query the DB for the requested line and pop up the coordinates.
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            line_id = str(entry.get())
            coordinates = q.get_Line_Bus_Coordinates(mycursor, line_id)
            coordinates = [temp[1] for temp in coordinates]  # keep only the coordinate column
            msg = ["################\nBus position: {}".format(foo) for foo in coordinates]
            messagebox.showinfo("MAP", "\n".join(msg))
            mydb.close()
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    # Small input dialog: one entry for the line id plus an ENTER button.
    window2 = Toplevel(window1)
    window2.title("Enter the line_id")
    window2.geometry('300x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry = Entry(window2)
    entry.grid(row=1, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn1A_handler)
    btnT1.grid(row=1, column=2)
def btn2_handler():
    '''
    Line Timetables\n
    Asks for a line id and day type, then shows the line's timetable in a
    scrollable two-column window (start/end stop departure times).
    '''
    def get_bus_stops(line_title):
        '''
        Extracts start and stop of line from its name\n
        DEPRECATED
        '''
        bus_stops = line_title.split(" to ")
        express_ind = bus_stops[1].find("(")  # "(...)" suffix marks an express line
        express = False
        if express_ind> 0:
            express = True
            bus_stops[1] = bus_stops[1][:express_ind-1]
        return bus_stops,express
    def create_timetable(line_info, data, day_type, width=30):
        # Build a scrollable window of read-only Entry cells: a bold header
        # row (start/end stop) followed by one row per itinerary.
        rows = len(data)
        bus_stops, express = get_bus_stops(line_info[1])
        headers = ["Start: ", "End: "]
        timetable_win = Toplevel(window1)
        if express:
            timetable_win.title("Line "+str(line_info[0])+" on "+day_type+" (EXPRESS)")
        else:
            timetable_win.title("Line "+str(line_info[0])+" on "+day_type)
        timetable_win.resizable(height=False, width=False)
        canvas = Canvas(timetable_win) #CANVAS
        canvas.pack(side=LEFT)
        scrollbar = Scrollbar(timetable_win, command=canvas.yview) #SCROLLBAR
        scrollbar.pack(side=LEFT, fill='y')
        canvas.configure(yscrollcommand = scrollbar.set)
        canvas.bind('<Configure>', lambda event:canvas.configure(scrollregion=canvas.bbox('all')))
        frame = Frame(canvas) #FRAME
        canvas.create_window((0,0), window=frame, anchor='nw')
        # Header row (bold)
        for j in range(2):
            e = Entry(frame, width=width, font="Helvetica 10 bold")
            e.insert(0, headers[j]+bus_stops[j])
            e.configure(state="readonly")
            e.grid(row=0,column=j)
        # Data rows
        for i in range(rows):
            for j in range(2):
                e = Entry(frame, width=width, font="Helvetica 10")
                e.insert(0, str(data[i][j]))
                e.configure(state="readonly")
                e.grid(row=i+1, column=j)
        timetable_win.mainloop()
    def btn2A_handler():
        # Fetch the timetable for the entered line/day and display it.
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            line_id = str(entry1.get())
            day_type = str(entry2.get())
            line_info, line_itineraries = q.get_Line_Timetable(mycursor, line_id, day_type)
            itin_table = [[y[1],y[2]] for y in line_itineraries]  # keep departure-time columns
            create_timetable(line_info, itin_table, day_type)
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    # Input dialog: line id (row 1) and day type (row 2).
    window2 = Toplevel(window1)
    window2.title("Enter the line_id and day_type")
    window2.geometry('350x60+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry1 = Entry(window2)
    entry1.grid(row=1, column=1)
    entry2 = Entry(window2)
    entry2.grid(row=2, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn2A_handler)
    btnT1.grid(row=2, column=2)
def btn3_handler():
    '''
    Available routes\n
    Shows a popup listing every line's id and name from the database.
    '''
    mydb = None
    try:
        mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
        mycursor = mydb.cursor()
        lines_info = q.get_All_Lines_Information(mycursor)
        msg = ["################\nLine id: {} Name: {}".format(foo[0], foo[1]) for foo in lines_info]
        messagebox.showinfo("Lines", "\n".join(msg))
    except Exception as e:
        messagebox.showinfo("Exception raised: ", e)
    finally:
        # Always release the connection -- the original leaked it whenever
        # the query or the popup raised.
        if mydb is not None:
            mydb.close()
    return
def btn4_handler():
    '''
    Card status\n
    Asks for a card id and shows its validity and eligible zones.
    '''
    def btn4A_handler():
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            card_id = str(entry.get())
            current_date = d.datetime.now()
            current_zone = -1  # presumably "no specific zone" sentinel -- confirm against q.get_Cards_Status
            isValid, msg, zones = q.get_Cards_Status(mycursor, card_id, current_date, current_zone)
            c = "Your card is: " + msg
            if isValid: c += "\nIts eligible zones are: " + str(zones)
            messagebox.showinfo("Card status", c)
            mydb.close()
            window2.destroy()
        except Exception as e:
            messagebox.showinfo("Exception raised:", e)
    # Input dialog: one entry for the card id plus an ENTER button.
    window2 = Toplevel(window1)
    window2.title("Enter your card id:")
    window2.geometry('290x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry = Entry(window2)
    entry.grid(row=1, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn4A_handler)
    btnT1.grid(row=1, column=2)
def btn5_handler():
    '''
    Closest bus stops\n
    Asks for an optional line id and the user's "x,y" position, then lists
    bus stops within 200 meters.
    '''
    def btn5A_handler():
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            line_id = str(entry1.get())
            if line_id=='': line_id='-1'  # empty field -> sentinel -1 (presumably: no line filter)
            line_id = int(line_id)
            #personCoordinates = f.get_My_Coordinates()
            temp = str(entry2.get()).split(",")  # position entered as "x,y"
            personCoordinates = [int(i) for i in temp]
            nearest_stops = q.get_Closest_BusStops(mycursor, personCoordinates, maxDist=200, line_id=line_id)
            msg = ["################\nBus stop name: {} Distance from you: {} meters".format(foo[1], int(foo[2])) for foo in nearest_stops]
            messagebox.showinfo("MAP", "\n".join(msg))
            mydb.close()
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    # Input dialog: line id (row 1) and position (row 2).
    window2 = Toplevel(window1)
    window2.title("Enter the line_id and your position:")
    window2.geometry('380x60+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry1 = Entry(window2)
    entry1.grid(row=1, column=1)
    entry2 = Entry(window2)
    entry2.grid(row=2, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn5A_handler)
    btnT1.grid(row=2, column=2)
def btn6_handler():
    '''
    Bus stop arrivals with ETAs\n
    Asks for a bus stop id and shows statistical ETAs for the current time.
    '''
    def btn6A_handler():
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            bus_stop_id = str(entry.get())
            current_time = d.datetime.now().time()
            # NOTE(review): weekday() is 0=Monday .. 6=Sunday, so "<6" classifies
            # Saturday as WEEKDAYS -- confirm this matches the DB's day types.
            if d.datetime.now().weekday()<6: day_type = 'WEEKDAYS'
            else: day_type = 'WEEKENDS'
            etas = q.get_BusStop_Statistical_ETAs(mycursor, bus_stop_id, current_time, day_type)
            msg = ["################\nLine number: {} ETA: {} minutes".format(foo[0], int(foo[1])) for foo in etas]
            if len(msg)>0: messagebox.showinfo("ETAS", "\n".join(msg))
            else: messagebox.showinfo("ETAS", "(no arrivals currently expected)")
            mydb.close()
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    # Input dialog: one entry for the bus stop id plus an ENTER button.
    window2 = Toplevel(window1)
    window2.title("Enter the bus stop id:")
    window2.geometry('300x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry = Entry(window2)
    entry.grid(row=1, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn6A_handler)
    btnT1.grid(row=1, column=2)
def btn7_handler():
    '''
    New cardholder and card\n
    Chains one-field dialogs (id -> name -> surname -> status -> duration ->
    zones) and then inserts the new cardholder and card.
    '''
    def btn7A_handler(cardholder_id, name, surname, status, start_date, card_duration, zones):
        # Insert the collected data and show the freshly assigned card id.
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            new_card_id = q.create_New_Cardholder_and_Card(mycursor, cardholder_id, name, surname, status, start_date, card_duration, zones)
            mydb.commit()
            mydb.close()
            msg = "# \nCard id assigned: {}".format(new_card_id)
            messagebox.showinfo("New Card id", msg)
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    def get_var(pointer):
        # Open a one-field dialog asking for the value named by `pointer`.
        window2 = Tk()
        window2.title("Enter {}".format(pointer))
        window2.geometry('300x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
        window2.resizable(width=False, height=False)
        entry = Entry(window2)
        entry.grid(row=1, column=1)
        btnT1 = Button(window2,text='ENTER',command=lambda: set_var(window2, entry, pointer))
        btnT1.grid(row=1, column=2)
    def set_var(mywindow, entry, pointer):
        # Store the entered value and open the dialog for the next field;
        # after "zones" everything is submitted via btn7A_handler.
        # NOTE(review): these names are globals here while btn7_handler also
        # assigns same-named locals below -- the two sets are distinct
        # bindings; confirm this is intended.
        global adt, name, surname, status, card_duration, zones
        temp = entry.get()
        if pointer=="id":
            adt = temp
            get_var("name")
        elif pointer=="name":
            name = temp
            get_var("surname")
        elif pointer=="surname":
            surname = temp
            get_var("status")
        elif pointer=="status":
            status = temp
            get_var("duration")
        elif pointer=="duration":
            card_duration = int(temp)
            get_var("zones")
        elif pointer=="zones":
            zones = temp.split(",")
            if DEBUG: print(name+", "+surname+", "+status+", "+str(card_duration)+", "+str(zones))
            btn7A_handler(adt, name, surname, status, start_date, card_duration, zones)
        mywindow.destroy()
    # Initial (local) field values; start_date is fixed to "now" at dialog start.
    adt = ""
    name = ""
    surname = ""
    status = ""
    start_date = d.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    card_duration = 0
    zones = []
    get_var("id")
    return
def btn8_handler():
    '''
    Renew card\n
    Chains one-field dialogs (id -> duration -> zones) and renews the card
    with "now" as the new start date.
    '''
    def btn8A_handler(card_id, start_date, card_duration, zones):
        #renew_Card(mycursor, card_id, start_date, card_duration, zones)
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            q.renew_Card(mycursor, card_id, start_date, card_duration, zones)
            mydb.commit()
            mydb.close()
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    def get_var(pointer):
        # Open a one-field dialog asking for the value named by `pointer`.
        window2 = Tk()
        window2.title("Enter {}".format(pointer))
        window2.geometry('300x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
        window2.resizable(width=False, height=False)
        entry = Entry(window2)
        entry.grid(row=1, column=1)
        btnT1 = Button(window2,text='ENTER',command=lambda: set_var(window2, entry, pointer))
        btnT1.grid(row=1, column=2)
    def set_var(mywindow, entry, pointer):
        # Store the entered value, then chain to the next field; after
        # "zones" the renewal is submitted.
        # NOTE(review): globals here vs. same-named locals below, as in btn7.
        global card_id, card_duration, zones
        temp = entry.get()
        if pointer=="id":
            card_id = temp
            get_var("duration")
        elif pointer=="duration":
            card_duration = int(temp)
            get_var("zones")
        elif pointer=="zones":
            zones = temp.split(",")
            if DEBUG: print(str(card_id)+", "+start_date+", "+str(card_duration)+", "+str(zones))
            btn8A_handler(card_id, start_date, card_duration, zones)
        mywindow.destroy()
    # Initial (local) field values; start_date is fixed to "now".
    card_id = ""
    start_date = d.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    card_duration = 0
    zones = []
    get_var("id")
    return
def btn9_handler():
    '''
    Line Information\n
    Asks for a line id and shows its number, name, first and last stop.
    '''
    #get_Line_Information(mycursor, line_id)
    def btn9A_handler():
        try:
            mydb = mysql.connector.connect(host=HOST, user=USER, password=PASSWORD, database=DATABASE)
            mycursor = mydb.cursor()
            line_id = str(entry.get())
            line_info = q.get_Line_Information(mycursor, line_id)
            line_id = str(line_info[0]); line_name = line_info[1]; start = line_info[2]; stop = line_info[3]
            msg = ["Number: "+line_id, "Name: "+line_name, "First stop: "+start, "Last stop: "+stop]
            messagebox.showinfo("Line {} information".format(line_id), "\n".join(msg))
            mydb.close()
        except Exception as e:
            messagebox.showinfo("Exception raised: ", e)
    # Input dialog: one entry for the line id plus an ENTER button.
    window2 = Toplevel(window1)
    window2.title("Enter the line_id")
    window2.geometry('300x50+{}+{}'.format(window1.winfo_x()+150, window1.winfo_y()+150))
    window2.resizable(width=False, height=False)
    entry = Entry(window2)
    entry.grid(row=1, column=1)
    btnT1 = Button(window2,text='ENTER',command=btn9A_handler)
    btnT1.grid(row=1, column=2)
    return
############################ MAIN PROGRAM ############################
if __name__=="__main__":
    # Setting the main window
    window1 = Tk()
    window1.title("Bringing U Spiti (B.U.S.)")
    window1.geometry('800x600+300+150')
    window1.resizable(width=False, height=False)
    # Setting the background
    bg_image = PhotoImage(file="background.gif")
    bg_label = Label(window1, image=bg_image)
    bg_label.place(x=0, y=0, relwidth=1, relheight=1)
    # Creating the main buttons (shared size/padding, laid out on a 4-column grid)
    HEIGHT = 1
    WIDTH = 20
    PADx = 3
    PADy = 2
    #object.place(x=110, y=0) or object.grid(row=20, column=2)
    btn1 = Button(window1,text='Current Bus Positions', command=btn1_handler, height = HEIGHT, width = WIDTH)
    btn1.grid(row=1, column=1, padx=PADx, pady=PADy)
    btn2 = Button(window1,text='Line Timetables', command=btn2_handler, height = HEIGHT, width = WIDTH)
    btn2.grid(row=1, column=2, padx=PADx, pady=PADy)
    btn3 = Button(window1,text='Available routes', command=btn3_handler, height = HEIGHT, width = WIDTH)
    btn3.grid(row=1, column=3, padx=PADx, pady=PADy)
    btn4 = Button(window1,text='Card status', command=btn4_handler, height = HEIGHT, width = WIDTH)
    btn4.grid(row=1, column=4, padx=PADx, pady=PADy)
    btn5 = Button(window1,text='Nearest Stops', command=btn5_handler, height = HEIGHT, width = WIDTH)
    btn5.grid(row=2, column=1, padx=PADx, pady=PADy)
    btn6 = Button(window1,text='Arrivals', command=btn6_handler, height = HEIGHT, width = WIDTH)
    btn6.grid(row=2, column=2, padx=PADx, pady=PADy)
    btn7 = Button(window1,text='New customer', command=btn7_handler, height = HEIGHT, width = WIDTH)
    btn7.grid(row=2, column=3, padx=PADx, pady=PADy)
    btn8 = Button(window1,text='Renew card', command=btn8_handler, height = HEIGHT, width = WIDTH)
    btn8.grid(row=2, column=4, padx=PADx, pady=PADy)
    btn9 = Button(window1,text='Line Information', command=btn9_handler, height = HEIGHT, width = WIDTH)
    btn9.grid(row=3, column=1, padx=PADx, pady=PADy)
    # Hand control to the Tk event loop.
    window1.mainloop()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Write a program that prints the reverse-complement of a DNA sequence
# You must use a loop and conditional

dna = 'ACTGAAAAAAAAAAA'

# Walk the sequence back-to-front, swapping each base for its complement;
# anything that is not A/T/C/G becomes 'N'.
rcdna = ''
for nt in reversed(dna):
    if nt == 'A':
        rcdna += 'T'
    elif nt == 'T':
        rcdna += 'A'
    elif nt == 'C':
        rcdna += 'G'
    elif nt == 'G':
        rcdna += 'C'
    else:
        rcdna += 'N'
print(rcdna)

"""
python3 anti.py
TTTTTTTTTTTCAGT
"""
|
nilq/baby-python
|
python
|
from math import sqrt
from numpy import matrix
from intpm import intpm

# Equality constraints A x = b and cost vector c of a small problem in
# standard form; the pairing x3 = 1 - x1, x4 = 1 - x2 below suggests slack
# variables for x1, x2 <= 1 -- confirm against intpm's expected inputs.
A = matrix([[1, 0, 1, 0], [0, 1, 0, 1]])
b = matrix([1, 1]).T
c = matrix([-1, -2, 0, 0]).T
# Barrier parameter and an analytically-derived starting point for it.
mu = 100
x1 = 0.5 * (-2 * mu + 1 + sqrt(1 + 4*mu*mu))
x2 = 0.5 * (-mu + 1 + sqrt(1 + mu * mu))
x0 = matrix([x1, x2, 1 - x1, 1 - x2]).T
# Run the interior point method solver.
intpm(A, b, c, x0, mu)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 10:57:29 2020
@author: cheritie
"""
# modules for the KL basis computation:
import numpy as np
from astropy.io import fits as pfits
from AO_modules.tools.tools import createFolder
import AO_modules.calibration.ao_cockpit_psim as aou
def compute_M2C(telescope, atmosphere, deformableMirror, param, nameFolder = None, nameFile = None,remove_piston = False,HHtName = None, baseName = None, SpM_2D = None, nZer = 3, SZ=None, mem_available = None, NDIVL = None, computeSpM = True, ortho_spm = True, computeSB = True, computeKL = True, minimF = False, P2F = None, alpha = None, beta = None, nmo = None, IF_2D = None, IFma = None, returnSB = False, returnHHt = False, extra_name = ''):
    """
    Compute a modal basis (modes-to-commands matrix) for the deformable
    mirror from its influence functions and the atmospheric statistics,
    and save it as a FITS file under nameFolder.

    - HHtName = None extension for the HHt Covariance file
    - baseName = None extension to the filename for basis saving
    - SpM_2D = None 2D Specific modes [dim,dim,nspm], if None then automatic
    - nZer = 3 number of zernike (PTT,...) for automatic computation
    - SZ = None telescope.resolution of FFts for HHt (By default SZ=2*dim)
    - mem_available = None Memory allocated for HHt computation (default is 100GB)
    - NDIVL = None Subdiv. of HHt task in ~NDIVL**2. None:-> mem_available
    - computeSpM = True Flag to compute Specific modes
    - ortho_spm = True Flag to orthonormalize specific modes (QR decomposition)
    - computeSB = True Flag to compute the Seed Basis
    - computeKL = True Flag to compute the KL basis
    - minimF = False Flag to minimize Forces
    - P2F = None Stiffness matrix (loaded by default)
    - alpha = None Force regularization parameter (expert)
    - beta = None Position damping parameter (expert)
    - nmo = None Number of modes to compute
    - IF_2D = None 2D Influence Functions (only for speeding up)
    - IFma = None Serial Influence Functions (only for speeding up)
    - returnSB = False Flag to return also the Seed Basis (w/ or w/o KL)

    NOTE(review): returnHHt is currently unused in the body.
    """
    if nmo is None:
        nmo=param['nModes']
    # File name prefix depends on the DM type and optional base name.
    if deformableMirror.isM4:
        initName = 'M2C_M4_'
    else:
        initName = 'M2C_'
    if baseName is not None:
        initName = initName + baseName+'_'
    if nameFolder is None:
        nameFolder = param['pathInput']
    createFolder(nameFolder)
    if nameFile is None:
        # NOTE(review): bare except -- falls back when param['extra'] is missing,
        # but would also hide any other error here.
        try:
            nameFile = initName + str(param['resolution'])+'_res'+param['extra']+extra_name
        except:
            nameFile = initName + str(param['resolution'])+'_res'+extra_name
    # the function takes as an input an object with obj.tel, obj.atm,obj.
    diameter = telescope.D
    r0 = atmosphere.r0
    L0 = atmosphere.L0
    pupil = telescope.pupil

    telescope.isPaired = False # separate from eventual atmosphere

    if IF_2D is None:
        # Build the cube of influence functions by poking every actuator.
        deformableMirror.coefs = np.eye(deformableMirror.nValidAct) # assign dm coefs to get the cube of IF in OPD
        print('COMPUTING TEL*DM...')
        print(' ')
        telescope*deformableMirror # propagate to get the OPD of the IFS after reflection
        print('PREPARING IF_2D...')
        print(' ')
        IF_2D = np.moveaxis(telescope.OPD,-1,0)

    nact = IF_2D.shape[0]

    print('Computing Specific Modes ...')
    print(' ')
    GEO = aou.mkp(telescope.resolution/telescope.resolution*diameter,telescope.resolution,diameter,0.)

    # Default specific modes: the first nZer Zernikes (piston/tip/tilt, ...).
    if nZer is not None and SpM_2D is None:
        SpM_2D = aou.give_zernike(GEO, diameter, nZer)
        nspm = nZer
    if SpM_2D is not None:
        nspm=SpM_2D.shape[2]

    if SZ is None:
        SZ = int(2*telescope.resolution) ## SZ=1110 for dxo=0.06944 and SZ=1542 for dxo=0.05

    print('COMPUTING VON KARMAN 2D PSD...')
    print(' ')
    PSD_atm , df, pterm = aou.VK_DSP_up(diameter,r0,L0,SZ,telescope.resolution,1,pupil)

    #%% ---------- EVALUATE SPLIT OF WORK UPON MEMORY AVAILABLE ----------

    #%% ----------COMPUTE HHt COVARIANCE MATRIX (OR LOAD EXISTING ONE) ----------
    #pdb.set_trace()
    # Load a cached covariance matrix if present; otherwise compute it in
    # NDIVL**2-ish chunks sized from the available memory and cache it.
    try:
        #HHt, PSD_atm, df = aou.load(nameFolder+'HHt_PSD_df_'+HHtName+'_r'+str(r0)+'_SZ'+str(SZ)+'.pkl')
        HHt, PSD_atm, df = aou.load(nameFolder+'HHt_PSD_df_'+HHtName+'.pkl')
        print('LOADED COV MAT HHt...')
        print(' ')
    except:
        print('COMPUTING COV MAT HHt...')
        print(' ')
        #pdb.set_trace()
        if mem_available is None:
            mem_available=100.e9
        if NDIVL is None:
            mem,NDIVL=aou.estimate_ndivl(SZ,telescope.resolution,nact,mem_available)
        if NDIVL == 0:
            NDIVL = 1
        BLOCKL=nact//NDIVL
        REST=nact-BLOCKL*NDIVL
        HHt = aou.DO_HHt(IF_2D,PSD_atm,df,pupil,BLOCKL,REST,SZ,0)
        try:
            aou.save(nameFolder+'HHt_PSD_df_'+HHtName+'.pkl',[HHt, PSD_atm, df])
        except:
            aou.save(nameFolder+'HHt_PSD_df_'+initName+'r'+str(r0)+'_SZ'+str(SZ)+'.pkl',[HHt, PSD_atm, df])

    #%% ----------PRECOMPUTE MOST USED QUANTITIES ----------
    if computeSpM == True or computeSB == True or computeKL == True:
        ## VALID OPD POINTS IN PUPIL
        idxpup=np.where(pupil==1)
        tpup=len(idxpup[0])
        ## Matrix of serialized IFs
        if IFma is None:
            print('SERIALIZING IFs...')
            print(' ')
            IFma=np.matrix(aou.vectorifyb(IF_2D,idxpup))
        ## Matrix of serialized Special modes
        print('SERIALIZING Specific Modes...')
        print(' ')
        Tspm=np.matrix(aou.vectorify(SpM_2D,idxpup))
        ## CROSS-PRODUCT OF IFs
        print('COMPUTING IFs CROSS PRODUCT...')
        print(' ')
        DELTA=IFma.T @ IFma

    #%% ----------COMPUTE SPECIFIC MODES BASIS ----------
    if minimF == True:
        # Build the block-diagonal stiffness matrix K (one P2F block per
        # M4 petal) and default regularization parameters.
        if P2F is None:
            P2F=np.float64(pfits.getdata(param['pathInput']+'P2F.fits'))*1.e6 #( in N/m)
        P2Ff=np.zeros([nact,nact],dtype=np.float64)
        nap=nact//6
        for k in range(0,6):
            P2Ff[k*nap:(k+1)*nap,k*nap:(k+1)*nap] = P2F.copy()
        K=np.asmatrix(P2Ff)
        del P2Ff
        if alpha is None:
            alpha = 1.e-12
        if beta is None:
            beta=1.e-6
    if computeSpM == True and minimF == True:
        print('BUILDING FORCE-OPTIMIZED SPECIFIC MODES...')
        print(' ')
        check=1
        amp_check=1.e-6
        SpM = aou.build_SpecificBasis_F(Tspm,IFma,DELTA,K,alpha,ortho_spm,check,amp_check)
        #        SpM_opd = IFma @ SpM
        print('CHECKING ORTHONORMALITY OF SPECIFIC MODES...')
        print(' ')
        DELTA_SpM_opd = SpM.T @ DELTA @ SpM
        print('Orthonormality error for SpM = ', np.max(np.abs(DELTA_SpM_opd/tpup-np.eye(nspm))))

    if computeSpM == True and minimF == False:
        check=1
        amp_check=1.e-6
        lim=1.e-3
        SpM = aou.build_SpecificBasis_C(Tspm,IFma,DELTA,lim,ortho_spm,check,amp_check)
        print('CHECKING ORTHONORMALITY OF SPECIFIC MODES...')
        print(' ')
        DELTA_SpM_opd = SpM.T @ DELTA @ SpM
        print('Orthonormality error for SpM = ', np.max(np.abs(DELTA_SpM_opd/tpup-np.eye(nspm))))

    #%% ----------COMPUTE SEED BASIS ----------
    if computeKL == True:
        computeSB = True

    if computeSB == True:
        #pdb.set_trace()
        # NOTE(review): both branches use SpM, which is only defined when
        # computeSpM was enabled (or in a previous call) -- confirm callers
        # never set computeSB/computeKL without computeSpM.
        if minimF == False:
            print('BUILDING SEED BASIS ...')
            print(' ')
            lim=1.e-3
            SB = aou.build_SeedBasis_C(IFma, SpM,DELTA,lim)
            nSB=SB.shape[1]
            DELTA_SB = SB.T @ DELTA @ SB
            print('Orthonormality error for '+str(nSB)+' modes of the Seed Basis = ',np.max(np.abs(DELTA_SB[0:nSB,0:nSB]/tpup-np.eye(nSB))))
        if minimF == True:
            print('BUILDING FORCE OPTIMIZED SEED BASIS ...')
            print(' ')
            SB = aou.build_SeedBasis_F(IFma, SpM, K, beta)
            nSB=SB.shape[1]
            DELTA_SB = SB.T @ DELTA @ SB
            print('Orthonormality error for '+str(nmo)+' modes of the Seed Basis = ',np.max(np.abs(DELTA_SB[0:nmo,0:nmo]/tpup-np.eye(nmo))))
        if computeKL == False:
            # Final basis = specific modes followed by the seed basis.
            BASIS=np.asmatrix(np.zeros([nact,nspm+nSB],dtype=np.float64))
            BASIS[:,0:nspm] = SpM
            BASIS[:,nspm:] = SB
            if remove_piston == True:
                BASIS = np.asarray(BASIS[:,1:])
                print('Piston removed from the modal basis!' )

    #%% ----------COMPUTE KL BASIS ----------
    if computeKL == True:
        check=1
        if nmo>SB.shape[1]:
            print('WARNING: Number of modes requested too high, taking the maximum value possible!')
            nmoKL = SB.shape[1]
        else:
            nmoKL = nmo
        KL=aou.build_KLBasis(HHt,SB,DELTA,nmoKL,check)
        #pdb.set_trace()
        DELTA_KL = KL.T @ DELTA @ KL
        print('Orthonormality error for '+str(nmoKL)+' modes of the KL Basis = ',np.max(np.abs(DELTA_KL[0:nmoKL,0:nmoKL]/tpup-np.eye(nmoKL))))
        # Final basis = specific modes followed by the KL modes.
        BASIS=np.asmatrix(np.zeros([nact,nspm+nmoKL],dtype=np.float64))
        BASIS[:,0:nspm] = SpM
        BASIS[:,nspm:] = KL
        if remove_piston == True:
            BASIS = np.asarray(BASIS[:,1:])
            print('Piston removed from the modal basis!' )
        # save output in fits file
        hdr=pfits.Header()
        hdr['TITLE'] = initName+'_KL' #'M4_KL'
        empty_primary = pfits.PrimaryHDU(header=hdr)
        ## CAREFUL THE CUBE IS SAVED AS A NON SPARSE MATRIX
        primary_hdu = pfits.ImageHDU(BASIS)
        hdu = pfits.HDUList([empty_primary, primary_hdu])
        hdu.writeto(nameFolder+nameFile+'.fits',overwrite=True)
        # NOTE(review): returning here makes the returnSB branch below
        # reachable only when computeKL is False.
        return BASIS

    if returnSB == True:
        hdr=pfits.Header()
        hdr['TITLE'] = initName+'_SB' #'M4_KL'
        empty_primary = pfits.PrimaryHDU(header=hdr)
        ## CAREFUL THE CUBE IS SAVED AS A NON SPARSE MATRIX
        primary_hdu = pfits.ImageHDU(BASIS)
        hdu = pfits.HDUList([empty_primary, primary_hdu])
        hdu.writeto(nameFolder+nameFile+'.fits',overwrite=True)
        return BASIS,SB
|
nilq/baby-python
|
python
|
from collections import deque
from re import sub
from sys import stderr

# "War" card game referee: reads both players' decks from stdin, plays
# until one deck is empty, and prints the winner and round count
# (or "PAT" when a war cannot be resolved).
p1 = deque([], maxlen = 52)
p2 = deque([], maxlen = 52)
n = int(input()) # the number of cards for player 1
for i in range(n):
    p1.append(input()) # the n cards of player 1
m = int(input()) # the number of cards for player 2
for i in range(m):
    p2.append(input()) # the m cards of player 2
# Sorted from lowest rank to highest.
ranks = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']
# Decks only when case of war.
p1_war = []
p2_war = []
rounds = 0
try:
    while p1 and p2:
        rounds += 1
        c1 = p1.popleft()
        v1 = sub('[DCHS]', '', c1)  # strip the suit letter, keep the rank
        print('Card for P1: {}'.format(c1), file = stderr)
        c2 = p2.popleft()
        v2 = sub('[DCHS]', '', c2)
        print('Card for P2: {}'.format(c2), file = stderr)
        # Order is : P1 cards always first, then P2 cards.
        result = [card for card in p1_war] + [c1] + [card for card in p2_war] + [c2]
        print('The card that will be won: {}'.format(result), file = stderr)
        if ranks.index(v1) > ranks.index(v2):
            print('P1 won this turn.', file = stderr)
            p1.extend(result)
            p1_war, p2_war, result = [], [], []
        elif ranks.index(v1) < ranks.index(v2):
            print('P2 won this turn.', file = stderr)
            p2.extend(result)
            p1_war, p2_war, result = [], [], []
        else:
            print('WAR !', file = stderr)
            # Exception to short-circuit the flow.
            # A war needs 3 face-down cards plus 1 battle card; with 3 or
            # fewer cards left the war cannot be resolved -> PAT.
            if len(p1) <= 3 or len(p2) <= 3:
                raise ValueError
            p1_war.append(c1)
            p2_war.append(c2)
            for x in range(3):
                p1_war.append(p1.popleft())
                p2_war.append(p2.popleft())
            rounds -= 1  # a war and its resolving battle count as one round
        print('At end of turn {} for P1: {}'.format(rounds, p1), file = stderr)
        print('At end of turn {} for P2: {}\n'.format(rounds, p2), file = stderr)
    if p1:
        print('1 {}'.format(rounds))
    else:
        print('2 {}'.format(rounds))
except ValueError:
    print('PAT')
|
nilq/baby-python
|
python
|
import operator
from pandas import notnull, isnull
from zeex.core.utility.collection import Eval
# Maps operator symbols/keywords to their binary functions from ``operator``.
STRING_TO_OP = {
    '+' : operator.add,
    '-' : operator.sub,
    '*' : operator.mul,
    '/' : operator.truediv,
    '%' : operator.mod,
    '^' : operator.xor,
    '>' : operator.gt,   # bugfix: was operator.ge; '>' must be strict
    '<' : operator.lt,   # bugfix: was operator.le; '<' must be strict
    '==': operator.eq,
    '&' : operator.and_,
    'AND': operator.and_,
    'OR': operator.or_   # bugfix: was operator.xor; OR(True, True) must be True
}
# Lower-cased string spellings treated as null/missing values.
NULL_VALUES = ['nan', 'na', 'none', 'null', '']
def is_like(x, y):
    """Return True if *x* is contained in *y* (substring or member)."""
    # Idiom fix: ``in`` already yields a bool; no if/else needed.
    return x in y
def not_like(x, y):
    """Return True if *x* is NOT contained in *y* (substring or member)."""
    # Idiom fix: ``not in`` already yields a bool; no if/else needed.
    return x not in y
def is_null(x):
    """True when *x* is pandas-null or spells one of NULL_VALUES."""
    if isnull(x):
        return True
    return str(x).lower() in NULL_VALUES
def not_null(x):
    """True when *x* is neither pandas-null nor one of NULL_VALUES."""
    if isnull(x):
        return False
    return str(x).lower() not in NULL_VALUES
# Multi-word textual operators mapped to their predicate functions.
WORD_TO_OP = {'IS LIKE':is_like,
              'IS NOT LIKE': not_like,
              'IS NULL': is_null,
              'IS NOT NULL': not_null}
# Same keys with spaces replaced by '__' so a word operator survives
# the space-based tokenization in CriteriaParser.parse.
WORD_TO_MAP = {k: k.replace(' ', '__') for k in WORD_TO_OP.keys()}
class CriteriaParser(object):
    """Parse simple criteria strings (e.g. "ID > 10") into evaluable pieces.

    Symbolic operators come from STRING_TO_OP and multi-word operators
    (e.g. "IS NULL") from WORD_TO_OP.
    """
    def __init__(self):
        # Per-instance copies so mutation never alters the module tables.
        self.options = STRING_TO_OP.copy()
        self.words = WORD_TO_OP.copy()
    def parse(self, string):
        """Tokenize *string* into operands and operator callables.

        NOTE(review): upper() is applied to the whole string, so operand
        text is upper-cased too — confirm callers expect that.
        """
        string = string.upper().lstrip().rstrip()\
            .replace(' ', ' ').replace('""', '').replace("''", "")
        for w in self.words.keys():
            # Normalize word operators replacing spaces with '__'
            piece = w.replace(' ', '__')
            string = string.replace(w, piece)
        for o in self.options.keys():
            # Normalize spacing around operators.
            # First strip any spacing around the symbol, then re-add
            # exactly one space on each side so split(' ') isolates it.
            piece = " {} ".format(o)
            string = string.replace(piece, o)
            string = string.replace(o, piece)
        data = list(string.split(' ')) # Split string by spaces
        new_data = []
        for d in data:
            try:
                # Try a direct match
                new_data.append(self.options[d])
            except KeyError:
                try:
                    # Try to map the word to a word operator.
                    op_word = self.words[d.replace('__', ' ')]
                    new_data.append(op_word)
                except KeyError:
                    # No parsing done on this word, append it to the list.
                    new_data.append(d)
        return new_data
    def evaluate(self, data: list):
        """Apply the parsed operator: binary [a, op, b] or unary [a, op].

        Returns None for any other shape.
        """
        if len(data) == 3:
            op1, op, op2 = data
            return op(op1, op2)
        elif len(data) == 2:
            op1, op = data
            return op(op1)
        return None
# Quick self-test: parse and evaluate a few sample criteria, printing
# the original string, the parsed pieces, and the evaluation result.
cp = CriteriaParser()
tests = ['ID > 10', 'FIELD == YO', "CHECK < MATE", "CAT IS LIKE DOG",
         "'' IS NOT NULL", "'' IS NULL", "CAT IS LIKE CAT"]
for t in tests:
    p = cp.parse(t)
    e = cp.evaluate(p)
    print("ORIGINAL: {}\n {}: {}".format(t, p, e))
|
nilq/baby-python
|
python
|
# tensorflow core — TF1.x "getting started" walk-through: constants,
# placeholders, variables, a linear model, a loss, and training.
import tensorflow as tf
# a=3,b=5,(a+b)+(a*b)=?
node1 = tf.constant(3.0)
node2 = tf.constant(4.0)
print("node1:", node1, "\nnode2:", node2)
sess = tf.Session()
print("sess.run([node1,node2]):", sess.run([node1, node2]))
node3 = tf.add(node1, node2)
print("node3:", node3)
print("sess.run(node3):", sess.run(node3))
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
print("sess.run(adder_node,{a:3,b:4.5}):",
      sess.run(adder_node, {a: 3, b: 4.5}))
print("sess.run(adder_node,{a:[1,3],b:[2,4]}):",
      sess.run(adder_node, {a: [1, 3], b: [2, 4]}))
adderandtriple = adder_node * 3
print("sess.run(adderandtriple,{a:3,b:4.5}):",
      sess.run(adderandtriple, {a: 3, b: 4.5}))
# Trainable parameters of the linear model y = W*x + b.
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(dtype=tf.float32)
# bugfix: tf.matmul requires rank-2 tensors, but x is fed 1-D vectors
# below; elementwise multiply with broadcasting is the intended op.
linearmodel = W * x + b
initv = tf.global_variables_initializer()
sess.run(initv)
print("sess.run(linearmodel,{x:[1,2,3,4]}):",
      sess.run(linearmodel, {x: [1, 2, 3, 4]}))
y = tf.placeholder(tf.float32)
# Mean-squared-error loss against the targets y.
squareddeltas = tf.square(linearmodel - y)
loss = tf.reduce_mean(squareddeltas)
print("sess.run(loss,{x:[1,2,3,4],y:[0,-1,-2,-3]}):",
      sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# Manually set the optimal parameters (W=-1, b=1): loss becomes 0.
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print("sess.run(loss,{x:[1,2,3,4],y:[0,-1,-2,-3]}):",
      sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
# Recover the optimum by gradient descent from the initial values.
opti = tf.train.GradientDescentOptimizer(0.01)
train = opti.minimize(loss)
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print("sess.run([W,b]):", sess.run([W, b]))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""
display.py is the portion of Death God that manages the screen.
by William Makley
"""
import pygame
from pygame.locals import Rect
from . import colors
from . import settings
from . import fonts
from .map_view import MapView
from . import message
from .status_view import StatusView
from . import event
from . import tile
from .view import View
(ASCII, TILES) = list(range(2))
class Screen(View):
    """Wraps the pygame screen in a View."""
    def __init__(self):
        # The window must be created before the display surface exists.
        self.__window = pygame.display.set_mode(settings.screen_dimensions)
        pygame.display.set_caption(settings.window_caption)
        screen = pygame.display.get_surface()
        # Root view: no parent, covers the whole display surface.
        View.__init__(self,
                      None,
                      Rect(0, 0, screen.get_width(), screen.get_height()),
                      colors.black,
                      screen)
        # Repaint whenever a DisplayNeedsUpdate event is dispatched.
        event.add_handler(self.handle_display_needs_update, event.DisplayNeedsUpdate)
    @property
    def window(self):
        # The underlying pygame window surface.
        return self.__window
    def refresh(self):
        # Push the back buffer to the physical display.
        pygame.display.flip()
    def paint(self):
        # Paint the view hierarchy, then flip the buffers.
        View.paint(self)
        self.refresh()
    def handle_display_needs_update(self, _):
        # Event callback: ignore the event payload and just repaint.
        self.paint()
screen = Screen()
class Display(View):
    """The display class.
    It is a special type of View that paints directly to the screen
    instead of a parent view.

    Owns the map, message and status subviews and refreshes them
    whenever the game state changes.
    """
    def __init__(self, player_object, map_object):
        self.player_object = player_object
        self.map_object = map_object
        # Fill the whole screen with this view.
        View.__init__(self,
                      parent=screen,
                      rect=Rect(0, 0, screen.width, screen.height),
                      clear_color=colors.black)
        # whoops, so much for the surface made by View.__init__
        # set up event handlers
        event.add_handler(
            self.handle_game_state_changed,
            event.GameStateChanged
        )
        self.set_clear_on_paint(True)
        # Subviews: map, scrolling message log, and player status bar.
        self.map_view = MapView(
            self,
            Rect(settings.map_view_position, settings.map_view_size),
            settings.map_center_color,
            settings.num_tiles_displayed
        )
        self.map_view.sprite_list = tile.get_tile_sprites(self.map_view.tile_size)
        self.message_view = message.MessageView(
            self,
            Rect(settings.message_view_position, settings.message_view_size),
            settings.message_view_bg_color,
            fonts.Font(settings.font_regular, settings.message_text_size)
        )
        self.status_view = StatusView(
            self,
            Rect(settings.status_view_position, settings.status_view_size),
            settings.status_view_color,
            pygame.font.Font(settings.font_regular, settings.status_view_font_size),
            pygame.font.Font(settings.font_regular, settings.status_view_font_size),
            colors.green, colors.yellow
        )
        # self.t_area = text_views.WrappedTextArea(
        #     parent = self,
        #     rect = Rect(100, 100, 200, 200),
        #     background = colors.black,
        #     font = fonts.regular,
        #     padding = (10,10,10,10)
        # )
        # self.add_view(self.t_area)
        #
        # self.t_area.add_string(
        #     string = "The quick brown fox jumped over the lazy dog.",
        #     indent = 4,
        #     color = colors.white,
        #     background = (50, 50, 50)
        # )
        #
        # self.t_area.newline(2)
        #
        # self.t_area.add_string("Also I am really smart.",
        #                        0, colors.blue)
        #
        # self.t_area.add_string("The quick brown fox jumped over the lazy dog\n and there was much rejoicing.")
        # event.DisplayInited().dispatch()
        # NOTE(review): looks like leftover debug code — the menu is built
        # but never stored or used; confirm before removing.
        from .ui.menu import Menu
        test_menu = Menu(self)
    # new
    def update(self):
        # Refresh subviews from the objects captured at construction time.
        self.map_view.update(self.player_object, self.map_object)
        self.status_view.update(self.player_object)
        self.parent.paint()
    #old
    def handle_game_state_changed(self, evt):
        # Refresh subviews from the state carried by the event instead.
        self.map_view.update(evt.player_state, evt.map_state)
        self.status_view.update(evt.player_state)
        self.parent.paint()
if __name__ == "__main__":
    # Running the module directly just prints the module docstring.
    print(__doc__)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding: utf-8
import re
import logging
from collections import defaultdict
from abc import ABC, abstractmethod
from . import strutil
# Wildcard token standing in for a variable word inside a log template.
REPLACER = "**"
# Decoration put around a labeled variable by LTPostProcess.
REPLACER_HEAD = "*"
REPLACER_TAIL = "*"
REPLACER_REGEX = re.compile(r"\*[A-Z]*?\*")  # shortest match
# Token used for anonymized description words.
ANONYMIZED_DESC = "##"
_logger = logging.getLogger(__package__)
class LTTable:
    """Collection of LogTemplate objects addressed by their ltid."""

    def __init__(self):
        # Mapping of ltid (int) -> LogTemplate instance.
        self._ltdict = {}

    def __iter__(self):
        return self._generator()

    def _generator(self):
        # Yield templates in insertion order of their ltids.
        yield from self._ltdict.values()

    def __len__(self):
        return len(self._ltdict)

    def __getitem__(self, key):
        assert isinstance(key, int)
        try:
            return self._ltdict[key]
        except KeyError:
            raise IndexError("index out of range") from None

    def next_ltid(self):
        """Return the smallest non-negative integer not used as an ltid."""
        candidate = 0
        while candidate in self._ltdict:
            candidate += 1
        return candidate

    def restore_lt(self, ltid, ltgid, ltw, lts, count):
        """Re-create a template from persisted fields and register it."""
        assert ltid not in self._ltdict
        self._ltdict[ltid] = LogTemplate(ltid, ltgid, ltw, lts, count)

    def add_lt(self, ltline):
        """Register an already-built template under its own ltid."""
        assert ltline.ltid not in self._ltdict
        self._ltdict[ltline.ltid] = ltline

    def update_lt(self, ltobj):
        """Replace the stored template that carries the same ltid."""
        assert ltobj.ltid in self._ltdict
        self._ltdict[ltobj.ltid] = ltobj

    def remove_lt(self, ltid):
        """Drop the template registered under *ltid*."""
        self._ltdict.pop(ltid)
class LogTemplate:
    """A log template: word sequence of a message class.

    ltw holds the words; variable positions hold the REPLACER wildcard.
    lts holds the separators around the words (or None), and count is
    the number of log messages matched so far.
    """

    def __init__(self, ltid, ltgid, ltw, lts, count):
        if len(ltw) == 0:
            raise ValueError("empty ltw, failed to generate LogTemplate")
        self.ltid = ltid    # template id
        self.ltgid = ltgid  # template group id
        self.ltw = ltw      # words; REPLACER marks a variable slot
        self.lts = lts      # separators (len(ltw) + 1 expected) or None
        self.count = count  # number of matched messages

    def __iter__(self):
        # bugfix: the original returned self.ltw (a list), which is not an
        # iterator, so ``for w in template`` raised TypeError.
        return iter(self.ltw)

    def __str__(self):
        return self.restore_message(self.ltw)

    def get(self, key):
        """Fetch 'ltid' or 'ltgid' by name; raise KeyError otherwise."""
        if key == "ltid":
            return self.ltid
        elif key == "ltgid":
            return self.ltgid
        else:
            raise KeyError

    def desc(self):
        """Constant (non-variable) words of the template."""
        return [w for w in self.ltw if w != REPLACER]

    def var(self, l_w):
        """Variable words of message *l_w*; placeholders if *l_w* is empty."""
        if len(l_w) == 0:
            return [REPLACER for w in self.ltw if w == REPLACER]
        else:
            return [w_org for w_org, w_lt in zip(l_w, self.ltw)
                    if w_lt == REPLACER]

    def var_location(self):
        """Indexes of the variable positions in ltw."""
        return [i for i, w_lt in enumerate(self.ltw) if w_lt == REPLACER]

    def restore_message(self, l_w, esc=False):
        """Join words and separators back into a message string.

        Falls back to the template's own words when *l_w* is empty;
        escape sequences are kept verbatim when *esc* is True.
        """
        if l_w is None or len(l_w) == 0:
            l_w = self.ltw
        if esc:
            l_w = [w for w in l_w]
        else:
            l_w = [strutil.restore_esc(w) for w in l_w]
        if self.lts is None:
            return "".join(l_w)
        else:
            return "".join([s + w for w, s in zip(l_w + [""], self.lts)])

    def increment(self):
        """Count one more matched message and return the new count."""
        self.count += 1
        return self.count

    def replace(self, l_w, l_s=None, count=None):
        """Overwrite the word list (and optionally separators and count)."""
        self.ltw = l_w
        if l_s is not None:
            self.lts = l_s
        if count is not None:
            self.count = count
class TemplateTable:
    """Temporal template table used while generating log templates."""

    def __init__(self):
        self._d_tpl = {}     # tid -> template (list of words)
        self._d_rtpl = {}    # hashable template key -> tid
        self._d_ltid = {}    # tid -> ltid
        self._d_cand = defaultdict(list)  # tid -> list of candidate ltids
        self._last_modified = None  # template overwritten last (LTGenJoint)

    def __str__(self):
        rows = [" ".join([str(tid)] + tpl)
                for tid, tpl in self._d_tpl.items()]
        return "\n".join(rows)

    def __iter__(self):
        return self._generator()

    def _generator(self):
        yield from self._d_tpl.values()

    def __getitem__(self, key):
        assert isinstance(key, int)
        try:
            return self._d_tpl[key]
        except KeyError:
            raise IndexError("index out of range") from None

    def __len__(self):
        return len(self._d_tpl)

    def next_tid(self):
        """Return the smallest non-negative integer not used as a tid."""
        tid = 0
        while tid in self._d_tpl:
            tid += 1
        return tid

    def tids(self):
        return self._d_tpl.keys()

    @staticmethod
    def _key_template(template):
        # Tuples are hashable, so the word sequence itself can be the key.
        return tuple(template)

    def exists(self, template):
        return self._key_template(template) in self._d_rtpl

    def get_tid(self, template):
        return self._d_rtpl[self._key_template(template)]

    def get_template(self, tid):
        return self._d_tpl[tid]

    def add(self, template):
        """Register a new template and return its assigned tid."""
        tid = self.next_tid()
        self._d_tpl[tid] = template
        self._d_rtpl[self._key_template(template)] = tid
        return tid

    def replace(self, tid, template):
        """Overwrite the template of *tid*, remembering the old value."""
        self._last_modified = self._d_tpl[tid]
        self._d_tpl[tid] = template
        self._d_rtpl[self._key_template(template)] = tid

    def get_updated(self):
        """Template value that was overwritten by the last replace()."""
        return self._last_modified

    def add_ltid(self, tid, ltid):
        self._d_ltid[tid] = ltid

    def get_ltid(self, tid):
        return self._d_ltid[tid]

    def load(self, obj):
        """Restore state from dumpobj() output, rebuilding reverse lookup."""
        self._d_tpl, self._d_cand = obj
        for tid, tpl in self._d_tpl.items():
            self._d_rtpl[self._key_template(tpl)] = tid

    def dumpobj(self):
        return self._d_tpl, self._d_cand
class LTGen(ABC):
    """Base class of log template generators.

    Subclasses estimate a template for each parsed message and keep the
    results in a TemplateTable, reporting one of the state codes below.
    """
    state_added = 0      # a new template was registered
    state_changed = 1    # an existing template was generalized/updated
    state_unchanged = 2  # the message matched an existing template as-is
    def __init__(self, table):
        # A fresh TemplateTable is created when none is supplied.
        if table is None:
            self._table = TemplateTable()
        else:
            self._table = table
    def is_stateful(self):
        # Stateful generators need load()/dumpobj() round trips.
        return True
    def get_tpl(self, tid):
        return self._table[tid]
    def add_tpl(self, ltw, tid=None):
        """Register *ltw*; a pre-chosen tid is returned unchanged."""
        if tid is None:
            tid = self._table.add(ltw)
        return tid
    def update_tpl(self, ltw, tid):
        self._table.replace(tid, ltw)
    def merge_tpl(self, l_w, tid):
        """Generalize template *tid* against message *l_w*.

        Returns state_unchanged when the merge changes nothing,
        state_changed when variable slots were introduced.
        """
        old_tpl = self._table[tid]
        new_tpl = merged_template(old_tpl, l_w)
        if old_tpl == new_tpl:
            return self.state_unchanged
        else:
            self.update_tpl(new_tpl, tid)
            return self.state_changed
    def update_table(self, tpl):
        """Register *tpl* if unseen; returns (tid, state) or (None, None)."""
        if tpl is None:
            return None, None
        elif self._table.exists(tpl):
            tid = self._table.get_tid(tpl)
            return tid, self.state_unchanged
        else:
            tid = self._table.add(tpl)
            return tid, self.state_added
    def preprocess(self, plines):
        """This function do the pre-process for given data if needed.
        This function is called by process_init_data."""
        pass
    def process_offline(self, d_pline):
        """If there is no need of special process for init phase,
        this function simply call process_line multiple times.
        Returns a dict mapping message id to its tid.
        """
        self.preprocess(d_pline.values())
        d = {}
        for mid, pline in d_pline.items():
            tid, state = self.process_line(pline)
            d[mid] = tid
        return d
    @abstractmethod
    def process_line(self, pline):
        """Estimate log template for given message.
        This method works in incremental processing phase.
        Args:
            pline (dict): parsed log message with log2seq
        Returns:
            tid (int): A template id in TemplateTable.
            state (int)
        """
        raise NotImplementedError
    def postprocess(self, plines):
        """This function do the post-process for given data if needed.
        This function is called by process_init_data."""
        pass
    @abstractmethod
    def load(self, loadobj):
        raise NotImplementedError
    @abstractmethod
    def dumpobj(self):
        raise NotImplementedError
class LTGenOffline(LTGen, ABC):
    """Generator that only supports batch (offline) processing."""

    @abstractmethod
    def process_offline(self, plines):
        raise NotImplementedError

    def process_line(self, pline):
        # Incremental use is a programming error for offline generators.
        raise RuntimeError(
            "offline LTGen does not support incremental processing")

    def load(self, _):
        # Suspension/restart is not supported; nothing to restore.
        pass

    def dumpobj(self):
        # Suspension/restart is not supported; nothing to persist.
        return None
class LTGenStateless(LTGen, ABC):
    """Stateless generator; safe for multiprocessing in offline runs."""

    @abstractmethod
    def generate_tpl(self, pline):
        raise NotImplementedError

    def is_stateful(self):
        return False

    def process_line(self, pline):
        # Derive the template purely from the message, then register it.
        return self.update_table(self.generate_tpl(pline))

    def load(self, loadobj):
        pass  # stateless: nothing to restore

    def dumpobj(self):
        return None  # stateless: nothing to persist
class LTGenJoint(LTGen):
    """Chain of LTGen instances: the first that yields a template wins.

    When ltgen_import_index is given, templates produced by the other
    generators are mirrored into that LTGenImport's definition set so
    the import-based generator stays in sync.
    """
    def __init__(self, table: TemplateTable, l_ltgen, ltgen_import_index=None):
        super().__init__(table)
        self._l_ltgen = l_ltgen
        self._import_index = ltgen_import_index
        class_names = [c.__class__.__name__ for c in self._l_ltgen]
        _logger.warning("LTGenJoint init with {0}".format(class_names))
    def is_stateful(self):
        # Stateful iff any chained generator is stateful.
        return any([ltgen.is_stateful() for ltgen in self._l_ltgen])
    def _update_ltmap(self, pline, index, tid, state):
        """Mirror a template produced by generator *index* into the
        LTGenImport member, unless it came from the importer itself."""
        from .lt_import import LTGenImport
        if (self._import_index is not None) and \
                (index != self._import_index):
            ltgen_import: LTGenImport = self._l_ltgen[self._import_index]
            new_tpl = self._table.get_template(tid)
            if state == self.state_added:
                ltgen_import.add_definition(new_tpl)
                msg = "LTGenJoint new template: {0}".format(new_tpl)
                _logger.debug(msg)
            elif state == self.state_changed:
                old_tpl = self._table.get_updated()
                ltgen_import.update_definition(old_tpl, new_tpl)
                msg = "LTGenJoint template update: {0} -> {1}".format(
                    old_tpl, new_tpl)
                _logger.debug(msg)
    def process_line(self, pline):
        """Try each chained generator in order; return the first result."""
        _logger.debug("LTGenJoint input: {0}".format(pline["words"]))
        for index, ltgen in enumerate(self._l_ltgen):
            tid, state = ltgen.process_line(pline)
            if tid is not None:
                msg = ("LTGenJoint: method {0} ".format(index) +
                       "successfully generate a template")
                _logger.debug(msg)
                self._update_ltmap(pline, index, tid, state)
                return tid, state
        else:
            # No generator produced a template for this message.
            msg = "Template not matched/generated: {0}".format(pline["words"])
            _logger.debug(msg)
            return None, None
    def load(self, loadobj):
        # Restore each chained generator from its own dump entry.
        for ltgen, ltgen_data in zip(self._l_ltgen, loadobj):
            ltgen.load(ltgen_data)
    def dumpobj(self):
        return [ltgen.dumpobj() for ltgen in self._l_ltgen]
#class LTGenMultiProcess(LTGenOffline):
#
# _ltgen: LTGenStateless
#
# def __init__(self, table: TemplateTable, n_proc, kwargs):
# super().__init__(table)
# self._n_proc = n_proc
## self._ltgen = init_ltgen(**kwargs)
## assert not self._ltgen.is_stateful(), \
## "multiprocessing is limited to stateless methods"
# assert(n_proc < 20)
#
# from multiprocessing import Pool
# self._pool = Pool(processes=self._n_proc,
# initializer=self._pool_init, initargs=(kwargs,))
#
# @staticmethod
# def _pool_init(ltgen_kwargs):
# global _LTGEN_MP_LOCAL
# _LTGEN_MP_LOCAL = init_ltgen(**ltgen_kwargs)
# assert not _LTGEN_MP_LOCAL.is_stateful(), \
# "multiprocessing is limited to stateless methods"
#
# @staticmethod
# def _pool_task(args):
# ret = []
# for message_id, pline in args:
# template = _LTGEN_MP_LOCAL.generate_tpl(pline)
# ret.append((message_id, template))
# return ret
#
# #message_id, pline = args
# #template = _LTGEN_MP_LOCAL.generate_tpl(pline)
# #return message_id, template
#
# def _process_offline_pool(self, plines):
# l_tmp_args = [(mid, pline)
# for mid, pline in enumerate(plines)]
# l_args = np.array_split(l_tmp_args, self._n_proc * 10)
# try:
# d_tpl = {}
# for ret in self._pool.imap_unordered(self._pool_task, l_args):
# for mid, tpl in ret:
# d_tpl[mid] = tpl
# self._pool.close()
# except KeyboardInterrupt:
# self._pool.terminate()
# exit()
# else:
# ret = {}
# for mid, tpl in d_tpl.items():
# tid, _ = self.update_table(tpl)
# ret[mid] = tid
# return ret
#
# def process_offline(self, plines):
# return self._process_offline_pool(plines)
class LTGroup(ABC):
    """Base class assigning group ids (ltgid) to log templates."""

    def __init__(self):
        self._init_dict()

    def _init_dict(self):
        self._d_group = {}   # gid -> list of LogTemplate
        self._d_rgroup = {}  # ltid -> gid

    def _next_groupid(self):
        """Return the smallest non-negative unused group id."""
        gid = 0
        while gid in self._d_group:
            gid += 1
        return gid

    @abstractmethod
    def make(self) -> LTTable:
        raise NotImplementedError

    def add_lt(self, gid, ltline):
        """Put *ltline* into group *gid* and record the reverse mapping."""
        self._d_group.setdefault(gid, []).append(ltline)
        self._d_rgroup[ltline.ltid] = gid

    def restore_ltg(self, db, table):
        """Rebuild the group mappings from persisted (ltid, ltgid) pairs."""
        for ltid, ltgid in db.iter_ltg_def():
            self._d_group.setdefault(ltgid, []).append(table[ltid])
            self._d_rgroup[ltid] = ltgid

    def update_lttable(self, lttable):
        """Write the assigned ltgid back onto every grouped template."""
        for ltid, gid in self._d_rgroup.items():
            lttable[ltid].ltgid = gid
        return lttable

    def load(self, loadobj):
        pass  # stateless by default

    def dumpobj(self):
        return None
class LTGroupOnline(LTGroup, ABC):
    """Grouping strategy that classifies templates one at a time."""

    def __init__(self, lttable):
        super().__init__()
        self.lttable = lttable

    @abstractmethod
    def add(self, ltline):
        """Assign *ltline* to a group and return its ltgid."""
        raise NotImplementedError

    def make(self):
        self.remake_all()
        return self.lttable

    def remake_all(self):
        """Reset the mappings and re-classify every known template."""
        self._init_dict()
        for ltline in self.lttable:
            ltline.ltgid = self.add(ltline)
class LTGroupDummy(LTGroupOnline):
    """No-op grouping: every template forms its own group (ltgid == ltid)."""

    def add(self, ltline):
        self.add_lt(ltline.ltid, ltline)
        return ltline.ltid
class LTGroupOffline(LTGroup, ABC):
    """Grouping strategy that processes the whole template table at once."""

    def __init__(self, lttable):
        super().__init__()
        self.lttable = lttable
        self._n_groups = 0

    @abstractmethod
    def make(self) -> LTTable:
        raise NotImplementedError

    @property
    def n_groups(self):
        """Number of groups produced by the concrete make()."""
        return self._n_groups
class LTPostProcess(object):
    """Post-processing of templates: label recognizable variable words."""

    def __init__(self, conf, table, lttable, l_alg):
        self._table = table
        self._lttable = lttable
        self._rules = []
        for alg in l_alg:
            if alg == "dummy":
                self._rules.append(VariableLabelRule())
            elif alg == "host":
                self._rules.append(VariableLabelHost(conf))
            else:
                raise NotImplementedError
        self.sym_header = REPLACER_HEAD
        self.sym_footer = REPLACER_TAIL

    def _labeled_variable(self, w):
        # Wrap a label, e.g. "HOSTGRP" -> "*HOSTGRP*".
        return "".join((self.sym_header, w, self.sym_footer))

    def replace_variable(self, l_w, tpl, sym):
        """Return *tpl* with each *sym* slot labeled via the rules.

        The original word from *l_w* is offered to each rule in order;
        the first non-None label wins, otherwise the slot stays *sym*.
        """
        ret = []
        for org_w, tpl_w in zip(l_w, tpl):
            if tpl_w == sym:
                for r in self._rules:
                    ww = r.replace_word(org_w)
                    if ww is not None:
                        ret.append(self._labeled_variable(ww))
                        break
                else:
                    # bugfix: the fallback append was attached to the ``if``
                    # inside the loop, so with multiple rules the template
                    # word was appended once per non-matching rule; the
                    # for/else runs it exactly once when no rule matched.
                    ret.append(tpl_w)
            else:
                ret.append(tpl_w)
        return ret

    def search(self, tid, ltw):
        """Search existing candidates of template derivation. Return None
        if no possible candidates found."""
        # NOTE(review): TemplateTable.getcand is commented out elsewhere in
        # this module; confirm this method is unused or restore getcand
        # before calling it.
        l_ltid = self._table.getcand(tid)
        for ltid in l_ltid:
            if self._lttable[ltid].ltw == ltw:
                return ltid
        return None
class VariableLabelRule(object):
    """Base rule for naming variable words; the base never matches."""

    def __init__(self):
        pass

    def replace_word(self, w):
        """Return a label for *w*, or None when this rule does not apply."""
        return None
class VariableLabelHost(VariableLabelRule):
    """Rule that labels words belonging to a known host-alias group."""

    def __init__(self, conf):
        super().__init__()
        # Local import: host_alias is only needed when this rule is used.
        from . import host_alias
        self.ha = host_alias.init_hostalias(conf)

    def replace_word(self, w):
        """Group name of host *w*, or None if it is not a known host."""
        return self.ha.get_group(w)
def merged_template(m1, m2):
    """Merge two word sequences into a template.

    Positions where the words agree are kept verbatim; disagreeing
    positions become the REPLACER wildcard. The result length is the
    shorter of the two inputs.
    """
    return [w1 if w1 == w2 else REPLACER for w1, w2 in zip(m1, m2)]
def template_from_messages(l_lm):
    """Generate a log template as the common part of given instances.

    Args:
        l_lm (List[log_db.LogMessage]): Log instances

    Returns:
        tpl (List[str])
    """
    tpl = []
    for column in zip(*[lm.l_w for lm in l_lm]):
        distinct = set(column)
        distinct.discard(REPLACER)
        # Exactly one distinct non-wildcard word means constant text;
        # anything else (several words, or wildcards only) is a variable.
        if len(distinct) == 1:
            tpl.append(column[0])
        else:
            tpl.append(REPLACER)
    return tpl
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from functools import partial
import re
# Number of history months encoded in the per-month column prefixes (bd1..bd4).
num_months = 4
# Rows per pd.read_csv chunk.
chunk_size = 1000000
# Product-ownership indicator columns of the dataset.
indicators = ['ind_ahor_fin_ult1', 'ind_aval_fin_ult1', 'ind_cco_fin_ult1', 'ind_cder_fin_ult1', 'ind_cno_fin_ult1',
              'ind_ctju_fin_ult1', 'ind_ctma_fin_ult1', 'ind_ctop_fin_ult1', 'ind_ctpp_fin_ult1', 'ind_deco_fin_ult1',
              'ind_deme_fin_ult1', 'ind_dela_fin_ult1', 'ind_ecue_fin_ult1', 'ind_fond_fin_ult1', 'ind_hip_fin_ult1',
              'ind_plan_fin_ult1', 'ind_pres_fin_ult1', 'ind_reca_fin_ult1', 'ind_tjcr_fin_ult1', 'ind_valo_fin_ult1',
              'ind_viv_fin_ult1', 'ind_nomina_ult1', 'ind_nom_pens_ult1', 'ind_recibo_ult1']
# Prefix used for month-indexed column names ("bd<N>_<feature>").
column_prefix = 'bd'
# Default artifact locations.
main_dataframe = 'dataframe.pkl'
dataframe_dir = 'dataframes'
main_csv = 'stuff_6mo_all_indicators.csv'
def read_dataframe():
    """Load the main pickled dataframe."""
    return pd.read_pickle(dataframe_dir + "/" + main_dataframe)


def read_dataframe_for_feature(feature):
    """Load the pickled dataframe for a single feature."""
    return pd.read_pickle(dataframe_dir + "/" + feature + ".pkl")


def save_dataframe_for_feature(df, feature):
    """Persist the dataframe for a single feature."""
    df.to_pickle(dataframe_dir + "/" + feature + ".pkl")


def save_dataframe(df):
    """Persist the main dataframe."""
    df.to_pickle(dataframe_dir + "/" + main_dataframe)


def predicted_column_name(name):
    """Column name of *name* in the month being predicted."""
    return "{}{}_{}".format(column_prefix, num_months, name)


def save_model(clf, feature):
    """Persist a fitted model for *feature*."""
    joblib.dump(clf, "models/" + feature + ".pkl")


def read_model(feature):
    """Load the persisted model for *feature*."""
    return joblib.load("models/" + feature + ".pkl")


def dtypes_range(name, upto, dtype):
    """Set of (column, dtype) pairs for months 1..upto of *name*."""
    return {("bd{}_{}".format(i, name), dtype) for i in range(1, upto + 1)}
# Explicit dtypes for the parsed columns; keeps dataframe memory down.
dtypes = {
    'bd1_renta': np.int32,
    'bd1_sexo': np.int8,
    'bd1_pais_residencia': np.int16,
    'bd1_canal_entrada': np.int16,
    'bd1_ind_nuevo': np.bool
}
# dict.update accepts an iterable of (key, value) pairs, so the sets
# returned by dtypes_range merge directly into the mapping.
dtypes.update(dtypes_range('segmento_individual', num_months, np.bool))
dtypes.update(dtypes_range('segmento_vip', num_months, np.bool))
dtypes.update(dtypes_range('segmento_graduate', num_months, np.bool))
for i in indicators: dtypes.update(dtypes_range(i, num_months, np.bool))
dtypes.update(dtypes_range('cod_prov', num_months, np.uint8))
dtypes.update(dtypes_range('ind_empleado', num_months, np.int8))
dtypes.update(dtypes_range('age', num_months, np.int))
dtypes.update(dtypes_range('indrel_99', num_months, np.bool))
dtypes.update(dtypes_range('ind_actividad_cliente', num_months, np.bool))
dtypes.update(dtypes_range('antiguedad', num_months, np.int))
dtypes.update(dtypes_range('tipodom', num_months, np.bool))
dtypes.update(dtypes_range('indfall', num_months, np.bool))
dtypes.update(dtypes_range('indext', num_months, np.bool))
dtypes.update(dtypes_range('indresi', num_months, np.bool))
dtypes.update(dtypes_range('indrel_1mes', num_months, np.int8))
dtypes.update(dtypes_range('tiprel_1mes', num_months, np.int8))
# Matches the month prefix of a column name, e.g. 'bd1_'.
rx_prefix = re.compile('bd\\d_')
# NOTE(review): this module-level encoder appears unused here — the
# encoders actually consulted live in field_values below; confirm.
le = LabelEncoder()
# Pre-fitted label encoders for each raw categorical column; the fitted
# class lists enumerate every value observed in the dataset.
field_values = {
    'sexo': LabelEncoder().fit(['V', 'H']),
    'pais_residencia': LabelEncoder().fit(
        ['ES', 'CA', 'CH', 'CL', 'IE', 'AT', 'NL', 'FR', 'GB', 'DE', 'DO', 'BE', 'AR', 'VE', 'US', 'MX',
         'BR', 'IT', 'EC', 'PE', 'CO', 'HN', 'FI', 'SE', 'AL', 'PT', 'MZ', 'CN', 'TW', 'PL', 'IN', 'CR',
         'NI', 'HK', 'AD', 'CZ', 'AE', 'MA', 'GR', 'PR', 'RO', 'IL', 'RU', 'GT', 'GA', 'NO', 'SN',
         'MR', 'UA', 'BG', 'PY', 'EE', 'SV', 'ET', 'CM', 'SA', 'CI', 'QA', 'LU', 'PA', 'BA', 'BO', 'AU',
         'BY', 'KE', 'SG', 'HR', 'MD', 'SK', 'TR', 'AO', 'CU', 'GQ', 'EG', 'ZA', 'DK', 'UY', 'GE',
         'TH', 'DZ', 'LB', 'JP', 'NG', 'PK', 'TN', 'TG', 'KR', 'GH', 'RS', 'VN', 'PH', 'KW', 'NZ',
         'MM', 'KH', 'GI', 'SL', 'GN', 'GW', 'OM', 'CG', 'LV', 'LT', 'ML', 'MK', 'HU', 'IS', 'LY', 'CF',
         'GM', 'KZ', 'CD', 'BZ', 'ZW', 'DJ', 'JM', 'BM', 'MT'
         ]),
    'ind_empleado': LabelEncoder().fit(['N', 'A', 'B', 'F', 'S']),
    'canal_entrada': LabelEncoder().fit(
        ['KHL', 'KHE', 'KHD', 'KFA', 'KFC', 'KAT', 'KAZ', 'RED', 'KHC', 'KHK', 'KGN', 'KHM', 'KHO', 'KDH',
         'KEH', 'KAD', 'KBG', 'KGC', 'KHF', 'KFK', 'KHN', 'KHA', 'KAF', 'KGX', 'KFD', 'KAG', 'KFG', 'KAB',
         'KCC', 'KAE', 'KAH', 'KAR', 'KFJ', 'KFL', 'KAI', 'KFU', 'KAQ', 'KFS', 'KAA', 'KFP', 'KAJ', 'KFN',
         'KGV', 'KGY', 'KFF', 'KAP', 'KDE', 'KFV', '013', 'K00', 'KAK', 'KCK', 'KCL', 'KAY', 'KBU', 'KDR',
         'KAC', 'KDT', 'KCG', 'KDO', 'KDY', 'KBQ', 'KDA', 'KBO', 'KCI', 'KEC', 'KBZ', 'KES', 'KDX', 'KAS',
         '007', 'KEU', 'KCA', 'KAL', 'KDC', 'KAW', 'KCS', 'KCB', 'KDU', 'KDQ', 'KCN', 'KCM', '004', 'KCH',
         'KCD', 'KCE', 'KEV', 'KBL', 'KEA', 'KBH', 'KDV', 'KFT', 'KEY', 'KAO', 'KEJ', 'KEO', 'KEI', 'KEW',
         'KDZ', 'KBV', 'KBR', 'KBF', 'KDP', 'KCO', 'KCF', 'KCV', 'KAM', 'KEZ', 'KBD', 'KAN', 'KBY', 'KCT',
         'KDD', 'KBW', 'KCU', 'KBX', 'KDB', 'KBS', 'KBE', 'KCX', 'KBP', 'KBN', 'KEB', 'KDS', 'KEL', 'KDG',
         'KDF', 'KEF', 'KCP', 'KDM', 'KBB', 'KDW', 'KBJ', 'KFI', 'KBM', 'KEG', 'KEN', 'KEQ', 'KAV', 'KFH',
         'KFM', 'KAU', 'KED', 'KFR', 'KEK', 'KFB', 'KGW', 'KFE', 'KGU', 'KDI', 'KDN', 'KEE', 'KCR', 'KCQ',
         'KEM', 'KCJ', 'KHQ', 'KDL', '025', 'KHP', 'KHR', 'KHS']),
    'indrel_1mes': LabelEncoder().fit(['1', '2', '3', '4', 'P']),
    'tiprel_1mes': LabelEncoder().fit(['A', 'I', 'P', 'R', 'N']),
    'indfall': LabelEncoder().fit(['N', 'S'])
}
def col_to_int(range, v):
    """Encode raw CSV value *v* with the LabelEncoder for column *range*.

    Missing values (None, or blank after stripping) encode as -1.
    """
    if v is None:
        return -1
    v = v.strip()
    if not v:
        return -1
    return field_values[range].transform([v])[0]
def col_to_intvalue(default, v):
    """Parse *v* as an int, returning *default* for None or unparsable input."""
    if v is None:
        return default
    try:
        return int(v)
    except (TypeError, ValueError):
        # bugfix: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; only conversion failures should use the default.
        return default
def feature_range_list(name, upto): return ["bd{}_{}".format(i, name) for i in range(1, upto + 1)]
# Column-name tokens whose values are parsed as plain integers;
# everything else goes through its pre-fitted LabelEncoder.
cols_as_integers = set(['segmento_', 'cod_prov', 'ind_', 'ind_empleado', 'age'])
# Every raw column that needs a converter when reading the CSV.
cols_to_convert = ['bd1_sexo', 'bd1_pais_residencia', 'bd1_ind_empleado', 'bd1_canal_entrada'] + feature_range_list(
    'indrel_1mes', num_months) + feature_range_list('indfall', num_months) + feature_range_list(
    'segmento_individual', num_months) + feature_range_list('tiprel_1mes', num_months) + feature_range_list(
    'segmento_graduate', num_months) + feature_range_list(
    'segmento_vip', num_months) + feature_range_list('cod_prov', num_months) + feature_range_list('age', num_months)
for i in indicators: cols_to_convert = cols_to_convert + feature_range_list(i, num_months)
def make_converters():
    """Build the per-column converter mapping for pd.read_csv."""
    converters = {}
    for col in cols_to_convert:
        if rx_prefix.match(col[:4]):
            # Month-prefixed column: strip the 'bdN_' prefix first.
            base = col[4:]
            if any(base.startswith(token) for token in cols_as_integers):
                converters[col] = partial(col_to_intvalue, 0)
            else:
                converters[col] = partial(col_to_int, base)
        else:
            converters[col] = partial(col_to_int, col)
    return converters
def fillna_range(data, name, upto, v):
    """In-place fillna(*v*) on the columns of months 1..upto of *name*."""
    for month in range(1, upto + 1):
        data["bd{}_{}".format(month, name)].fillna(v, inplace=True)
def read_csv(source, names=None):
    """Read one chunk of the raw CSV with all converters and dtypes applied."""
    return pd.read_csv(source, sep=',', nrows=chunk_size,
                       converters=make_converters(), dtype=dtypes,
                       names=names)
|
nilq/baby-python
|
python
|
""" navigate env with velocity target """
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import rospy
from blimp_env.envs.common.abstract import ROSAbstractEnv
from blimp_env.envs.common.action import Action
from geometry_msgs.msg import Point, Quaternion
from std_msgs.msg import Float32MultiArray
from rotors_comm.msg import WindSpeed
from blimp_env.envs.script import close_simulation, change_buoynacy
import line_profiler
import copy
# Shared profiler instance used by the @profile decorators in this module.
profile = line_profiler.LineProfiler()
# An observation is either a numpy array or a plain float.
Observation = Union[np.ndarray, float]
class PlanarNavigateEnv(ROSAbstractEnv):
"""Navigate blimp by path following decomposed to altitude and planar control"""
    @classmethod
    def default_config(cls) -> dict:
        """Assemble the default env config for planar navigation.

        Starts from the base config and overrides the simulation,
        observation, action, target and reward settings.
        """
        config = super().default_config()
        config["simulation"].update(
            {
                "enable_wind": False,
                "enable_wind_sampling": False,
                "wind_speed": 2.0,
                "enable_buoyancy_sampling": False,
                "buoyancy_range": [0.9, 1.1],
            }
        )
        config["observation"].update(
            {
                "type": "PlanarKinematics",
                "noise_stdv": 0.02,
                "scale_obs": True,
                "enable_airspeed_sensor": True,
            }
        )
        config["action"].update(
            {
                "type": "SimpleContinuousDifferentialAction",
                "act_noise_stdv": 0.05,
                "disable_servo": True,
                "max_servo": -0.5,
                "max_thrust": 0.5,
            }
        )
        config["target"].update(
            {
                "type": "RandomGoal",
                "target_name_space": "goal_",
                "new_target_every_ts": 1200,
            }
        )
        config.update(
            {
                "duration": 1200,
                "simulation_frequency": 30,  # [hz]
                "policy_frequency": 10,  # [hz] has to be greater than 5 to overwrite backup controller
                "reward_weights": np.array(
                    [100, 0.8, 0.2]
                ),  # success, tracking, action
                "tracking_reward_weights": np.array(
                    [0.25, 0.25, 0.25, 0.25]
                ),  # z_diff, planar_dist, yaw_diff, vel_diff
                "success_threshhold": 5,  # [meters]
            }
        )
        return config
    def _create_pub_and_sub(self):
        """Create the rviz debug publishers on top of the base pub/subs.

        Wind and airspeed publishers are only created when the
        corresponding config switches are enabled.
        """
        super()._create_pub_and_sub()
        self.rew_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_reward", Float32MultiArray, queue_size=1
        )
        self.state_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_state", Quaternion, queue_size=1
        )
        self.vel_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_vel", Point, queue_size=1
        )
        self.vel_diff_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_vel_diff", Point, queue_size=1
        )
        self.ang_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_ang", Point, queue_size=1
        )
        self.ang_diff_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_ang_diff", Point, queue_size=1
        )
        self.act_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_act", Quaternion, queue_size=1
        )
        self.pos_cmd_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_pos_cmd",
            Point,
            queue_size=1,
        )
        # Optional publishers, gated by the simulation/observation config.
        if self.config["simulation"]["enable_wind"]:
            self.wind_state_pub = rospy.Publisher(
                self.config["name_space"] + "/wind_state", WindSpeed, queue_size=1
            )
        if self.config["observation"]["enable_airspeed_sensor"]:
            self.airspeed_rviz_pub = rospy.Publisher(
                self.config["name_space"] + "/rviz_airspeed", Point, queue_size=1
            )
    @profile
    def one_step(self, action: Action) -> Tuple[Observation, float, bool, dict]:
        """[perform a step action and observe result]
        Args:
            action (Action): action from the agent [-1,1] with size (4,)
        Returns:
            Tuple[Observation, float, bool, dict]:
                obs: np.array [-1,1] with size (9,),
                reward: scalar,
                terminal: bool,
                info: dictionary of all the step info,
        """
        self._simulate(action)
        obs, obs_info = self.observation_type.observe()
        # Copies prevent reward/terminal computation from mutating the
        # observation info that is reused in the info dict below.
        reward, reward_info = self._reward(obs.copy(), action, copy.deepcopy(obs_info))
        terminal = self._is_terminal(copy.deepcopy(obs_info))
        info = {
            "step": self.steps,
            "obs": obs,
            "obs_info": obs_info,
            "act": action,
            "reward": reward,
            "reward_info": reward_info,
            "terminal": terminal,
        }
        self._update_goal_and_env()
        self._step_info(info)
        return obs, reward, terminal, info
def _step_info(self, info: dict):
    """Publish all the step information to rviz topics for debugging.

    Args:
        info ([dict]): [dict contain all step information]
    """
    obs_info = info["obs_info"]
    proc_info = obs_info["proc_dict"]
    # reward breakdown as a flat float array for plotting
    self.rew_rviz_pub.publish(
        Float32MultiArray(data=np.array(info["reward_info"]["rew_info"]))
    )
    # NOTE: Quaternion is used here as a generic 4-float container for
    # plotting, not as a rotation
    self.state_rviz_pub.publish(
        Quaternion(
            proc_info["planar_dist"],
            proc_info["yaw_diff"],
            proc_info["z_diff"],
            proc_info["vel_diff"],
        )
    )
    self.vel_rviz_pub.publish(Point(*obs_info["velocity"]))
    self.vel_diff_rviz_pub.publish(
        Point(
            obs_info["velocity_norm"],
            self.goal["velocity"],
            proc_info["vel_diff"],
        )
    )
    if self.config["observation"]["enable_airspeed_sensor"]:
        self.airspeed_rviz_pub.publish(Point(obs_info["airspeed"], 0, 0))
    self.ang_rviz_pub.publish(Point(*obs_info["angle"]))
    self.ang_diff_rviz_pub.publish(Point(0, 0, proc_info["yaw_diff"]))
    self.act_rviz_pub.publish(Quaternion(*info["act"]))
    self.pos_cmd_pub.publish(Point(*self.goal["position"]))
    if self.dbg:
        print(
            f"================= [ PlanarNavigateEnv ] step {self.steps} ================="
        )
        print("STEP INFO:", info)
        print("\r")
def reset(self) -> Observation:
    """Reset step counters and the simulation, optionally resampling the
    waypoint list, wind and buoyancy, and return the first observation."""
    self.steps = 0
    self.done = False
    self._reset()
    if self.config["target"]["type"] == "MultiGoal" and self.config["target"].get(
        "enable_random_goal", True
    ):
        # randint(4, 8) draws 4..7 waypoints (upper bound exclusive)
        n_waypoints = np.random.randint(4, 8)
        self.target_type.sample_new_wplist(n_waypoints=n_waypoints)
    if self.config["simulation"]["enable_wind_sampling"]:
        self._sample_wind_state()
    if self.config["simulation"]["enable_buoyancy_sampling"]:
        self._sample_buoyancy(
            buoyancy_range=self.config["simulation"]["buoyancy_range"]
        )
    obs, _ = self.observation_type.observe()
    return obs
def _update_goal_and_env(self):
    """Refresh the tracked goal and republish the sampled wind state."""
    self.goal = self.target_type.sample()
    sim_cfg = self.config["simulation"]
    if sim_cfg["enable_wind"] and sim_cfg["enable_wind_sampling"]:
        self.wind_state_pub.publish(self.wind_state)
def _sample_wind_state(self):
    """Draw a random wind vector; vertical wind is ten times weaker."""
    speed = self.config["simulation"]["wind_speed"]
    state = WindSpeed()
    state.velocity.x = np.random.uniform(-speed, speed)
    state.velocity.y = np.random.uniform(-speed, speed)
    state.velocity.z = np.random.uniform(-speed / 10, speed / 10)
    self.wind_state = state
def _sample_buoyancy(
    self,
    deflation_range=(0.0, 1.5),
    freeflop_angle_range=(0.0, 1.5),
    collapse_range=(0.0, 0.02),
    buoyancy_range=(0.9, 1.1),
):
    """Randomize hull deterioration and buoyancy in the simulator.

    Each parameter is a (low, high) pair sampled uniformly. Defaults are
    tuples instead of lists to avoid the shared-mutable-default pitfall
    (callers may still pass lists, as `reset` does).

    Args:
        deflation_range: uniform range for hull deflation.
        freeflop_angle_range: uniform range for free-flop angle [rad].
        collapse_range: uniform range for hull collapse.
        buoyancy_range: uniform range for the buoyancy multiplier.
    """
    # change_buoynacy (sic) is an external helper; name kept as-is.
    change_buoynacy(
        robot_id=self.config["robot_id"],
        ros_port=self.config["ros_port"],
        gaz_port=self.config["gaz_port"],
        deflation=np.random.uniform(*deflation_range),
        freeflop_angle=np.random.uniform(*freeflop_angle_range),
        collapse=np.random.uniform(*collapse_range),
        buoyancy=np.random.uniform(*buoyancy_range),
    )
def _reward(
    self, obs: np.array, act: np.array, obs_info: dict
) -> Tuple[float, dict]:
    """calculate reward
    total_reward = success_reward + tracking_reward + action_reward
    success_reward: +1 if agent stay in the vicinity of goal
    tracking_reward: - L2 distance to goal - yaw angle difference - z diff - vel diff
    action_reward: penalty for motor use

    Args:
        obs (np.array): ("z_diff", "planar_dist", "yaw_diff", "vel_diff", "vel", "yaw_vel", "action")
        act (np.array): agent action [-1,1] with size (4,)
        obs_info (dict): contain all information of a step

    Returns:
        Tuple[float, dict]: [reward scalar and a detailed reward info]
    """
    track_weights = self.config["tracking_reward_weights"].copy()
    reward_weights = self.config["reward_weights"].copy()
    success_reward = self.compute_success_rew(
        obs_info["position"], obs_info["goal_dict"]["position"]
    )
    # remap planar_dist from [-1, 1] to [0, 1] so that -1 (closest) gets
    # zero penalty; the caller passes a copy, so this mutation is local
    obs[1] = (obs[1] + 1) / 2  # dist -1 should have max reward
    tracking_reward = np.dot(track_weights, -np.abs(obs[0:4]))
    action_reward = self.action_type.action_rew()
    reward = np.dot(
        reward_weights,
        (
            success_reward,
            tracking_reward,
            action_reward,
        ),
    )
    reward = np.clip(reward, -1, 1)
    rew_info = (reward, success_reward, tracking_reward, action_reward)
    reward_info = {"rew_info": rew_info}
    return float(reward), reward_info
def compute_success_rew(self, pos: np.array, goal_pos: np.array) -> float:
    """Binary success signal: 1.0 when the planar (x, y) distance to the
    goal is within ``config["success_threshhold"]``, else 0.0.

    Args:
        pos ([np.array]): [position of machine]
        goal_pos ([np.array]): [position of planar goal]

    Returns:
        [float]: [1 if success, otherwise 0]
    """
    planar_err = pos[0:2] - goal_pos[0:2]
    within_reach = np.linalg.norm(planar_err) <= self.config["success_threshhold"]
    return float(within_reach)
def _is_terminal(self, obs_info: dict) -> bool:
    """if episode terminate
    - time: episode duration finished
    - success: final waypoint reached (MultiGoal) or within goal vicinity

    Returns:
        bool: [episode terminal or not]
    """
    time = False
    if self.config["duration"] is not None:
        time = self.steps >= int(self.config["duration"]) - 1
    success = False
    if self.config["target"]["type"] == "MultiGoal":
        # MultiGoal tasks finish when the last waypoint index is reached
        success = self.target_type.wp_index == self.target_type.wp_max_index
    else:
        success_reward = self.compute_success_rew(
            obs_info["position"], obs_info["goal_dict"]["position"]
        )
        success = success_reward >= 0.9
    return time or success
def close(self) -> None:
    """Delegate environment shutdown to the base class."""
    return super().close()
class PIDController:
    """Discrete PID controller with optional sensor-supplied i/d terms.

    The output is ``gain * (Kp*e + Ki*int(e) + Kd*de/dt) + offset`` where
    the integral term is clipped to [-1, 1] to limit windup.
    """

    def __init__(
        self,
        pid_param=np.array([1.0, 0.2, 0.05]),
        gain=1.0,
        offset=0.0,
        delta_t=0.01,
        i_from_sensor=False,
        d_from_sensor=False,
    ):
        # (Kp, Ki, Kd) weights applied to (err, err_i, err_d)
        self.pid_param = pid_param
        self.gain = gain
        self.offset = offset
        self.delta_t = delta_t
        # when True, the caller supplies err_i / err_d instead of the
        # controller integrating / differentiating internally
        self.i_from_sensor = i_from_sensor
        self.d_from_sensor = d_from_sensor
        self.err_sum, self.prev_err = 0.0, 0.0
        self.windup = 0.0

    def action(self, err, err_i=0, err_d=0):
        """Return one control output for the current error sample."""
        if not self.i_from_sensor:
            self.err_sum = np.clip(self.err_sum + err * self.delta_t, -1, 1)
            err_i = self.err_sum * (1 - self.windup)
        if not self.d_from_sensor:
            err_d = (err - self.prev_err) / (self.delta_t)
            self.prev_err = err
        pid_terms = np.array([err, err_i, err_d])
        return self.gain * np.dot(self.pid_param, pid_terms) + self.offset

    def clear(self):
        """Reset integral and derivative memory."""
        self.err_sum, self.prev_err = 0, 0
        self.windup = 0.0
class ResidualPlanarNavigateEnv(PlanarNavigateEnv):
    """Planar navigation with residual control: the agent's action is
    blended with a PID base controller before being applied."""

    @classmethod
    def default_config(cls) -> dict:
        """Parent config extended with wind/buoyancy randomization,
        residual-control mixing and a multi-goal waypoint target."""
        config = super().default_config()
        config["simulation"].update(
            {
                "enable_wind": True,
                "enable_wind_sampling": True,
                "wind_speed": 1.0,
                "enable_buoyancy_sampling": True,
                "buoyancy_range": [0.9, 1.1],
            }
        )
        config["observation"].update(
            {
                "type": "PlanarKinematics",
                "noise_stdv": 0.02,
                "scale_obs": True,
                "enable_rsdact_feedback": True,
                "enable_airspeed_sensor": True,
                "enable_next_goal": True,  # only support target type: MultiGoal
            }
        )
        config["action"].update(
            {
                "type": "SimpleContinuousDifferentialAction",
                "act_noise_stdv": 0.05,
                "disable_servo": False,
                "max_servo": -0.5,
                "max_thrust": 0.5,
            }
        )
        trigger_dist = 5
        config["target"].update(
            {
                "type": "MultiGoal",
                "target_name_space": "goal_",
                "trigger_dist": trigger_dist,
                "enable_dependent_wp": True,
                "enable_random_goal": True,
                "dist_range": [10, 40],
            }
        )
        config.update(
            {
                "duration": 2400,
                "simulation_frequency": 30,  # [hz]
                "policy_frequency": 10,  # [hz] has to be greater than 5 to overwrite backup controller
                "reward_weights": np.array(
                    [100, 0.9, 0.1, 0.1]
                ),  # success, tracking, action, bonus
                "tracking_reward_weights": np.array(
                    [0.6, 0.2, 0.1, 0.1]
                ),  # z_diff, planar_dist, yaw_diff, vel_diff
                "success_threshhold": trigger_dist,  # [meters]
                "reward_scale": 0.05,
                "clip_reward": False,
                "enable_residual_ctrl": True,
                "mixer_type": "absolute",  # absolute, relative, hybrid
                "mixer_param": (0.5, 0.5),  # alpha, beta
                "base_ctrl_config": {
                    "yaw": {
                        "pid_param": np.array([1.0, 0.01, 0.025]),
                        "gain": 0.3,
                        "d_from_sensor": True,
                    },
                    "alt": {
                        "pid_param": np.array([1.0, 0.01, 0.5]),
                        "gain": 2.0,
                        "offset": 0.005,
                    },
                    "vel": {
                        "pid_param": np.array([0.7, 0.01, 0.5]),
                        "gain": 1.0,
                    },
                },
            }
        )
        return config

    def __init__(self, config: Optional[Dict[Any, Any]] = None) -> None:
        super().__init__(config=config)
        # last PID base action, fed back into observation and the mixer
        self.base_act = np.zeros(self.action_type.act_dim)
        delta_t = 1 / self.config["policy_frequency"]
        self.yaw_basectrl = PIDController(
            delta_t=delta_t,
            **self.config["base_ctrl_config"]["yaw"],
        )
        self.alt_basectrl = PIDController(
            delta_t=delta_t,
            **self.config["base_ctrl_config"]["alt"],
        )
        self.vel_basectrl = PIDController(
            delta_t=delta_t,
            **self.config["base_ctrl_config"]["vel"],
        )

    def _create_pub_and_sub(self):
        """Add residual-control rviz publishers on top of the parent's."""
        self.ang_vel_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_ang_vel", Point, queue_size=1
        )
        self.base_act_rviz_pub = rospy.Publisher(
            self.config["name_space"] + "/rviz_base_act", Quaternion, queue_size=1
        )
        return super()._create_pub_and_sub()

    @profile
    def one_step(self, action: Action) -> Tuple[Observation, float, bool, dict]:
        """Blend the agent action with the PID base action, simulate one
        step, and observe the result.

        Args:
            action (Action): action from the agent [-1,1] with size (4,)

        Returns:
            Tuple[Observation, float, bool, dict]: step result; ``info``
            additionally carries ``base_act`` and ``joint_act``.
        """
        joint_act = self.mixer(
            action,
            self.base_act,
            self.config["mixer_param"][0],
            self.config["mixer_param"][1],
        )
        self._simulate(joint_act)
        # recompute the base action *after* simulation so it reflects the
        # newest state; it is fed back into the observation below
        self.base_act = (
            self.base_ctrl() if self.config["enable_residual_ctrl"] else np.zeros(4)
        )
        obs, obs_info = self.observation_type.observe(self.base_act.copy())
        reward, reward_info = self._reward(
            obs.copy(), joint_act, copy.deepcopy(obs_info)
        )
        terminal = self._is_terminal(copy.deepcopy(obs_info))
        info = {
            "step": self.steps,
            "obs": obs,
            "obs_info": obs_info,
            "act": action,
            "base_act": self.base_act,
            "joint_act": joint_act,
            "reward": reward,
            "reward_info": reward_info,
            "terminal": terminal,
        }
        self._update_goal_and_env()
        self._step_info(info)
        return obs, reward, terminal, info

    def _step_info(self, info: dict):
        """Publish residual-control extras, then the parent's step info."""
        obs_info = info["obs_info"]
        proc_info = obs_info["proc_dict"]
        self.ang_vel_rviz_pub.publish(Point(0, 0, proc_info["yaw_vel"]))
        self.base_act_rviz_pub.publish(Quaternion(*info["base_act"]))
        return super()._step_info(info)

    def mixer(self, action, base_act, alpha=0.5, beta=0.5):
        """Blend the agent action with the PID base action.

        absolute: convex combination weighted by beta
        relative: base action scaled by (1 + beta * action)
        hybrid:   alpha-weighted mix of the two above
        """
        if not self.config["enable_residual_ctrl"]:
            return action
        if self.config["mixer_type"] == "absolute":
            joint_act = beta * action + (1 - beta) * base_act
        elif self.config["mixer_type"] == "relative":
            joint_act = base_act * (1 + beta * action)
        elif self.config["mixer_type"] == "hybrid":
            absolute = beta * action + (1 - beta) * base_act
            relative = base_act * (1 + beta * action)
            joint_act = alpha * absolute + (1 - alpha) * relative
        else:
            raise NotImplementedError
        # servo channel always comes straight from the agent
        joint_act[2] = action[2]
        return np.clip(joint_act, -1, 1)

    def base_ctrl(self):
        """
        generate base control signal from the yaw/alt/vel PID controllers
        """
        obs, obs_dict = self.observation_type.observe()
        yaw_ctrl = self.yaw_basectrl.action(
            err=-obs[2], err_d=obs_dict["angular_velocity"][2]
        )
        alt_ctrl = self.alt_basectrl.action(obs[0])
        vel_ctrl = self.vel_basectrl.action(-obs[3])
        # order: [yaw, alt, servo (unused), vel]
        return np.clip(np.array([yaw_ctrl, alt_ctrl, 0, vel_ctrl]), -1, 1)

    def clear_basectrl_param(self):
        """Reset the integral/derivative memory of all base controllers."""
        self.yaw_basectrl.clear()
        self.alt_basectrl.clear()
        self.vel_basectrl.clear()

    def reset(self) -> Observation:
        self.clear_basectrl_param()
        return super().reset()

    def _reward(
        self, obs: np.array, act: np.array, obs_info: dict
    ) -> Tuple[float, dict]:
        """calculate reward
        total_reward = success_reward + tracking_reward + action_reward (+ bonus)
        success_reward: +1 if agent stay in the vicinity of goal
        tracking_reward: - L2 distance to goal - yaw angle difference - z diff - vel diff
        action_reward: penalty for motor use
        bonus_reward: alignment towards the *next* waypoint, weighted more
        strongly the closer the agent is to the current goal

        Args:
            obs (np.array): ("z_diff", "planar_dist", "yaw_diff", "vel_diff", "vel", "yaw_vel", "action")
            act (np.array): agent action [-1,1] with size (4,)
            obs_info (dict): contain all information of a step

        Returns:
            Tuple[float, dict]: [reward scalar and a detailed reward info]
        """
        track_weights = self.config["tracking_reward_weights"].copy()
        reward_weights = self.config["reward_weights"].copy()
        success_reward = self.compute_success_rew(
            obs_info["position"], obs_info["goal_dict"]["position"]
        )
        obs[1] = (obs[1] + 1) / 2  # dist -1 should have max reward
        tracking_reward = np.dot(track_weights, -np.abs(obs[0:4]))
        action_reward = self.action_type.action_rew()
        bonus_reward = 0
        if self.config["observation"].get("enable_next_goal", False):
            dist = np.linalg.norm(
                obs_info["position"][0:2] - obs_info["goal_dict"]["position"][0:2]
            )
            bonus_reward += -np.abs(obs_info["proc_dict"]["next_yaw_diff"]) / (1 + dist)
        reward = self.config["reward_scale"] * np.dot(
            reward_weights,
            (success_reward, tracking_reward, action_reward, bonus_reward),
        )
        if self.config["clip_reward"]:
            reward = np.clip(reward, -1, 1)
        rew_info = (
            reward,
            success_reward,
            tracking_reward,
            action_reward,
            bonus_reward,
        )
        return float(reward), {"rew_info": rew_info}

    # _is_terminal is intentionally NOT overridden here: the previous
    # override was byte-identical to PlanarNavigateEnv._is_terminal, so
    # the inherited implementation is used instead.
class YawControlEnv(ResidualPlanarNavigateEnv):
    """Yaw-only control task on top of the residual-control environment.

    The agent outputs a single yaw command which is blended with a PID
    base controller; reward is based on heading error to a random goal.
    """

    @classmethod
    def default_config(cls) -> dict:
        """Config for the yaw task: no wind/buoyancy randomization, dummy
        yaw observation/action spaces, and a random heading goal."""
        config = super().default_config()
        config["simulation"].update(
            {
                "enable_wind": False,
                "enable_wind_sampling": False,
                "wind_speed": 0.0,
                "enable_buoyancy_sampling": False,
            }
        )
        config["observation"].update(
            {
                "type": "DummyYaw",
                "noise_stdv": 0.02,
                "scale_obs": True,
                "enable_rsdact_feedback": True,
                # moved here from config["simulation"]: this flag is read
                # from the observation config (see the parent's _reward)
                "enable_next_goal": False,
            }
        )
        config["action"].update(
            {
                "type": "DummyYawAction",
                "act_noise_stdv": 0.05,
                "disable_servo": True,
            }
        )
        config["target"].update(
            {
                "type": "RandomGoal",
                "target_name_space": "goal_",
                "new_target_every_ts": 1200,
            }
        )
        config.update(
            {
                "duration": 1200,
                "simulation_frequency": 30,  # [hz]
                "policy_frequency": 10,  # [hz] has to be greater than 5 to overwrite backup controller
                "reward_weights": np.array([1.0, 1.0, 0]),  # success, tracking, action
                "tracking_reward_weights": np.array([1.0]),  # yaw_diff
                "success_seconds": 5,  # seconds within threshold as success
                "reward_scale": 0.1,
                "clip_reward": False,
                "enable_residual_ctrl": True,
                "mixer_type": "absolute",  # absolute, relative, hybrid
                "mixer_param": (0.5, 0.5),  # alpha, beta
                "pid_param": np.array([1.0, 0.0, 0.05]),
            }
        )
        return config

    def __init__(self, config: Optional[Dict[Any, Any]] = None) -> None:
        super().__init__(config=config)
        # NOTE(review): delta_t here is 10 / policy_frequency (1 s at the
        # default 10 Hz), unlike the parent's 1 / policy_frequency —
        # confirm this scaling is intentional.
        delta_t = 10 / self.config["policy_frequency"]
        self.yaw_basectrl = PIDController(
            pid_param=self.config["pid_param"], delta_t=delta_t, d_from_sensor=True
        )
        self.success_cnt = 0  # consecutive steps with small yaw error

    @profile
    def one_step(self, action: Action) -> Tuple[Observation, float, bool, dict]:
        """Blend the agent action with the PID base action, simulate one
        step, and observe the result.

        Args:
            action (Action): yaw action from the agent in [-1, 1]

        Returns:
            Tuple[Observation, float, bool, dict]: step result; ``info``
            additionally carries ``base_act`` and ``joint_act``.
        """
        joint_act = self.mixer(
            action,
            self.base_act,
            self.config["mixer_param"][0],
            self.config["mixer_param"][1],
        )
        self._simulate(joint_act)
        self.base_act = (
            self.base_ctrl()
            if self.config["enable_residual_ctrl"]
            else np.zeros(self.action_type.act_dim)
        )
        obs, obs_info = self.observation_type.observe(self.base_act.copy())
        reward, reward_info = self._reward(
            obs.copy(), joint_act, copy.deepcopy(obs_info)
        )
        terminal = self._is_terminal(copy.deepcopy(obs_info))
        info = {
            "step": self.steps,
            "obs": obs,
            "obs_info": obs_info,
            "act": action,
            "base_act": self.base_act,
            "joint_act": joint_act,
            "reward": reward,
            "reward_info": reward_info,
            "terminal": terminal,
        }
        self._update_goal_and_env()
        self._step_info(info)
        return obs, reward, terminal, info

    def _step_info(self, info: dict):
        """publish all the step information to rviz

        Args:
            info ([dict]): [dict contain all step information]
        """
        obs_info = info["obs_info"]
        proc_info = obs_info["proc_dict"]
        self.rew_rviz_pub.publish(
            Float32MultiArray(data=np.array(info["reward_info"]["rew_info"]))
        )
        # Quaternion/Point are used as plain float containers for plotting
        self.state_rviz_pub.publish(Quaternion(0, proc_info["yaw_diff"], 0, 0))
        self.ang_diff_rviz_pub.publish(Point(0, 0, proc_info["yaw_diff"]))
        self.ang_vel_rviz_pub.publish(Point(0, 0, proc_info["yaw_vel"]))
        self.act_rviz_pub.publish(Quaternion(*info["act"], 0, 0, 0))
        self.base_act_rviz_pub.publish(
            Quaternion(*info["base_act"], *info["joint_act"], 0, 0)
        )
        self.pos_cmd_pub.publish(Point(*self.goal["position"]))
        if self.dbg:
            print(
                f"================= [ PlanarNavigateEnv ] step {self.steps} ================="
            )
            print("STEP INFO:", info)
            print("\r")

    def mixer(self, action, base_act, alpha=0.5, beta=0.5):
        """Blend agent and base actions; unlike the parent there is no
        servo channel to pass through."""
        if not self.config["enable_residual_ctrl"]:
            return action
        if self.config["mixer_type"] == "absolute":
            joint_act = beta * action + (1 - beta) * base_act
        elif self.config["mixer_type"] == "relative":
            joint_act = base_act * (1 + beta * action)
        elif self.config["mixer_type"] == "hybrid":
            absolute = beta * action + (1 - beta) * base_act
            relative = base_act * (1 + beta * action)
            joint_act = alpha * absolute + (1 - alpha) * relative
        else:
            raise NotImplementedError
        return np.clip(joint_act, -1, 1)

    def base_ctrl(self):
        """
        generate the PID base yaw command from the current observation
        """
        obs, obs_dict = self.observation_type.observe()
        yaw_ctrl = self.yaw_basectrl.action(
            err=-obs[0], err_d=obs_dict["angular_velocity"][2]
        )
        return np.clip(np.array([yaw_ctrl]), -1, 1)

    def _reward(
        self, obs: np.array, act: np.array, obs_info: dict
    ) -> Tuple[float, dict]:
        """calculate reward
        total_reward = success_reward + tracking_reward + action_reward
        success_reward: 1 after the yaw error stays small long enough
        tracking_reward: - yaw angle difference
        action_reward: penalty for motor use

        Args:
            obs (np.array): ("yaw_diff", "yaw_vel", "action")
            act (np.array): agent action [-1,1] with size (1,)
            obs_info (dict): contain all information of a step

        Returns:
            Tuple[float, dict]: [reward scalar and a detailed reward info]
        """
        track_weights = self.config["tracking_reward_weights"].copy()
        reward_weights = self.config["reward_weights"].copy()
        yaw_diff = obs_info["proc_dict"]["yaw_diff"]
        success_reward = self.compute_success_rew(yaw_diff)
        tracking_reward = np.dot(track_weights, -np.abs(obs[0]))
        action_reward = self.action_type.action_rew()
        reward = self.config["reward_scale"] * np.dot(
            reward_weights,
            (success_reward, tracking_reward, action_reward),
        )
        if self.config["clip_reward"]:
            reward = np.clip(reward, -1, 1)
        rew_info = (reward, success_reward, tracking_reward, action_reward)
        return float(reward), {"rew_info": rew_info}

    def compute_success_rew(
        self,
        yaw_diff: np.array,
        epsilon: float = 0.1,
    ) -> float:
        """1.0 once |yaw_diff| has stayed below epsilon long enough.

        Args:
            yaw_diff (np.array): [scaled yaw diff]
            epsilon (float, optional): [tolerence]. Defaults to 0.1.

        Returns:
            float: [0.0 or 1.0]
        """
        if np.abs(yaw_diff) < epsilon:
            self.success_cnt += 1
        else:
            self.success_cnt = 0
        # NOTE(review): the counter is incremented once per policy step
        # (policy_frequency Hz) but compared against success_seconds *
        # simulation_frequency — confirm which frequency is intended.
        return float(
            self.success_cnt
            > self.config["success_seconds"] * self.config["simulation_frequency"]
        )

    def _is_terminal(self, obs_info: dict) -> bool:
        """if episode terminate
        - time: episode duration finished

        Returns:
            bool: [episode terminal or not]
        """
        time = False
        if self.config["duration"] is not None:
            time = self.steps >= int(self.config["duration"]) - 1
        return time

    def close(self) -> None:
        # NOTE(review): close_simulation is imported only under the
        # __main__ guard below; calling close() from library code will
        # raise NameError — confirm and move the import to module level.
        close_simulation()
if __name__ == "__main__":
    # Manual smoke-test / profiling entry point for the environments.
    import copy

    from blimp_env.envs.common.gazebo_connection import GazeboConnection
    from blimp_env.envs.script import close_simulation

    # ============== profile ==============#
    # 1. pip install line-profiler
    # 2. in terminal:
    # kernprof -l -v blimp_env/envs/planar_navigate_env.py
    auto_start_simulation = False  # set True to (re)spawn the simulator
    if auto_start_simulation:
        close_simulation()

    ENV = ResidualPlanarNavigateEnv  # PlanarNavigateEnv, ResidualPlanarNavigateEnv, YawControlEnv
    env_kwargs = {
        "DBG": True,
        "simulation": {
            "gui": True,
            "enable_meshes": True,
            "auto_start_simulation": auto_start_simulation,
            "enable_wind": True,
            "enable_wind_sampling": True,
            "enable_buoyancy_sampling": False,
            "wind_speed": 0,
            "wind_direction": (1, 0),
            "position": (0, 0, 30),  # initial spawned position
        },
        "observation": {
            "DBG_ROS": False,
            "DBG_OBS": False,
            "noise_stdv": 0.02,
        },
        "action": {
            "DBG_ACT": False,
            "act_noise_stdv": 0.05,
            "disable_servo": True,
        },
        "target": {
            "DBG_ROS": False,
            "enable_random_goal": False,
            "trigger_dist": 5,
            "wp_list": [
                (20, 20, -100, 3),
                (20, -20, -100, 3),
                (-20, -20, -100, 3),
                (-20, 20, -100, 3),
            ],
        },
        "mixer_type": "absolute",
        "mixer_param": (0.5, 0),
    }

    @profile
    def env_step():
        # drive the env with zero actions so only the base controller acts
        env = ENV(copy.deepcopy(env_kwargs))
        env.reset()
        for _ in range(100000):
            action = env.action_space.sample()
            action = np.zeros_like(action)  # [yaw, pitch, servo, thrust]
            obs, reward, terminal, info = env.step(action)
        GazeboConnection().unpause_sim()

    env_step()
|
nilq/baby-python
|
python
|
import os.path
import unittest
import unittest.mock as mock
import psutil
import pytest
import fpgaedu.vivado
def find_vivado_pids():
    '''
    return a set containing the pids for the vivado processes currently
    activated.
    '''
    return {proc.pid for proc in psutil.process_iter() if 'vivado' in proc.name()}
class SessionTestCase(unittest.TestCase):
    """Integration/unit tests for fpgaedu.vivado.Session lifecycle and RPC."""

    def test_init_tcl_init_script_exists(self):
        # the session must point at real vivado / TCL bootstrap paths
        session = fpgaedu.vivado.Session()
        self.assertTrue(os.path.exists(session._vivado_path))
        self.assertTrue(os.path.exists(session._tcl_init_script))

    def test_init_server_port_property(self):
        # NOTE(review): 99999 exceeds the valid TCP port range (max 65535);
        # confirm whether Session is expected to validate this.
        session = fpgaedu.vivado.Session(server_port=99999)
        self.assertEqual(session.server_port, 99999)

    @pytest.mark.timeout(2)
    def test_start_timeout(self):
        '''
        Ensure timeout works and that any spawned process isdestroyed afterwards
        '''
        pids_before = find_vivado_pids()
        session = fpgaedu.vivado.Session()
        # force the RPC handshake to fail so start() hits its timeout
        session._rpc_proxy = mock.Mock()
        session._rpc_proxy.call.side_effect = fpgaedu.jsonrpc2.EndpointError
        with self.assertRaises(fpgaedu.vivado.SessionTimeoutError):
            session.start(timeout=1)
        # no vivado process present before the test should have been killed
        pids_after = find_vivado_pids()
        self.assertEqual(len(pids_before - pids_after), 0)

    @pytest.mark.timeout(60)
    def test_start(self):
        pids_before = find_vivado_pids()
        session = fpgaedu.vivado.Session()
        session.start(timeout=60)
        session_pids = find_vivado_pids() - pids_before
        # vivado process and a child process that was created in sourcing the
        # server script.
        self.assertGreaterEqual(len(session_pids), 1)
        session.stop()
        # stop() must terminate every process the session spawned
        self.assertTrue(find_vivado_pids().isdisjoint(session_pids))

    @mock.patch('random.randint')
    def test_echo(self, mock_randint):
        # pin the random nonce so the RPC payload is deterministic
        mock_randint.return_value = 12345
        session = fpgaedu.vivado.Session()
        session._rpc_proxy = mock.Mock()
        session._rpc_proxy.call = mock.Mock(return_value={"echo": 12345})
        session.echo()
        session._rpc_proxy.call.assert_called_with('echo', params={"echo": 12345})

    def test_echo_raises(self):
        # echo() must reject a malformed (None) RPC response
        session = fpgaedu.vivado.Session()
        session._rpc_proxy = mock.Mock()
        session._rpc_proxy.call.return_value = None
        with self.assertRaises(AssertionError):
            session.echo()

    def test_program(self):
        # NOTE(review): hardcoded absolute path makes this test machine-
        # specific, and the started session is never stopped — confirm.
        session = fpgaedu.vivado.Session()
        session.start()
        target = session.get_target_identifiers()[0]
        device = session.get_device_identifiers(target)[0]
        with open("/home/matthijsbos/Dropbox/fpgaedu/python/fpgaedu/fpgaedu/tcl/test/resources/nexys4.bit", 'rb') as f:
            bitstream_data = f.read()
        session.program(target, device, bitstream_data)
|
nilq/baby-python
|
python
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for array_grad."""
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayGradTest(test.TestCase):
    """Gradient checks for array_ops.gather_v2 and broadcast_to."""

    def _testGrad(self, f, x):
        # numeric-vs-theoretical gradient comparison must stay tiny
        max_error = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(f, [x]))
        self.assertLess(max_error, 1e-4)

    def test_gather_v2_simple(self):
        # 1-D params, 1-D indices (with a repeated index)
        x = constant_op.constant([1., 2., 3., 4., 5.], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x, constant_op.constant([2, 0, 2, 4], dtype=dtypes.int32))

        self._testGrad(f, x)

    def test_gather_v2_more_index_dims(self):
        # 2-D indices gathered from 1-D params
        x = constant_op.constant([1., 2., 3., 4., 5.], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x, constant_op.constant([[2, 0], [2, 4]], dtype=dtypes.int32))

        self._testGrad(f, x)

    def test_gather_v2_more_param_dims(self):
        # 2-D params, gathering whole rows
        x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x, constant_op.constant([1, 0], dtype=dtypes.int32))

        self._testGrad(f, x)

    def test_gather_v2_axis(self):
        # gather along axis=1 (columns)
        x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x, constant_op.constant([1, 0], dtype=dtypes.int32), axis=1)

        self._testGrad(f, x)

    def test_gather_v2_batch_dims(self):
        # one leading batch dimension shared by params and indices
        x = constant_op.constant([[1., 2.], [3., 4.]], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x,
                constant_op.constant([[1, 0], [0, 0]], dtype=dtypes.int32),
                axis=1,
                batch_dims=1)

        self._testGrad(f, x)

    def test_gather_v2_2batch_dims(self):
        # two leading batch dimensions
        x = constant_op.constant([[[1., 2.], [3., 4.]]], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x,
                constant_op.constant([[[1, 0], [0, 0]]], dtype=dtypes.int32),
                axis=2,
                batch_dims=2)

        self._testGrad(f, x)

    def test_gather_v2_batch_dims_with_axis(self):
        # batch_dims < axis: gather along a deeper axis per batch
        x = constant_op.constant([[[1., 2.]], [[3., 4.]]], dtype=dtypes.float64)

        def f(x):
            return array_ops.gather_v2(
                x,
                constant_op.constant([[0], [0]], dtype=dtypes.int32),
                axis=2,
                batch_dims=1)

        self._testGrad(f, x)

    def test_broadcast_to(self):
        # broadcast (3,) -> (2, 3) with int32 shape tensor
        x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
        y = constant_op.constant([2, 3], dtype=dtypes.int32)

        def f(x):
            return array_ops.broadcast_to(
                x,
                y)

        self._testGrad(f, x)

    def test_broadcast_to_int64(self):
        # same as above but with an int64 shape tensor
        x = constant_op.constant([1., 2., 3.], dtype=dtypes.float64)
        y = constant_op.constant([2, 3], dtype=dtypes.int64)

        def f(x):
            return array_ops.broadcast_to(
                x,
                y)

        self._testGrad(f, x)
if __name__ == "__main__":
    test.main()  # run the TensorFlow test runner
|
nilq/baby-python
|
python
|
#imports
from random import randint
# Population-wide exposure standard deviations (monthly / weekly).
stDevMonthly = 0.07
stDevWeekly = 0.0095


def getDosageStats(popSize):
    """Return per-person dosage stats for a population.

    Each entry is ``[dose, stDevMonthly, stDevWeekly]`` where the dose is
    drawn uniformly from the integers 3..7 inclusive.

    Args:
        popSize (int): number of people to sample; 0 yields an empty list.

    Returns:
        list[list]: one 3-element list per person.
    """
    return [[randint(3, 7), stDevMonthly, stDevWeekly] for _ in range(popSize)]
|
nilq/baby-python
|
python
|
from flask import Flask
from flask import render_template
app = Flask(__name__)


@app.route('/')
def index():
    """Render the landing page with a hard-coded demo name."""
    name = 'Jhasmany'
    return render_template('index.html', name=name)


@app.route('/client')
def client():
    """Render the client page with a static demo list."""
    list_name = ['Test1', 'Test2', 'Test3']
    return render_template('client.html', list=list_name)


if __name__ == '__main__':
    # development server only; debug must be disabled in production
    app.run(debug=True, port=8000)
|
nilq/baby-python
|
python
|
#------------------------------------------------------------------------------
# Get the trending colors.
# GET /v1/color_trends/{report_name}/trending_colors
#------------------------------------------------------------------------------
import os
import json
import requests
from urlparse import urljoin
from pprint import pprint
from props import *
# Replace this with the custom url generated for you.
api_gateway_url = props['api_gateway_url']

# Pass the api key into the header
# Replace 'your_api_key' with your API key.
headers = {'X-Api-Key': props['X-Api-Key']}

# Request the top 20 trending colors for the report.
params = {}
params['colors'] = 20

# Optional
#params['catalog_name'] = 'vogue-autumn-winter-2017'

report_name = 'vogue-autumn-winter'

api_endpoint = '/v1/color_trends/%s/trending_colors'%(report_name)
url = urljoin(api_gateway_url,api_endpoint)
response = requests.get(url,headers=headers,params=params)
# Python 2 print statements (this script targets Python 2: urlparse import).
print response.status_code
pprint(response.json())

# Print the colors: per-year popularity, forecast mean, per-year trend,
# trend forecast mean, and the pantone id of each color.
results = response.json()
years = ['2013','2014','2015','2016','2017']
for color_info in results['colors']:
    print('popularity [%s] [%+1.2f] trend [%s] [%+1.2f] [%s]'%(
        ' '.join(['%1.2f'%(color_info['popularity_by_id']['%s-%s'%(report_name,year)]) for year in years]),
        color_info['popularity_forecast']['mean'],
        ' '.join(['%+1.2f'%(color_info['trend_by_id']['%s-%s'%(report_name,year)]) for year in years]),
        color_info['trend_forecast']['mean'],
        color_info['pantone_id']))
|
nilq/baby-python
|
python
|
#coding=utf-8
from __future__ import print_function
import os
import sys
# Do not use end=None: print() would then append '\n' automatically.
print('test\r\ntest',end=u'')
# Observed bytes on Windows: 74 65 73 74 0D 0D 0A 74 65 73 74
# ('\n' becomes 0D 0A through the text-mode stream, so '\r\n' -> 0D 0D 0A).
# Debugging the CPython (Windows) source shows the call chain:
# builtin_print() -> PyFile_WriteObject() -> file_Pyobject_Print() ->
# internal_print() -> string_print()
# bltinmodule.c
# fileobject.c
# object.c
# stringobject.c -> fwrite(data, 1, (size_t)size, fp);
|
nilq/baby-python
|
python
|
from . import serializers
from rest_framework import generics,authentication,permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class CreateUserView(generics.CreateAPIView):
    """POST endpoint that registers a new user."""
    serializer_class = serializers.UserApiSerializer
class CreateTokenView(ObtainAuthToken):
    """POST endpoint that exchanges credentials for an auth token."""
    serializer_class = serializers.AuthTokenSerializer
    # use the configured renderers so the browsable API works
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
    """Retrieve/update the authenticated user's own profile."""
    serializer_class = serializers.UserApiSerializer
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get_object(self):
        # operate on the requesting user rather than a pk from the URL
        return self.request.user
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Build the paper figures from precomputed model outputs: regression
# predictions, permutation-test scores, and per-feature coefficients.
# Load required modules
import matplotlib
matplotlib.use('agg')  # headless backend: render straight to file
import sys, os, argparse, json, matplotlib.pyplot as plt, seaborn as sns, pandas as pd, numpy as np
import matplotlib.patches as mpatches
from models import EN, RF, IMPORTANCE_NAMES, FEATURE_CLASS_NAMES
sns.set_style('whitegrid')
# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-bf', '--biomarkers_file', type=str, required=True)
parser.add_argument('-rf', '--results_file', type=str, required=True)
parser.add_argument('-cf', '--coefficients_file', type=str, required=True)
parser.add_argument('-prf', '--permuted_results_file', type=str, required=True)
parser.add_argument('-o', '--output_prefix', type=str, required=True)
parser.add_argument('-e', '--extension', type=str, required=False, default='pdf')
args = parser.parse_args(sys.argv[1:])
# Load the input file
with open(args.biomarkers_file, 'r') as IN:
    biomarker_plot_items = json.load(IN)['Biomarkers']
with open(args.results_file, 'r') as IN:
    results = json.load(IN)
    model = results['params']['model']
with open(args.permuted_results_file, 'r') as IN:
    permuted_results = json.load(IN)
# Tab-separated table of per-feature coefficients/importances.
var_importance = pd.read_csv(args.coefficients_file, sep='\t')
###############################################################################
# FIGURE 1
###############################################################################
# Two panels: (a) predicted vs. held-out true values, (b) permutation test.
fig1, (ax1, ax2) = plt.subplots(1, 2)
fig1.set_size_inches(10, 5)
# Expanded clones (predicted vs. true)
pred = np.array(results['preds'])
true = np.array(results['true'])
variance_explained = results['variance_explained']
ax1.scatter(pred, true)
# y = x reference line spanning the joint range of both axes.
min_val = min(pred.min(),true.min())
max_val = max(pred.max(),true.max())
# NOTE(review): the 'k-' format requests black but color='r' overrides it,
# so the line is drawn red -- confirm which was intended.
ax1.plot([min_val, max_val], [min_val, max_val], 'k-', color = 'r')
ax1.set_xlabel('Log predicted number of expanded clones', fontsize=16)
ax1.set_ylabel('Log held-out ground truth', fontsize=16)
ax1.text(0.01, 0.95, 'Variance explained: %.2f%%' % (variance_explained*100.),
         ha='left', va='top', transform=ax1.transAxes, fontsize=16)
ax1.set_title('(a)', fontsize=16)
# Permutation scores
permutation_scores = np.array(permuted_results['permutation_scores'])
true_score = permuted_results['true_score']
pvalue = permuted_results['pvalue']
# Histogram of the permuted scores with the true score as a vertical line.
ax2.hist(permutation_scores, 20,
         label='Permuted ($p < %.1g$)' % pvalue,
         edgecolor='black')
ylim = ax2.get_ylim()
ax2.plot(2 * [true_score], ylim, '--g', linewidth=3,
         label='True (%.3f)' % true_score)
ax2.set_ylim(ylim)  # restore the limits changed by the vertical line
ax2.legend(fontsize=14)
ax2.set_xlabel('Leave-one-out mean squared error', fontsize=16)
ax2.set_title('(b)', fontsize=16)
# Save to file and clear
plt.tight_layout()
plt.savefig('%s1.%s' % (args.output_prefix, args.extension))
plt.clf()
###############################################################################
# FIGURE 2
###############################################################################
# Plot the variable importances (coloring by Class)
var_importance = var_importance.reset_index()
var_importance_name = IMPORTANCE_NAMES[model]
var_importance = var_importance.rename(index=str, columns={"#Feature name": "Feature", "Score": var_importance_name})
# Map raw class names to their display names (keys are capitalized).
var_importance['Class'] = var_importance['Class'].map({fc.capitalize(): fcn for fc, fcn in FEATURE_CLASS_NAMES.items() })
#
classToColor = dict(zip(['Tumor', 'Circulating', 'Clinical'], sns.color_palette()[:3]))
featureToImportance = dict(zip(var_importance['Feature'], var_importance[var_importance_name]))
featureToClass = dict(zip(var_importance['Feature'], var_importance['Class']))
# Bars ordered by absolute importance, colored by feature class.
features = sorted(var_importance['Feature'], key=lambda f: abs(featureToImportance[f]), reverse=True)
classes = [ featureToClass[f] for f in features ]
palette = [ classToColor[c] for c in classes ]
sns.set(font_scale=0.8, style='whitegrid') # smaller
ax = sns.barplot(x=var_importance_name, y="Feature", data=var_importance,
                 label=var_importance_name, palette=palette, order=features)
ax.set_xlabel(ax.get_xlabel(), fontsize=16)
ax.set_ylabel(ax.get_ylabel(), fontsize=16)
# Add custom legend
patches = [ mpatches.Patch(color=col, label=c) for c, col in classToColor.items() ]
plt.legend(handles=patches, fontsize=14)
# Output to file
plt.subplots_adjust(left=0.3, right=0.95, top=0.95)
plt.savefig('%s2.%s' % (args.output_prefix, args.extension))
plt.clf()
# Reset the font scale changed above for any later plotting.
sns.set(font_scale=1, style='whitegrid')
###############################################################################
# FIGURE 3
###############################################################################
# Boxplots comparing each biomarker between patients with and without
# durable benefit (progression-free survival > 6 months).
# Load the data and use nicer names
biomarker_nice_names = {
    "PD-L1": "PD-L1 expression",
    "missense_snv_count": "Missense SNV count",
    "expressed_neoantigen_count": "Expressed neoantigen count",
    "Predicted N Expanded Clones that were TILs A->B": "Predicted expanded TIL clones",
    "N Expanded Clones that were TILs A->B": "Expanded TIL clones"
}
for item in biomarker_plot_items:
    item['Progression-free survival'] = '> 6 months' if item['Benefit'] else '≤ 6 months'
    item['Biomarker'] = biomarker_nice_names[item['Biomarker']]
    # PD-L1 values arrive as strings like "IC2"; keep just the numeric level.
    if type(item['Biomarker value']) == type('') and item['Biomarker value'].startswith('IC'):
        item['Biomarker value'] = int(item['Biomarker value'][2:])
biomarker_df = pd.DataFrame(biomarker_plot_items)
biomarker_df = biomarker_df.dropna(axis='rows', how='any')
# BUG FIX: filter on the *renamed* value 'Expanded TIL clones' (lower-case
# "clones", see biomarker_nice_names). The old comparison against
# 'Expanded TIL Clones' never matched, so the ground-truth rows silently
# stayed in the frame and were only hidden because col_order omits them.
biomarker_df = biomarker_df.loc[biomarker_df['Biomarker'] != 'Expanded TIL clones']
# Plot with seaborn
ordered_biomarkers = ['Predicted expanded TIL clones', 'Missense SNV count', 'Expressed neoantigen count', 'PD-L1 expression']
g = sns.FacetGrid(biomarker_df, col="Biomarker", sharex=True, sharey=False,
                  col_wrap=2, col_order=ordered_biomarkers)
g = g.map(sns.boxplot, "Progression-free survival", "Biomarker value",
          palette=sns.color_palette()[:2], width=0.4, order=['≤ 6 months', '> 6 months'],
          autorange=True)
g = g.map(sns.swarmplot, "Progression-free survival", "Biomarker value", "Treated",
          order=['≤ 6 months', '> 6 months'], hue_order=["Yes", "No"],
          palette=[sns.color_palette()[2], "k"])
# Custom y-axis for PD-L1 expression plot
g.axes[-1].set_yticks((0, 1, 2))
g.axes[-1].set_yticklabels(('<1%', '1-5%', '≥5%'))
#g.axes[-1].legend(loc='upper center', bbox_to_anchor=(-0.25, -0.2), ncol=2)
# Prepend subfigure letter to titles
for biomarker, ax, letter in zip(ordered_biomarkers, g.axes, 'abcd'):
    ax.set_title('(%s)' % letter)
    ax.set_ylabel(biomarker)
# Show the plot
plt.subplots_adjust(bottom=0.12)
plt.savefig('%s3.%s' % (args.output_prefix, args.extension))
|
nilq/baby-python
|
python
|
def other_side_of_seven(num):
    """Reflect *num* across 7 on the number line.

    The mirror image of ``num`` about 7 is ``num + 2 * (7 - num)``,
    i.e. ``14 - num``: 4 -> 10 and 12 -> 2.
    """
    dist = 7 - num
    # BUG FIX: start from num (not 7); stepping 2*dist from num lands on
    # the reflection. The old `7 + 2*dist` overshot (gave 13 for 4).
    return num + (2 * dist)
print(other_side_of_seven(4))
print(other_side_of_seven(12))
|
nilq/baby-python
|
python
|
import argparse
def parse():
parser = argparse.ArgumentParser()
parser.add_argument("URL", nargs='?')
parser.add_argument("-s", action="store_true")
parser.add_argument("-l", type=str)
parser.add_argument("-t", type=str)
args = parser.parse_args()
return args
|
nilq/baby-python
|
python
|
import torch
from torchtext import data
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.nn.functional as F
import spacy
nlp = spacy.load('en')
# Fix RNG state so vocab/model initialisation is reproducible.
SEED = 1
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
TEXT = data.Field(tokenize='spacy')
LABEL = data.LabelField(tensor_type=torch.FloatTensor)
print("loading dataset clean_Shoes300.tsv...")
# NOTE(review): `train` (the dataset) is later shadowed by the `train`
# function defined below -- confirm the dataset is not needed afterwards.
train = data.TabularDataset.splits(
        path='../counter-sent-generation3/VAE/data/official_Amazon/',
        train='clean_Shoes300.tsv',
        format='tsv',
        fields=[('Text', TEXT),('Label', LABEL)])[0]
TEXT.build_vocab(train, max_size=60000, vectors="fasttext.en.300d",min_freq=1)
LABEL.build_vocab(train)
# Force each star-rating string to map to its own numeric value so the
# label index equals the rating itself.
LABEL.vocab.stoi['1']=1
LABEL.vocab.stoi['2']=2
LABEL.vocab.stoi['3']=3
LABEL.vocab.stoi['4']=4
LABEL.vocab.stoi['5']=5
class RNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, bidirectional, dropout):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.rnn = nn.LSTM(embedding_dim, hidden_dim, num_layers=n_layers, bidirectional=bidirectional, dropout=dropout)
self.fc = nn.Linear(hidden_dim*2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
#x = [sent len, batch size]
embedded = self.dropout(self.embedding(x))
#print("embedded shape: ", embedded.shape)
#embedded = [sent len, batch size, emb dim]
output, (hidden, cell) = self.rnn(embedded)
#print("output.shape: ",output.shape)
#print("output[-1].shape: ",output[-1].shape)
#print("hidden.shape: ",hidden.shape)
#print("cell.shape: ",cell.shape)
#output = [sent len, batch size, hid dim * num directions]
#hidden = [num layers * num directions, batch size, hid. dim]
#cell = [num layers * num directions, batch size, hid. dim]
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))
#print("hidden.shape: ",hidden.shape)
y = self.fc(hidden.squeeze(0))
#hidden [batch size, hid. dim * num directions]
#return self.fc(hidden.squeeze(0))
return y
# Hyper-parameters: the 300-d embedding size must match the fastText
# vectors loaded above.
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 300
HIDDEN_DIM = 500
OUTPUT_DIM = 1
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
model = RNN(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT)
print("model parameters: ")
# NOTE(review): this prints the bound method object itself, not the
# parameter tensors; model.parameters() was probably intended.
print(model.parameters)
# Seed the embedding layer with the pretrained fastText vectors.
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
import torch.optim as optim
optimizer = optim.Adam(model.parameters(),lr=0.0003)
# MSE loss: the labels are the numeric 1-5 ratings set up above.
criterion = nn.MSELoss()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device=torch.device('cpu')
model = model.to(device)
criterion = criterion.to(device)
import torch.nn.functional as F
def accuracy(preds, y):
    """Return the fraction of predictions matching *y* after rounding."""
    hits = (torch.round(preds) == y).float()
    return hits.sum() / len(hits)
def train(model, iterator, optimizer, criterion):
    """Run one training epoch; return (mean loss, mean accuracy)."""
    total_loss, total_acc = 0.0, 0.0
    model.train()  # enable dropout / allow gradient updates
    for step, batch in enumerate(iterator, start=1):
        optimizer.zero_grad()  # clear gradients accumulated by the last batch
        predictions = model(batch.Text).squeeze(1)
        loss = criterion(predictions, batch.Label)
        acc = accuracy(predictions, batch.Label)
        loss.backward()   # compute gradients
        optimizer.step()  # update parameters
        if step % 200 == 0:
            print("train batch loss: ", loss.item())
            print("train accuracy: ", acc.item())
        total_loss += loss.item()
        total_acc += acc.item()
    return total_loss / len(iterator), total_acc / len(iterator)
def evaluate(model, iterator, criterion):
    """Score *model* on *iterator* without any gradient updates."""
    total_loss, total_acc = 0.0, 0.0
    model.eval()  # disable dropout / batch-norm updates
    with torch.no_grad():
        for step, batch in enumerate(iterator, start=1):
            predictions = model(batch.Text).squeeze(1)
            loss = criterion(predictions, batch.Label)
            acc = accuracy(predictions, batch.Label)
            total_loss += loss.item()
            total_acc += acc.item()
            if step % 200 == 0:
                print("eval batch loss: ", loss.item())
                print("eval accuracy: ", acc.item())
    return total_loss / len(iterator), total_acc / len(iterator)
####################
# prediction
####################
print('loading model:')
# map_location keeps all tensors on CPU regardless of where they were saved.
model = torch.load('Amazon/Shoes_classifier',map_location=lambda storage,loc:storage)
model = model.to(device)
print("prediction of Shoes_classifier.....")
import spacy
nlp = spacy.load('en')  # re-loaded; already created at the top of the script
def predict_sentiment(sentence, model, TEXT):
    """Score one raw sentence with *model*, using TEXT's vocab for lookup."""
    token_ids = [TEXT.vocab.stoi[tok.text] for tok in nlp.tokenizer(sentence)]
    # Shape [sent len, 1]: the model expects a trailing batch dimension.
    batch = torch.LongTensor(token_ids).to(device).unsqueeze(1)
    model.eval()
    return model(batch).item()
# Score the held-out test sets of four product categories with the
# Shoes-trained classifier and write "<score>\t<gold label>" lines.
with open('../counter-sent-generation3/VAE/data/official_Amazon/clean_Beauty300test.tsv') as f:
    Beauty = f.readlines()
with open('../counter-sent-generation3/VAE/data/official_Amazon/clean_Apparel300test.tsv') as f:
    Apparel = f.readlines()
with open('../counter-sent-generation3/VAE/data/official_Amazon/clean_Jewelry300test.tsv') as f:
    Jewelry = f.readlines()
with open('../counter-sent-generation3/VAE/data/official_Amazon/clean_Shoes300test.tsv') as f:
    Shoes = f.readlines()
with open('Amazon/Shoes_pre_Beautytest.txt','w') as f:
    for line in Beauty:
        try:
            text = line.split('\t')[0]
            label = line.split('\t')[1]
            score = predict_sentiment(text,model,TEXT)
            f.write(str(score)+'\t'+label)
        except:
            # NOTE(review): bare except hides all errors; on failure the
            # gold label is written in place of the score, and if the very
            # first line fails before `label` is bound this raises
            # NameError -- confirm intended.
            f.write(label.strip('\n')+'\t'+label)
print('finish writing Shoes_pre_Beautytest.txt')
with open('Amazon/Shoes_pre_Appareltest.txt','w') as f:
    for line in Apparel:
        try:
            text = line.split('\t')[0]
            label = line.split('\t')[1]
            score = predict_sentiment(text,model,TEXT)
            f.write(str(score)+'\t'+label)
        except:
            # Same gold-label fallback as above.
            f.write(label.strip('\n')+'\t'+label)
print('finish writing Shoes_pre_Appareltest.txt')
with open('Amazon/Shoes_pre_Jewelrytest.txt','w') as f:
    for line in Jewelry:
        try:
            text = line.split('\t')[0]
            label = line.split('\t')[1]
            score = predict_sentiment(text,model,TEXT)
            f.write(str(score)+'\t'+label)
        except:
            # Same gold-label fallback as above.
            f.write(label.strip('\n')+'\t'+label)
print('finish writing Shoes_pre_Jewelrytest.txt')
with open('Amazon/Shoes_pre_Shoestest.txt','w') as f:
    for line in Shoes:
        try:
            text = line.split('\t')[0]
            label = line.split('\t')[1]
            score = predict_sentiment(text,model,TEXT)
            f.write(str(score)+'\t'+label)
        except:
            # Same gold-label fallback as above.
            f.write(label.strip('\n')+'\t'+label)
print('finish writing Shoes_pre_Shoestest.txt')
|
nilq/baby-python
|
python
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Rating, Site, Picture, Like, Category
class CategorySerialier(serializers.ModelSerializer):
    """Serializes a Category's id, description and icon.

    NOTE(review): the class name is misspelled ("Serialier"); renaming
    would break importers, so it is left unchanged here.
    """

    class Meta:
        model = Category
        fields = ('id', 'description', 'icon')
class RatingSerializer(serializers.ModelSerializer):
    """Serializes a Rating as its id plus the (user, site) pair."""

    class Meta:
        model = Rating
        fields = ('id', 'user', 'site')
class SiteSerializer(serializers.ModelSerializer):
    """Serializes a Site, adding the read-only icon of its category."""

    # Computed from Site.get_category_icon; never writable.
    icon = serializers.ReadOnlyField(source='get_category_icon', read_only=True)

    class Meta:
        model = Site
        fields = ('id', 'description', 'detail', 'latitude', 'longitude',
                  'tags', 'category', 'creator_by', 'icon')
class PictureSerializer(serializers.ModelSerializer):
    """Serializes a Picture: the image field and its owning site."""

    class Meta:
        model = Picture
        fields = ('picture', 'site')
class LikeSerializer(serializers.ModelSerializer):
    """Serializes a Like as its (user, site) pair."""

    class Meta:
        model = Like
        fields = ('user', 'site')
class UserSerializer(serializers.ModelSerializer):
    """Serializer for creating Django auth users.

    The password is write-only and is stored hashed via
    ``set_password`` rather than raw.
    """

    class Meta:
        model = User
        fields = ('password', 'first_name', 'last_name', 'email', 'username')
        # `write_only_fields` was removed in DRF 3.x and is silently
        # ignored; `extra_kwargs` is the supported replacement.
        extra_kwargs = {'password': {'write_only': True}}
        read_only_fields = ('is_staff', 'is_superuser', 'is_active', 'date_joined',)

    def create(self, validated_data):
        """Create a user with a hashed password.

        BUG FIX: `email` was accepted by the serializer but silently
        dropped on create; it is now persisted (defaulting to '').
        """
        user = User(first_name=validated_data['first_name'],
                    last_name=validated_data['last_name'],
                    username=validated_data['username'],
                    email=validated_data.get('email', ''))
        user.set_password(validated_data['password'])
        user.save()
        return user
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
import time
import re
import sys
import hashlib
import shutil
import pathlib
import traceback
import subprocess
import pandas
import argparse
from ...shared import shared_tools as st
import lxml.etree as et
from pathlib import Path
from datetime import datetime
from joblib import Parallel, delayed
#
# Main running function
#
def check_ready_files(paths_data, config_data, cohort_data, arg):
    """Scan the configured source paths for scan folders ready to transfer.

    paths_data: [source_dirs, destination_dirs, compression_dirs, project_ids],
    indexed in parallel. config_data supplies, per project, the delete
    protocol (index 1) and space allowance in TB (index 2); cohort_data
    supplies the cohort number (index 1).

    Returns directory_waiting, one row per ready scan folder:
    [compress_dir, dest_dir, delete_protocol, scan_root, project_id,
     space, cohort, astro_id, log_base].
    """
    sor_string = paths_data[0]
    des_string = paths_data[1]
    comp_string = paths_data[2]
    paths_proj = paths_data[3]
    #
    # loop for each directory in path input document
    #
    directory_waiting = []
    directory_roots = []
    for i1 in cohort_data[0]:
        # Skip projects missing from any of the three parallel tables.
        try:
            cohort_data_index = cohort_data[0].index(i1)
            config_data_index = config_data[0].index(i1)
            paths_data_index = paths_data[3].index(i1)
        except ValueError:
            continue
        specimen_path = sor_string[paths_data_index] + '/Specimen_Table.xlsx'
        #
        # All three working directories and the specimen table must exist.
        if not os.path.exists(sor_string[paths_data_index]) \
                or not os.path.exists(des_string[paths_data_index]) \
                or not os.path.exists(comp_string[paths_data_index]) \
                or not os.path.exists(specimen_path):
            continue
        #
        # build directory_waiting data. Uses project # from paths and
        # finds coresponding project # in config and cohort to determine
        # relevant cohort #, space allocation, and delete protocol
        #
        delete_i = config_data[1][config_data_index]
        space_i = config_data[2][config_data_index]
        cohort_i = cohort_data[1][cohort_data_index]
        #
        specimen_table = pandas.read_excel(specimen_path, engine='openpyxl')
        st_slide_ids = specimen_table['Patient #'].tolist()
        st_batch_ids = specimen_table['Batch ID'].tolist()
        #
        total_size = 0
        astro_ids = get_astro_id(des_string[paths_data_index], paths_proj[paths_data_index])
        if not astro_ids[0]:
            continue
        #
        # get the paths with batchIDs and not in directory waiting
        #
        for root, dirs, files in os.walk(sor_string[paths_data_index], topdown=False):
            if "Scan" in root and os.path.exists(root + "/BatchID.txt") and \
                    root not in directory_roots:
                #
                # Normalise Windows separators to forward slashes.
                regex = '/|\\\\'
                root = '/'.join(re.split(regex, root))
                slide_id = str(root.split('/')[-2])
                #
                # SlideID is not in AstroID_def
                #
                if slide_id in astro_ids[1]:
                    astro_id = astro_ids[0][astro_ids[1].index(slide_id)]
                    string_list = [i1, cohort_i, astro_id]
                elif "Control" in slide_id:
                    astro_id = slide_id
                    string_list = [i1, cohort_i, '_'.join(slide_id.split('_')[:-1])]
                else:
                    continue
                #
                log_base = ';'.join(string_list)
                #
                # NOTE(review): this handle is never closed; a with-block
                # would be safer.
                file = open(root + "/BatchID.txt", 'r')
                batch_id = str(file.read())
                if slide_id in st_slide_ids:
                    st_batch_id = str(st_batch_ids[st_slide_ids.index(slide_id)])
                else:
                    st_batch_id = ''
                if batch_id != st_batch_id and "Control" not in slide_id:
                    log_string = log_base + ";ERROR: BatchID.txt does not match " \
                                            "BatchID in Specimen Table. Skipping transfer"
                    st.print_to_log(log_string, des_string[paths_data_index], arg.v, arg.q, astro_id, "master")
                    continue
                #
                row = [comp_string[paths_data_index], des_string[paths_data_index], delete_i, root,
                       i1, space_i, cohort_i, astro_id, log_base]
                #
                # If the directory to be transferred is larger than the space avaliable
                # in the destination directory as given by AstropathConfig.csv, then
                # the directory is not transferred. Updated for each new specimen
                #
                total_size = total_size + st.get_size(root)
                if total_size > space_i * 10 ** 12:
                    log_string = log_base + ";ERROR: Insufficient space. Skipping transfer"
                    st.print_to_log(log_string, des_string[paths_data_index], arg.v, arg.q, astro_id, "master")
                    continue
                #
                # If automatic has been chosen the DoNotDelete texts files are ignored
                # and deleted. Otherwise only add directory waiting queue if the text
                # file does not exist
                #
                dnd_path = root + "/DoNotDelete.txt"
                delete_i = delete_i.upper()
                if arg.delete_type == "automatic":
                    if delete_i == "YES" and os.path.exists(dnd_path):
                        os.remove(dnd_path)
                else:
                    temp_des = os.path.join(des_string[paths_data_index], root.split('/')[-2])
                    if os.path.exists(dnd_path) and os.path.exists(temp_des):
                        continue
                #
                directory_waiting.append(row)
                directory_roots.append(root)
    return directory_waiting
#
# transfer the directories from directory_waiting
#
def transfer_loop(directory_waiting, arg, zip_path):
    """Transfer every queued sample directory, one at a time."""
    for sample in directory_waiting:
        transfer_one_sample(sample, arg, zip_path)
#
# log file is to be saved in each specimen folder. To avoid conflicts with
# md5 checks, log file will be moved after all transfer processes have finished
#
#
# Needed to manage the log files if the destination needs to be deleted.
# Want to keep what has been logged in before
#
def merge_logs(des, log_path):
    """Append ``<des>/transfer.log`` onto ``<log_path>/transfer.log``.

    Used to preserve previously logged lines when a destination
    directory is about to be deleted and re-created.
    """
    new_log = des + '/transfer.log'
    # Context managers guarantee both handles are closed even if the
    # copy raises (the original leaked the append handle on error).
    with open(log_path + '/transfer.log', 'a+') as prev_log, open(new_log) as f:
        for line in f:
            prev_log.write(line)
#
# transfer a single sample and delete based off of corresponding settings
#
def transfer_one_sample(direct, arg, zip_path):
    """Transfer (and optionally compress) a single scan directory.

    *direct* is one row produced by check_ready_files:
    [compress_dir, dest_dir, delete_protocol, scan_root, project, space,
     cohort, astro_id, log_base]. Retries the transfer whenever
    compare_file_names reports an inconsistency (return code 2) and
    gives up on skip conditions (return code 1).
    """
    astro_id = direct[7]
    log_base = direct[8]
    des_string = direct[1]
    #
    # set up transfer strings
    #
    current_sor_string = direct[3]
    current_des_string = des_string + "/" + astro_id + "/im3" + "/" \
                         + str(direct[3].split('/')[-1])
    current_compress_string = direct[0] + "/" + astro_id + "/im3" + "/" \
                              + str(direct[3].split('/')[-1])
    # Manual runs always allow deletion; otherwise honour the per-project
    # delete protocol from the config table.
    if arg.delete_type == 'manual':
        del_string = 'YES'
    else:
        del_string = direct[2]
    #
    # If the destination path already exists, compare the files and determine if
    # an error occurred in the transfer process. If it did then delete destination
    # and compression then continue with this sample. If no error occurred then
    # remove the source directory and continue to the next sample.
    #
    if os.path.exists(current_des_string):
        compare = compare_file_names(current_sor_string, direct[1], current_des_string,
                                     current_compress_string, del_string, arg,
                                     log_base=log_base, astro_id=astro_id)
        if compare == 1:
            return
    #
    # transfer process
    #
    # NOTE(review): i2 is never reassigned, so this is an infinite loop
    # exited only via return/break below.
    i2 = 2
    while i2 == 2:
        err, result, result2 = error_check("TRANSFER", direct[1], arg, current_sor_string,
                                           current_des_string, astro=astro_id,
                                           log_base=log_base)
        if err:
            return
        if not arg.no_compress:
            err, result, result2 = error_check("COMPRESS", direct[1], arg, current_sor_string,
                                               current_des_string, current_compress_string,
                                               log_base=log_base, astro=astro_id, zip_path=zip_path)
            if err:
                return
            log_string = log_base + ";Compression finished"
            st.print_to_log(log_string, direct[1], arg.v, arg.q, astro_id, "master")
        #
        # MD5 calculation and file comparison
        #
        err = compare_file_names(current_sor_string, direct[1], current_des_string,
                                 current_compress_string, del_string, arg, 1,
                                 log_base, astro_id=astro_id)
        if err == 2:
            continue
        elif err == 1:
            return
        else:
            break
#
# Get the AstroIDs from U/P folder. If no file exists, push a warning and move to
# next specimen
#
def get_astro_id(des_dir, proj):
    """Read AstropathAPIDdef_<proj>.csv and return [astro_ids, slide_ids].

    Returns ['', ''] when the definition file is missing so callers can
    test ``if not astro_ids[0]``.
    """
    csv_path = '%s/upkeep_and_progress/AstropathAPIDdef_%s.csv' % (str(des_dir), str(proj))
    if not os.path.exists(csv_path):
        return ['', '']
    # Skip the header row; column 0 is the AstroID, column 1 the SlideID.
    rows = st.read_csv(csv_path)[1:]
    return [[row.split(',')[0] for row in rows],
            [row.split(',')[1] for row in rows]]
#
# Resolve directory already in the destination directory
# Returning 1 means that the source directory was deleted and the program should
# continue to next directory. Returning 2 means that the transfer process should
# be re-initiated
#
def compare_file_names(current_sor_string, main_des_string, current_des_string,
                       current_compress_string, del_string, arg, post_transfer=0,
                       log_base="", astro_id=""):
    """Reconcile a source scan directory with its transferred destination.

    Compares file counts, then MD5 hashes, between source and
    destination (ignoring CheckSums.txt, transfer.log and the renamed
    annotation XML). Return codes: 0 = success/proceed, 1 = skip to the
    next specimen, 2 = destination was deleted, re-run the transfer.
    """
    slide_id = str(current_sor_string.split('/')[-2])
    #
    annotation_file = astro_id + "_" + current_sor_string.split('/')[-1] \
                      + "_annotations.xml"
    #
    if post_transfer == 0:
        log_string = log_base + ";Slide ID is in source and destination on source directory recheck comparing files"
        st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
    #
    # Compute the number of files in each directory
    # Only compare those files which are transferred from source
    #
    hash_path = [current_des_string, current_sor_string]
    file_array = []
    for x in [0, 1]:
        x1 = []
        p = hash_path[x]
        for root, dirs, files in os.walk(p):
            x1 += files
        if 'CheckSums.txt' in x1:
            x1.remove('CheckSums.txt')
        if 'transfer.log' in x1:
            x1.remove('transfer.log')
        if annotation_file in x1 and slide_id != astro_id:
            x1.remove(annotation_file)
        file_array.append(x1)
    #
    if len(file_array[1]) < len(file_array[0]):
        # Source has FEWER files than destination.
        if post_transfer == 1:
            log_string = log_base + ";Source lost files after transfer"
            st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id, "master")
            log_string = log_base + ";Error sent. Next slide"
            st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
            mail_string = "Source directory has less files after transfer."
            st.send_email(arg.email, mail_string, debug=arg.d)
            #
            return 1
        #
        # delete source path if there are missing files in it
        #
        log_string = log_base + ";Source directory missing files"
        st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
        #
        if not os.path.exists(current_sor_string + "/DoNotDelete.txt") \
                and del_string == "YES":
            to_delete = str(Path(current_sor_string).parents[0])
            shutil.rmtree(to_delete, ignore_errors=True)
            #
            log_string = log_base + ";Deleted source directory"
            st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
        #
        # return 1 to continue to next specimen
        #
        return 1
    #
    elif len(file_array[1]) > len(file_array[0]):
        #
        # delete destination path if there are missing files in it
        #
        log_string = log_base + ";Destination directory missing files"
        st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
        #
        delete_destination(main_des_string, current_des_string, current_compress_string,
                           log_base, astro_id, arg)
        #
        return 2
    #
    # For the situation where the number of files is equal between the source
    # directory and the destination directory, the following compares the hash values
    # of each file.
    #
    elif len(file_array[1]) == len(file_array[0]):
        #
        # compute the new hash files
        #
        #
        location_string = ['DEST', 'SOURCE']
        hash_list = []
        #
        for x in [0, 1]:
            #
            # if old check sum file exists delete it
            #
            c_hash_path = hash_path[x] + '/CheckSums.txt'
            if os.path.exists(c_hash_path):
                os.remove(c_hash_path)
            #
            # compute hash values and store them in the array hash_array
            #
            error_string = "COMPUTE " + location_string[x] + " MD5"
            err, hash_value, sums_value = error_check(error_string, main_des_string, arg,
                                                      current_sor_string, current_des_string,
                                                      log_base=log_base, astro=astro_id)
            if err:
                return err
            hash_list.append(hash_value)
        #
        log_string = log_base + ";MD5 calculations finished"
        st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
        #
        # If all the values in the arrays match between source and destination files,
        # the data was transferred successfully and the source directory should be
        # removed according to the user input.
        # NOTE(review): set difference ignores duplicate digests -- confirm
        # duplicate-file contents cannot mask a mismatch here.
        #
        if not list(set(hash_list[0]) - set(hash_list[1])):
            #
            # if do not delete text file does not exist and protocol allows deletion
            # then delete the source directory
            #
            if not os.path.exists(current_sor_string + "/DoNotDelete.txt") \
                    and del_string == "YES":
                log_string = log_base + ";Source and destination match"
                st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
                to_delete = str(Path(current_sor_string).parents[0])
                shutil.rmtree(to_delete, ignore_errors=True)
                log_string = log_base + ";Deleted source directory"
                st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
            #
            elif not os.path.exists(current_sor_string + "/DoNotDelete.txt") \
                    or not os.path.exists(current_des_string + "/DoNotDelete.txt"):
                st.print_to_log(log_base + ";Source and destination match", main_des_string, arg.v, arg.q, astro_id)
                create_delete_txt(current_sor_string)
                create_delete_txt(current_des_string)
                st.print_to_log(log_base + ";Created DoNotDelete file", main_des_string, arg.v, arg.q, astro_id)
            st.print_to_log(log_base + ";Processing finished", main_des_string, arg.v, arg.q, astro_id)
            #
            # return 1 to continue to next specimen if we are before the transfer
            #
            if post_transfer == 1:
                return 0
            else:
                return 1
        #
        # If a given file has different hash values, something was corrupted
        # in the data during transfer and the process is re-initiated.
        #
        else:
            log_string = log_base + ";Destination and source inconsistency"
            st.print_to_log(log_string, main_des_string, arg.v, arg.q, astro_id)
            delete_destination(main_des_string, current_des_string, current_compress_string,
                               log_base, astro_id, arg)
            # BUG FIX: return 2 (retry) instead of 0. The destination was
            # just deleted, but the post-transfer caller treats 0 as
            # success and would stop without re-transferring; the
            # pre-transfer caller only tests for 1, so its behaviour is
            # unchanged.
            return 2
#
# function that deletes the destination when necessary
#
def delete_destination(main_des_string, current_des_string, current_compress_string,
                       log_base, astro_id, arg):
    """Remove a partially-transferred destination (and any compressed
    mirror) so the transfer can be re-run from scratch."""
    target = str(Path(current_des_string).parents[0])
    shutil.rmtree(target, ignore_errors=True)
    st.print_to_log(log_base + ";Deleted destination directory",
                    main_des_string, arg.v, arg.q, astro_id)
    # The compressed copy only exists if compression already ran.
    if os.path.exists(current_compress_string):
        target = str(Path(current_compress_string).parents[0])
        shutil.rmtree(target, ignore_errors=True)
        st.print_to_log(log_base + ";Deleted compression directory",
                        main_des_string, arg.v, arg.q, astro_id)
    st.print_to_log(log_base + ";Re-initiating transfer process.",
                    main_des_string, arg.v, arg.q, astro_id)
#
# evaluate the functions while checking for errors
#
def error_check(action, main_des_string, arg, current_sor_string="", current_des_string="",
                comp="", astro="", log_base="", zip_path=""):
    """Run one transfer-pipeline *action* with retry on OSError.

    Each action ("TRANSFER", "COMPUTE SOURCE MD5", "COMPUTE DEST MD5",
    "COMPRESS", "ANNOTATE") is attempted up to `attempts` times with a
    `mins`-minute pause between tries; a persistent failure emails the
    operator. Returns (err, result, result2); the results are only
    populated by the COMPUTE * MD5 actions.

    NOTE(review): st.print_to_log is called here as
    (msg, dest, astro, arg, ...) while other call sites in this file pass
    (msg, dest, arg.v, arg.q, astro_id, ...) -- the argument order looks
    inconsistent; confirm against the print_to_log signature.
    """
    slide_id = str(current_sor_string.split('/')[-2])
    attempts = 2
    err = 0
    warning = ""
    mins = 0.1  # pause between retries, in minutes
    result = []
    result2 = []
    while err < attempts:
        try:
            if action == "TRANSFER":
                transfer_directory(current_sor_string, main_des_string, current_des_string,
                                   astro, arg, log_base=log_base)
                st.print_to_log(log_base + ";Transfer finished", main_des_string, astro, arg, "master")
            elif action == "COMPUTE SOURCE MD5":
                result, result2 = compute_md5(current_sor_string, main_des_string, "SOURCE", arg,
                                              log_base=log_base, slide_id=slide_id,
                                              astro_id=astro)
            elif action == "COMPUTE DEST MD5":
                result, result2 = compute_md5(current_des_string, main_des_string,
                                              "DESTINATION", arg, log_base=log_base,
                                              slide_id=slide_id, astro_id=astro)
            elif action == "COMPRESS":
                compress_directory(current_sor_string, main_des_string, comp, astro, arg,
                                   zip_path, log_base=log_base)
            elif action == "ANNOTATE":
                # Path of the renamed annotation XML inside the destination.
                xmlfile = main_des_string + '/' + astro + '/im3/' \
                          + current_sor_string.split('/')[-1] \
                          + '/' + astro + '_' + current_sor_string.split('/')[-1] \
                          + '_annotations.xml'
                warning = annotation_handler(xmlfile, str(current_sor_string.split('/')[-2]), astro)
            if err > 0:
                log_string = log_base + ";Warning: " + action.lower() + " passed with " + \
                             str(err) + " error(s)"
                st.print_to_log(log_string, main_des_string, astro, arg, "master")
            if warning:
                log_string = log_base + warning
                st.print_to_log(log_string, main_des_string, astro, arg)
            # Success: force the loop condition false.
            err = attempts
        except OSError:
            #
            # increase count and check if it is greater than number of allowed attempts
            #
            err = err + 1
            #
            # send error message to log
            #
            error_msg = traceback.format_exc().splitlines()[-1].split(':')[0]
            descriptor = traceback.format_exc().splitlines()[-1].split(']')[-1]
            log_string = log_base + ";WARNING: attempt " + str(err) + " failed for " \
                         + action.lower()
            st.print_to_log(log_string, main_des_string, astro, arg)
            if err < attempts:
                #
                # if we have not met the allowed count wait <mins> minutes and try again
                #
                log_string = log_base + ";Attempting to " + action.lower() \
                             + " again after " + str(mins) + " minutes"
                st.print_to_log(log_string, main_des_string, astro, arg)
                time.sleep(mins * 60)
                continue
            #
            else:
                #
                # if we have met the allowed count something else must be wrong.
                # Email, return positive err value
                #
                log_string = log_base + ";ERROR: " + error_msg + descriptor
                st.print_to_log(log_string, main_des_string, astro, arg, "master")
                error = traceback.format_exc()
                st.send_email(arg.email, error, err=err, error_check_dec=True, debug=arg.d)
                err = 1
                return err, result, result2
        except et.ParseError as what:
            #
            # Annotation handler error catch if parsing error occurs
            #
            err = err + 1
            error_msg = traceback.format_exc().splitlines()[-1].split(':')[0]
            log_string = log_base + ";ERROR: " + error_msg + " - " + str(what)
            st.print_to_log(log_string, main_des_string, astro, arg, "master")
            error = traceback.format_exc()
            st.send_email(arg.email, error, err=err, error_check_dec=True, debug=arg.d)
            return err, result, result2
    err = 0
    return err, result, result2
#
# Generates Hash Values and CheckSums.txt files
#
def compute_md5(current_directory, main_des_string, location_string, arg, log_base="",
                slide_id="", astro_id=""):
    """Hash every transferred file under *current_directory* with MD5.

    Writes CheckSums.txt ("<path>\\t<digest>" per line) into the
    directory and returns (hash_array, sums_array). transfer.log is
    always skipped; the renamed annotation XML is skipped on the
    non-source side when the slide was renamed to an AstroID.
    """
    #
    # print starting strings to log
    #
    log_string = log_base + ";MD5 computation started"
    st.print_to_log(log_string, main_des_string, astro_id, arg)
    log_string = log_base + ";Computing " + location_string.lower() + " MD5 check sums"
    st.print_to_log(log_string, main_des_string, astro_id, arg)
    #
    # create the md5 hash values in parallel for each file in the current directory
    # put the strings for the check sums file and the hash values into separate arrays
    # Only compare those files that were transferred
    #
    start = time.time()
    num = 0
    sums_array = []
    hash_array = []
    for root, dirs, files in os.walk(current_directory):
        # Hash this directory level's files with 4 parallel workers.
        results = Parallel(n_jobs=4, backend="loky")(
            delayed(md5)(root + '/' + file_1) for file_1 in files)
        for item in results:
            if ("annotations.xml" in item[0] and "xml.lock" not in item[0].lower()
                    and location_string != "SOURCE" and astro_id != slide_id) \
                    or "transfer.log" in item[0]:
                continue
            num += 1
            sums_array.append(item[0])
            hash_array.append(item[1])
    #
    # write sums to checksum file
    # NOTE(review): plain open/close; a with-block would guarantee the
    # handle closes on a write error.
    #
    sums_file = open(current_directory + "/CheckSums.txt", "w")
    sums_file.writelines(["%s\n" % item for item in sums_array])
    sums_file.close()
    #
    end = time.time()
    log_string = log_base + ";Completed " + str(num) + " files in " \
                 + str(round(end - start, 2)) + " seconds"
    st.print_to_log(log_string, main_des_string, astro_id, arg)
    #
    return hash_array, sums_array
#
# Outputs CheckSums.txt lines and Hash values as 2 object array
#
def md5(item):
    """Compute the MD5 digest of the file at *item* in 100 MB chunks.

    Returns a two-element list: the CheckSums.txt line ("<path>\\t<digest>")
    and the bare hex digest.
    """
    digest = hashlib.md5()
    with open(item, "rb") as handle:
        while True:
            block = handle.read(104857600)  # 100 MB per read keeps memory bounded
            if not block:
                break
            digest.update(block)
    checksum = digest.hexdigest()
    return [item + '\t' + checksum, checksum]
#
# creates DoNotDelete.txt
#
def create_delete_txt(current_directory):
    """Drop a DoNotDelete.txt marker file into *current_directory*."""
    marker_path = current_directory + "/DoNotDelete.txt"
    with open(marker_path, "w") as marker:
        marker.write("Do not delete me unless this folder is going to be removed.")
#
# Performs TransferItem() on every file in the source directory
# For rename process, create a duplicate of the annotation file with "-original".
# This will have the astroID in the filename but nothing changed inside.
# The other version will have the .im3 portion inside changed to the AstroID
#
def transfer_directory(current_sor_string, main_des_string, current_des_string,
                       astro_id, arg, log_base=""):
    """Copy one slide directory from source to destination, renaming SlideID
    to AstroID in filenames, then rewrite the annotation XML if needed.

    De-duplicates "]_M"-suffixed files first, counts/logs source and
    destination totals, and calls error_check("ANNOTATE", ...) when the
    slide was actually renamed.
    """
    # SlideID is the parent folder name of the scan directory
    slide_id = str(current_sor_string.split('/')[-2])
    #
    # get the number of files and bytes in the source directory
    #
    n_sor_files, n_sor_bytes = 0, 0
    M_files = []
    all_files = []
    #
    # Check for duplicate files. Remove all but latest version of each duplicate
    #
    for root, dirs, files in os.walk(current_sor_string):
        for f in sorted(files):
            if ".im3" in f:
                all_files.append(f)
            if "]_M" in f:
                M_files.append(f)
    if M_files:
        st.print_to_log(log_base + ";Duplicate files found", main_des_string, astro_id, arg)
        st.M_file_handler(current_sor_string, all_files, M_files)
        st.print_to_log(log_base + ";Duplicate files handled", main_des_string, astro_id, arg)
    #
    # names[0] = SlideID (taken from the first walked root), names[1] = AstroID;
    # used by transfer_item/transfer_one to rename files on the fly
    names = [""] * 2
    names[1] = astro_id
    for root, dirs, files in os.walk(current_sor_string):
        if not names[0]:
            names[0] = str(root.split('/')[-2])
        for f in files:
            f = os.path.join(root, f)
            n_sor_bytes += os.path.getsize(f)
        n_sor_files += len(files)
    #
    log_string = log_base + ";Transfer process started"
    st.print_to_log(log_string, main_des_string, astro_id, arg, "master")
    log_string = (log_base + ";Source Contains " + str(n_sor_files) +
                  " File(s) " + str(n_sor_bytes) + " bytes")
    st.print_to_log(log_string, main_des_string, astro_id, arg)
    #
    pathlib.Path(current_des_string).mkdir(parents=True, exist_ok=True)
    #
    for item in os.listdir(current_sor_string):
        transfer_item(item, current_sor_string, current_des_string, names)
    #
    # get files and bytes from destination directory
    #
    n_des_files, n_des_bytes = 0, 0
    #
    for root, dirs, files in os.walk(current_des_string):
        for f in files:
            f = os.path.join(root, f)
            n_des_bytes += os.path.getsize(f)
        n_des_files += len(files)
    #
    # Once transfer process is finished, duplicate and edit annotations
    # folder to match new naming convention
    #
    if slide_id != astro_id:
        error_check("ANNOTATE", main_des_string, arg, current_sor_string, astro=astro_id,
                    log_base=log_base)
    log_string = (log_base + ";Transferred " + str(n_des_files) +
                  " File(s) " + str(n_des_bytes) + " bytes")
    st.print_to_log(log_string, main_des_string, astro_id, arg)
#
# Duplicates existing annotations file and edits the version not labeled "-original"
# to match the new naming convention
#
def annotation_handler(xmlfile, slide_id, astro_id):
    """Duplicate an annotations XML file and rewrite it in place so every
    occurrence of *slide_id* in element text/tails becomes *astro_id*.

    A pristine copy named "<name>-original.xml" is kept alongside the
    edited file. Returns "" on success, or a ";WARNING: ..." string when
    *xmlfile* does not exist (caller appends it to the log).
    """
    if not os.path.exists(xmlfile):
        return ";WARNING: " + xmlfile + " does not exist"
    # keep an untouched "-original" copy next to the edited file
    newfile = xmlfile.replace('.xml', '-original.xml')
    shutil.copy(xmlfile, newfile)
    with open(xmlfile, 'rb+') as f:
        tree = et.parse(f)
        root = tree.getroot()
        # Element.getiterator() was deprecated and removed (stdlib 3.9);
        # iter() is the supported equivalent in both ElementTree and lxml.
        for elem in root.iter():
            if elem.text:
                elem.text = elem.text.replace(slide_id, astro_id)
            if elem.tail:
                elem.tail = elem.tail.replace(slide_id, astro_id)
        # rewrite the (possibly shorter) document over the old bytes
        f.seek(0)
        f.write(et.tostring(tree, encoding='UTF-8', xml_declaration=True))
        f.truncate()
    return ""
#
# Transfers an individual file from source to directory
#
def transfer_item(item, src, dst, names):
    """Transfer one directory entry from *src* to *dst*.

    Directories are recreated at the destination and their contents copied in
    parallel (4 joblib workers); plain files go straight to transfer_one().
    names = [SlideID, AstroID] for on-the-fly filename renaming.
    """
    s = os.path.join(src, item)
    d = os.path.join(dst, item)
    if os.path.isdir(s):
        if not os.path.exists(d):
            pathlib.Path(d).mkdir(parents=True, exist_ok=True)
        # NOTE: recurses only one level via transfer_one; nested directories
        # inside *s* would be opened as files — assumes flat scan folders
        # (TODO confirm against actual source layout)
        Parallel(n_jobs=4, backend="loky")(
            delayed(transfer_one)(item, s, d, names)
            for item in os.listdir(s))
    else:
        transfer_one(item, src, dst, names)
#
# transfers items and changes SlideID in filenames to AstroID
#
def transfer_one(item, src, dst, names):
    """Copy one file from *src* to *dst*, renaming SlideID (names[0]) to
    AstroID (names[1]) in the destination filename, preserving file stats."""
    source_path = os.path.join(src, item)
    dest_path = os.path.join(dst, item)
    if names[0] in dest_path:
        dest_path = dest_path.replace(names[0], names[1])
    # 16 MB buffered copy, then mirror timestamps/permissions
    with open(source_path, 'rb') as reader, open(dest_path, 'wb') as writer:
        shutil.copyfileobj(reader, writer, length=16 * 1024 * 1024)
    shutil.copystat(source_path, dest_path)
#
# Runs Compress() on each file in the working directory
#
def compress_directory(current_sor_string, main_des_string, compress, astro_id, arg,
                       zip_path, log_base=""):
    """Compress the source scan directory into per-file .7z archives under
    *compress*, plus the renamed annotation XML pair from the destination.

    names = [SlideID, AstroID]; compress_item renames archives on the fly.
    """
    #
    # get the number of files and bytes in the source directory
    #
    n_sor_files, n_sor_bytes = 0, 0
    xmlfile = astro_id + '_' + current_sor_string.split('/')[-1] \
        + '_annotations.xml'
    xml_list = [xmlfile, xmlfile.replace('.xml', '-original.xml')]
    #
    names = [""] * 2
    names[1] = astro_id
    for root, dirs, files in os.walk(current_sor_string):
        if not names[0]:
            names[0] = str(root.split('/')[-2])
        for f in files:
            # byte count skips annotations.xml files that will be renamed
            # (names differ); lock files and everything else are counted.
            # NOTE(review): the file *count* below still includes them —
            # confirm the asymmetry is intentional.
            if "annotations.xml.lock" in f.lower() or "annotations.xml" not in f \
                    or names[0] == names[1]:
                f = os.path.join(root, f)
                n_sor_bytes += os.path.getsize(f)
        n_sor_files += len(files)
    #
    log_string = log_base + ";Compression started"
    st.print_to_log(log_string, main_des_string, astro_id, arg, "master")
    #
    # do the compression one file at a time
    #
    if not os.path.exists(compress):
        pathlib.Path(compress).mkdir(parents=True, exist_ok=True)
    #
    for item in os.listdir(current_sor_string):
        compress_item(item, current_sor_string, compress, names, zip_path)
    # renamed annotation files only exist at the destination, so compress
    # them from there (skipped when no rename happened)
    annotation_path = main_des_string + '/' + astro_id + '/im3/' \
        + current_sor_string.split('/')[-1] + '/'
    for item in xml_list:
        if os.path.exists(annotation_path + item) and names[0] != names[1]:
            compress_item(item, annotation_path, compress, names, zip_path, ann=True)
    #
    # get files and bytes from destination directory
    #
    n_des_files, n_des_bytes = 0, 0
    #
    for root, dirs, files in os.walk(compress):
        for f in files:
            f = os.path.join(root, f)
            n_des_bytes += os.path.getsize(f)
        n_des_files += len(files)
    #
    log_string = (log_base + ";Compressing " + str(n_sor_files) +
                  " file(s) and " + str(n_sor_bytes) + " bytes from source")
    st.print_to_log(log_string, main_des_string, astro_id, arg)
    log_string = (log_base + ";Compressed " + str(n_des_files) +
                  " total file(s) " + str(n_des_bytes) + " total bytes")
    st.print_to_log(log_string, main_des_string, astro_id, arg)
#
# Compresses individual files
#
def compress_item(item, sor, des, names, zip_path, ann=False):
    """Compress one file (or recurse into one directory) with 7-Zip at -mx1.

    Archive names have SlideID (names[0]) replaced by AstroID (names[1]).
    ann=True marks pre-renamed annotation files so the SlideID-named copy
    is skipped.
    """
    s = os.path.join(sor, item)
    d = os.path.join(des, item)
    d = d.replace(names[0], names[1])
    if os.path.isdir(s):
        if not os.path.exists(d):
            pathlib.Path(d).mkdir(parents=True, exist_ok=True)
        Parallel(n_jobs=4, backend="loky")(
            delayed(compress_item)(item, s, d, names, zip_path)
            for item in os.listdir(s))
    # NOTE(review): because of Python precedence the last branch reads
    # A or (B and C), not (A or B) and C — and the first branch already
    # captures most files, so the later branches look unreachable in
    # practice. Confirm the intended condition grouping.
    if (not ann or names[0] not in item) and not os.path.isdir(s):
        subprocess.check_output([zip_path + '/7z.exe', 'a', d + ".7z", '-mx1', s])
    elif names[0] == names[1] and not os.path.isdir(s):
        subprocess.check_output([zip_path + '/7z.exe', 'a', d + ".7z", '-mx1', s])
    elif "annotations.xml.lock" in item.lower() or "annotations.xml" not in item \
            and not os.path.isdir(s):
        # stage a renamed temporary copy so the archive content carries the
        # AstroID path, then clean it up
        pre_string = str(s.split('/')[-2]) + '/' + names[0]
        post_string = str(s.split('/')[-2]) + '/' + names[1]
        temp_s = s.replace(pre_string, post_string)
        shutil.copy(s, temp_s)
        subprocess.check_output([zip_path + '/7z.exe', 'a', d + ".7z", '-mx1', temp_s])
        os.remove(temp_s)
#
# Reads source csv and takes directories
#
def update_source_csv(arg):
    """Read the three Astropath control CSVs and return path/config/cohort data.

    Returns (paths_data, config_data, cohort_data); when any control file is
    missing, returns ([], <missing file list>, []) so the caller can abort.
    Paths are prefixed with '//' (UNC style) unless running in debug mode.
    """
    if arg.d:
        leading = ''
    else:
        leading = '//'
    path = arg.mpath + '/AstropathPaths.csv'
    config = arg.mpath + '/AstropathConfig.csv'
    cohort_csv = arg.mpath + '/AstropathCohortsProgress.csv'
    #
    # Catches and alerts user to which source files weren't found
    #
    all_files = [path, config, cohort_csv]
    a_exist = [f for f in all_files if os.path.isfile(f)]
    # symmetric difference = the files that do NOT exist
    a_non_exist = list(set(a_exist) ^ set(all_files))
    if a_non_exist:
        return [], a_non_exist, []
    #
    # open and read files
    #
    paths = st.read_csv(path)
    configs = st.read_csv(config)
    cohorts = st.read_csv(cohort_csv)
    #
    # get and return relevant strings (skip the header row of each CSV)
    #
    c_proj = [i.split(',')[0] for i in cohorts[1:]]
    cohort = [i.split(',')[1] for i in cohorts[1:]]
    #
    d_proj = [i.split(',')[0] for i in configs[1:]]
    delete = [i.split(',')[3] for i in configs[1:]]
    space = [float(i.split(',')[4]) for i in configs[1:]]
    #
    proj = [i.split(',')[0] for i in paths[1:]]
    dpath = [leading + i.split(',')[1] for i in paths[1:]]
    dname = [i.split(',')[2] for i in paths[1:]]
    spath = [leading + i.split(',')[3] for i in paths[1:]]
    cpath = [leading + i.split(',')[4] for i in paths[1:]]
    #
    sor_string = [''] * len(dname)
    des_string = [''] * len(dname)
    comp_string = [''] * len(dname)
    #
    # Convert filepath format to something Jenkins can read
    # (normalize both '/' and '\\' separators to '/')
    #
    regex = '/|\\\\'
    for i1 in range(0, len(dpath)):
        sor_string[i1] = '/'.join(re.split(regex, spath[i1])) + '/' + dname[i1]
        des_string[i1] = '/'.join(re.split(regex, dpath[i1])) + '/' + dname[i1]
        comp_string[i1] = '/'.join(re.split(regex, cpath[i1])) + '/' + dname[i1]
    #
    paths_data = [sor_string, des_string, comp_string, proj]
    config_data = [d_proj, delete, space]
    cohort_data = [c_proj, cohort]
    return paths_data, config_data, cohort_data
#
# Create and edit local transfer.log
# create a log folder and save master file in there
# <console> is a hard coded method of showing log entries
# pending user input method
#
def print_to_log(log_string, des_string, astro_id, arg, loc=""):
    """Append *log_string* (with version and timestamp) to the per-sample
    transfer.log, and also to the project-level master log when loc=="master".

    Echoes to stdout unless -q was given.
    """
    #
    # Make a check for starting and ending lines for version number entries
    #
    pathlib.Path(des_string + '/' + astro_id + '/logfiles').mkdir(parents=True, exist_ok=True)
    if loc == "master":
        if not os.path.exists(des_string + '/logfiles'):
            os.mkdir(des_string + '/logfiles')
        # NOTE(review): path mixes '/' with a literal r"\transfer.log" —
        # works on Windows only; confirm that is the only target platform
        logfile = open(des_string + '/logfiles' + r"\transfer.log", 'ab')
        now = datetime.now()
        str1 = "{0}-{1};{2}\r\n".format(log_string, arg.v, now.strftime("%Y-%m-%d %H:%M:%S"))
        strb = bytes(str1, 'utf-8')
        logfile.write(strb)
        logfile.close()
    # per-sample log entry (always written)
    logfile = open(des_string + '/' + astro_id + '/logfiles' + r"\transfer.log", 'ab')
    now = datetime.now()
    str1 = "{0}-{1};{2}\r\n".format(log_string, arg.v, now.strftime("%Y-%m-%d %H:%M:%S"))
    strb = bytes(str1, 'utf-8')
    logfile.write(strb)
    logfile.close()
    if not arg.q:
        print(log_string)
def apid_argparser():
    """Parse the daemon's command-line options.

    Unknown extra arguments are tolerated (parse_known_args) so the daemon
    survives being launched with wrapper-supplied flags.
    """
    ver = '0.01.0001'
    cli_parser = argparse.ArgumentParser(
        prog="Daemon",
        description='launches transfer for clincal specimen slides in the Astropath pipeline'
    )
    cli_parser.add_argument('--version', action='version', version=f'%(prog)s {ver}')
    cli_parser.add_argument('mpath', type=str, nargs='?',
                            help='directory for astropath processing documents')
    cli_parser.add_argument('email', type=str, nargs='?',
                            help='defines person to email in case of errors')
    cli_parser.add_argument('delete_type', type=str, nargs='?',
                            choices=["hybrid", "automatic", "manual"],
                            default='hybrid',
                            help='sets delete type protocol defined in readme')
    cli_parser.add_argument('-no_compress', action='store_true',
                            help='do not compress transferred files')
    cli_parser.add_argument('-q', action='store_true',
                            help='runs the function quietly')
    cli_parser.add_argument('-v', type=str, nargs='?',
                            default=f'{cli_parser.prog} {ver}',
                            help='used for transmitting version to log')
    cli_parser.add_argument('-d', action='store_true',
                            help='runs debug mode')
    parsed, _extra = cli_parser.parse_known_args()
    return parsed
#
# main function, reads in input arguments, opens source file, and begins the checking function
#
def launch_transfer():
    """Daemon entry point: parse CLI args, locate 7-Zip, then repeatedly scan
    the control CSVs for samples ready to transfer and process them.

    Currently loops 3 times with a short sleep between passes. OS errors and
    deliberate sys.exit() calls are reported by email unless in debug mode.
    """
    #
    # User input for the csv file path with all the transfer protocols.
    #
    print(sys.argv)
    arg = apid_argparser()
    if not arg.mpath:
        print("No mpath")
    if not arg.email:
        print("No email")
    # cwd = '/'.join(os.getcwd().replace('\\', '/').split('/')[:-1])
    # print(cwd)
    # for root, dirs, files in os.walk(cwd, topdown=True):
    #     if "shared_tools" in dirs:
    #         os.chdir(root)
    #         break
    # cwd = '/'.join(os.getcwd().replace('\\', '/').split('/'))
    # print(cwd)
    #
    # run the file checking and transfer algorithms in an infinite loop
    #
    print("Starting Server Demon for Clinical Specimen...")
    try:
        for ii in range(3):
            paths_data, config_data, cohort_data = update_source_csv(arg)
            # empty paths_data means control CSVs are missing;
            # config_data then carries the missing-file list
            if not paths_data:
                sys.exit()
            cwd = '/'.join(os.getcwd().replace('\\', '/').split('/')[:-1])
            print(cwd)
            zip_path = ""
            for root, dirs, files in os.walk(cwd, topdown=False):
                if "7-Zip" in dirs:
                    zip_path = os.path.join(root, "7-Zip")
                    break
            # NOTE(review): exiting here reuses the "Missing source csv files"
            # email below even though the real cause is a missing 7-Zip install
            if not zip_path and not arg.no_compress:
                sys.exit()
            directory_waiting = check_ready_files(paths_data, config_data, cohort_data, arg)
            print("DIRECTORIES CHECKED. FOUND " + str(len(directory_waiting)) +
                  " POTENTIAL SAMPLES TO TRANSFER...")
            transfer_loop(directory_waiting, arg, zip_path)
            minutes = 0.1
            print("ALL DIRECTORIES CHECKED SLEEP FOR " + str(minutes) + " MINUTES...")
            wait_time = 60 * minutes
            time.sleep(wait_time)
            print("RECHECKING TRANSFER DIRECTORY")
    except OSError:
        if arg.d:
            return
        error = traceback.format_exc()
        st.send_email(arg.email, error, debug=arg.d)
        traceback.print_exc()
    except SystemExit:
        if arg.d:
            return
        error = "ERROR: Missing source csv files.\n"
        for file in config_data:
            error = error + file + '\n'
        st.send_email(arg.email, error, debug=arg.d)
        traceback.print_exc()
#
# call the function
#
# Entry point: start the daemon loop when executed as a script.
if __name__ == "__main__":
    launch_transfer()
|
nilq/baby-python
|
python
|
# Generated by Django 1.10.5 on 2017-02-15 08:48
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Domain.quota to Domain.default_mailbox_quota.

    Pure column rename; no data transformation required.
    """

    dependencies = [
        ('admin', '0008_domain_enable_dns_checks'),
    ]

    operations = [
        migrations.RenameField(
            model_name='domain',
            old_name='quota',
            new_name='default_mailbox_quota',
        ),
    ]
|
nilq/baby-python
|
python
|
# Exercise script (Python 2): simple carpool arithmetic with printed results.
# assign the int 100 to "cars"
cars = 100
# assign the float 4.0 to "space_in_a_car"
space_in_a_car = 4.0
# assign the int 30 to "drivers"
drivers = 30
# assign the int 90 to "passengers"
passengers = 90
# cars left over once every available driver takes one
cars_not_driven = cars - drivers
# every driver drives exactly one car
cars_driven = drivers
# total seats across all driven cars
carpool_capacity = cars_driven * space_in_a_car
# average passenger load per driven car
average_passengers_per_car = passengers / cars_driven
# print results (Python 2 print statements; output strings left untouched,
# including the "The are" typo, since they are program output)
print "The are ", cars, "cars available."
print "There are only ", drivers, "drivers available."
print "There will be ", cars_not_driven, "empty cars today."
print "We can transport ", carpool_capacity, "people today."
print "We have", passengers, "to carpool today."
print "We need to put about ", average_passengers_per_car, "in each car."
|
nilq/baby-python
|
python
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import random
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _random_sample_images(anno_file, image_dir, batch_size):
    """Randomly sample *batch_size* annotated COCO images of one aspect-ratio group.

    The group id of the first accepted image (int(height / width)) fixes the
    group for the whole batch; images with no annotations are skipped.
    Returns (images, bbox_list): float32 images decoded with cv2 and one bbox
    array per image (see _get_images_bbox_list).
    """
    from pycocotools.coco import COCO

    image_files = []
    image_ids = []
    batch_group_id = -1  # -1 until the first accepted image fixes the group
    coco = COCO(anno_file)
    img_ids = coco.getImgIds()
    while len(image_files) < batch_size:
        rand_img_id = random.choice(img_ids)
        img_h = coco.imgs[rand_img_id]["height"]
        img_w = coco.imgs[rand_img_id]["width"]
        group_id = int(img_h / img_w)
        if batch_group_id == -1:
            batch_group_id = group_id
        if group_id != batch_group_id:
            continue
        # reject images without any annotation
        anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
        if len(anno_ids) == 0:
            continue
        image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"]))
        image_ids.append(rand_img_id)
    assert len(image_files) == len(image_ids)
    images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
    bbox_list = _get_images_bbox_list(coco, image_ids)
    return images, bbox_list
def _get_images_bbox_list(coco, image_ids):
bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
anno_ids = list(
filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids)
)
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
return bbox_list
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
    """Run OneFlow image_target_resize + object_bbox_scale over one batch.

    Builds a one-off mirrored global function, feeds the variable-shaped
    images/bboxes as tensor lists, and returns (scaled bbox arrays, new image
    sizes) as plain numpy objects. Sizes are consumed as (w, h) by the caller
    — presumably that is image_target_resize's ordering; TODO confirm.
    """
    image_shape = _get_images_static_shape(images)
    bbox_shape = _get_bbox_static_shape(bbox_list)

    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def target_resize_bbox_scale_job(
        image_def: oft.ListListNumpy.Placeholder(
            shape=tuple(image_shape), dtype=flow.float
        ),
        bbox_def: oft.ListListNumpy.Placeholder(
            shape=tuple(bbox_shape), dtype=flow.float
        ),
    ):
        # images -> tensor buffer -> resized (keeping aspect, capped at max_size)
        images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
        resized_images_buffer, new_size, scale = flow.image_target_resize(
            images_buffer, target_size=target_size, max_size=max_size
        )
        # scale the boxes by the same per-image resize factors
        bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
        scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
        scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
            scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
        )
        return scaled_bbox_list, new_size

    # wrap each sample in a leading axis as the mirrored interface expects
    input_image_list = [np.expand_dims(image, axis=0) for image in images]
    input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
    output_bbox_list, output_image_size = target_resize_bbox_scale_job(
        [input_image_list], [input_bbox_list]
    ).get()
    return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0]
def _compare_bbox_scale(
    test_case,
    anno_file,
    image_dir,
    batch_size,
    target_size,
    max_size,
    print_debug_info=False,
):
    """Check flow.object_bbox_scale against a numpy reference.

    Samples a batch of COCO images/boxes, runs the OneFlow resize+scale job,
    then rescales the original boxes by (new size / old size) per axis and
    asserts the two results match.
    """
    images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size)
    of_bbox_list, image_size_list = _of_target_resize_bbox_scale(
        images, bbox_list, target_size, max_size
    )
    for image, bbox, of_bbox, image_size in zip(
        images, bbox_list, of_bbox_list, image_size_list
    ):
        w, h = image_size
        oh, ow = image.shape[0:2]
        scale_h = h / oh
        scale_w = w / ow
        # reference: scale x-coords/widths by scale_w, y-coords/heights by scale_h
        bbox[:, 0] *= scale_w
        bbox[:, 1] *= scale_h
        bbox[:, 2] *= scale_w
        bbox[:, 3] *= scale_h
        test_case.assertTrue(np.allclose(bbox, of_bbox))
@flow.unittest.skip_unless_1n1d()
class TestObjectBboxScale(flow.unittest.TestCase):
    """End-to-end test of flow.object_bbox_scale on COCO val2017 data.

    Requires the MSCOCO 2017 validation set mounted under /dataset.
    """

    def test_object_bbox_scale(test_case):
        # batch_size=4, target_size=800, max_size=1333
        _compare_bbox_scale(
            test_case,
            "/dataset/mscoco_2017/annotations/instances_val2017.json",
            "/dataset/mscoco_2017/val2017",
            4,
            800,
            1333,
        )
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
import codecs
import logging
import time
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
import frontmatter
from bs4 import BeautifulSoup
from jinja2 import Environment, FileSystemLoader
from aqui_brain_dump import base_url, content_path, get_creation_date, get_last_modification_date, get_number_commits, \
md, \
output_path, static_url, template_path
from aqui_brain_dump.main import datetimeformat
from aqui_brain_dump.util import path_to_url
# Shared Jinja2 environment for all page rendering; templates live in template_path.
env = Environment(loader=FileSystemLoader(template_path))
# expose datetimeformat as the "datetime" filter inside templates
env.filters['datetime'] = datetimeformat
template_article = env.get_template('note.html')
template_index = env.get_template('index.html')
logger = logging.getLogger(__name__)
class Note:
    """One markdown note of the digital garden.

    Class-level registries are shared by all instances:
      notes            -- url -> Note, the global note index
      note_executor    -- thread pool used for slow git lookups
      futures_executor -- pending futures from note_executor
      tags_dict        -- lowercased tag -> list of Notes carrying it
    """
    notes = {}
    note_executor = ThreadPoolExecutor(max_workers=10)
    futures_executor = []
    tags_dict = {}

    def __init__(self, file_path):
        # absolute source path and path relative to the content root
        self.file_path = file_path
        self.path = Path(file_path).relative_to(content_path)
        self.content = None      # rendered HTML body
        self.backlinks = []      # Notes that link here
        self.links = []          # urls this note links to
        self.title = ''
        self.meta = {}           # front-matter metadata
        self.tags = []
        self.url = ''
        self.last_mod = None     # filled asynchronously from git
        self.number_edits = 1
        self.creation_date = None

    @classmethod
    def create_from_path(cls, file_path):
        """Return the cached note for *file_path*, or parse and register a new one."""
        logger.debug(f'Creating note from file: {file_path}')
        rel_path = Path(file_path).relative_to(content_path)
        note = cls.notes.get(path_to_url(rel_path), False)
        if note:
            return note
        note = cls(file_path)
        note.parse_file()
        return note

    @classmethod
    def create_from_url(cls, url: str):
        """ Creates a note without content, normally product of links to non existing notes
        """
        if not all(ord(c) < 128 for c in url):
            logger.warning(f'{url} has non-ascii characters')
        logger.debug(f'Creating note from url {url}')
        if url.startswith('/'):
            url = url[1:]
            logger.debug(f'New Url: {url}')
        file_path = content_path / (url + '.md')
        note = cls(file_path)
        note.title = url.replace('_', ' ').capitalize()
        note.url = '/' + url.replace(' ', '_').lower()
        note.meta['epistemic'] = 'This note is auto generated'
        note.notes[note.url] = note
        logger.debug(f'Added {note} to notes with url {url}')
        return note

    def parse_file(self):
        """Load front matter + markdown from disk, derive title/url/tags, and
        register the note; schedules git metadata collection in the background."""
        logger.debug(f'Parsing contents of {self}')
        if not Path(self.file_path).is_file():
            # placeholder note: derive title/url from the path only
            logger.info(f'{self.file_path} does not exist, creating empty note')
            self.title = ' '.join(str(self.path).split('_')).strip('/')
            if self.title.endswith('.md'):
                self.title = self.title[:-3]
            self.url = path_to_url(self.path)
            if not all(ord(c) < 128 for c in self.url):
                logger.warning(f'{self.url} has non-ascii characters')
            self.notes[str(self.path.absolute()).lower()] = self
            return
        with codecs.open(self.file_path, 'r', encoding='utf-8') as f:
            md.reset()
            md.links = []
            post = frontmatter.load(f)
            self.content = md.convert(post.content)
        # strip a leading <h1> out of the body; it becomes a title candidate
        bs = BeautifulSoup(self.content, 'html.parser')
        h1 = bs.find('h1')
        h1_title = None
        if h1 is not None and h1.get_text() != '':
            h1_title = h1.get_text()
            h1.decompose()
            self.content = bs.prettify()
        # title priority: front matter > first h1 > filename
        if 'title' in post.metadata:
            self.title = post.metadata['title']
        elif h1_title is not None:
            self.title = h1_title
        else:
            self.title = ' '.join(str(self.path).split('_')).strip('/').capitalize()
        if self.title.endswith('.md'):
            self.title = self.title[:-3]
        self.url = path_to_url(self.path)
        # NOTE(review): checks for 'slug' but reads 'url' — a page defining
        # slug without url would get url=None; confirm the intended key.
        if 'slug' in post.metadata:
            self.url = post.metadata.get('url')
        self.meta = post.metadata
        self.links = md.links
        self.tags = md.tags
        for tag in self.tags:
            tag = tag.lower()
            if tag not in self.tags_dict:
                self.tags_dict[tag] =[self, ]
            else:
                self.tags_dict[tag].append(self)
        # git metadata is slow: collect it on the shared thread pool
        self.futures_executor.append(self.note_executor.submit(self.update_git_information))
        logger.debug(f'Added {self} with url {self.url}')
        self.notes[self.url] = self

    def update_git_information(self):
        """Fill modification/creation dates and edit count from git history."""
        self.last_mod = get_last_modification_date(self.file_path)
        self.creation_date = get_creation_date(self.file_path)
        self.number_edits = get_number_commits(self.file_path)

    def render(self, base_url):
        """Render this note to <output>/<url>/index.html using its template
        (front-matter 'template' key, falling back to note.html)."""
        # note: the base_url parameter shadows the module-level base_url import
        logger.debug(f'Preparing to render {self}')
        context = {
            'note': self,
            'static': static_url,
            'base_url': base_url,
        }
        out_path = output_path / self.url[1:]
        out_path.mkdir(parents=True, exist_ok=True)
        if 'template' in self.meta:
            template = env.get_template(self.meta.get('template'))
        else:
            template = template_article
        with open(out_path / 'index.html', 'w', encoding='utf-8') as out:
            logger.debug(f'Writing {template} with {self} information, to {out_path}')
            out.write(template.render(context))

    @classmethod
    def build_backlinks(cls):
        """Invert the link graph: for every note's outgoing links, append the
        note to the target's backlinks, auto-creating missing targets."""
        for note in list(cls.notes.values()):
            for link in note.links:
                logger.debug(f'{note.url} links to {link}')
                link_to = cls.notes.get(link, False)
                if link_to:
                    link_to.backlinks.append(note)
                    logger.debug(f'Appending {note} to backlinks of {link_to}')
                else:
                    new_note = Note.create_from_url(link)
                    # wait for in-flight git lookups before mutating the new note
                    while len([f for f in Note.futures_executor if f.running()]):
                        time.sleep(.01)
                    new_note.backlinks.append(note)
                    logger.debug(f'Creating {new_note} and appending {note} to its backlinks')

    def __str__(self):
        return self.title or str(self.path)

    def __repr__(self):
        return f'<Note {self.file_path or self.path}>'
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
"""
This script produces the stacks for emission line luminosity limited samples.
"""
import sys
import os
from os.path import join
import glob
import numpy as n
import astropy.io.fits as fits
import SpectraStackingEBOSS as sse
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
import lineListVac as ll
# Emission lines to label ABOVE the stacked spectrum:
# [rest-frame wavelength (Angstrom), label, plot color]
em_line_list = [
    [1240.14, 'N V' , 'darkgreen'],
    [1305.53, 'O I' , 'darkgreen'],
    [1335.31, 'C II', 'darkgreen' ],
    [1397.61, 'Si IV', 'darkgreen' ],
    [1399.8, 'Si IV + O IV', 'darkgreen' ],
    [ll.C4_1548, r'C IV', 'darkgreen'],
    [1640.42, 'He II', 'darkgreen'],
    [1750.26, 'N III]', 'darkgreen'],
    [ll.C3_1908 , r'C III', 'darkgreen' ],
    [2327.00, 'CII]', 'darkgreen'],
    [2396.36, 'FeII*', 'darkgreen'],
    [2626.45, 'FeII*', 'darkgreen'],
    [3346.82, '[Ne V]', 'darkgreen'],
    [3426.84, '[Ne V]', 'darkgreen'],
    [ll.O2_mean , r'[O II]', 'darkgreen'],
    [3759.99, '[Fe VII]', 'darkgreen'],
    [ll.Ne3_3869 , r'[Ne III]', 'darkgreen'],
    # [ll.Ne3_3968 , r'[Ne III]', 'darkgreen'],
    [ll.O3_4363 , r'[O III]' , 'darkgreen'],
    [ll.O3_4960 , r'[O III]' , 'darkgreen'],
    [ll.O3_5007 , r'[O III]' , 'darkgreen'],
    [5160.33, '[Fe VII]', 'darkgreen'],
    [ll.O1_5578 , r'O I', 'darkgreen' ],
    [5722.30, '[Fe VII]', 'darkgreen'],
    [5877.29, 'He I', 'darkgreen'],
    [6087.98, '[Fe VII]', 'darkgreen'],
    [ll.O1_6302 , r'O I' , 'darkgreen'],
    [ll.O1_6365 , r'O I' , 'darkgreen'],
    [ll.N2_5756 , r'[N II]' , 'darkgreen'],
    [ll.N2_6549 , r'[N II]' , 'darkgreen'],
    [ll.N2_6585 , r'[N II]' , 'darkgreen'],
    [ll.S2_6718 , r'[S II]', 'darkgreen'],
    [ll.S2_6732 , r'[S II]', 'darkgreen'],
    [ll.Ar3_7137 , r'[Ar III]' , 'darkgreen'],
]
# Absorption / stellar features to label BELOW the stacked spectrum:
# [rest-frame wavelength (Angstrom), label, plot color]
abs_line_list = [
    [911.753, r'Ly$_{limit}$', 'black'],
    [1025.7220, r'Ly$_\beta$', 'black'],
    [ll.H1_1216, r'Ly$_\alpha$', 'black'],
    [1857.40, 'Al III', 'darkgreen'],
    #
    [2344.21, 'FeII', 'darkgreen'],
    [2382.76, 'Fe II', 'darkgreen'],
    [2600.17, 'FeII', 'darkgreen'],
    [2798.75, 'MgII', 'darkgreen'],
    #
    [3835.397, r'H$\eta$', 'black'],
    [3889.064, r'H$\zeta$', 'black'],
    [3934.777, 'Ca(K)', 'magenta'],
    [3969.588, 'Ca(H)', 'magenta'],
    [ll.H1_3970 , r'H$_\epsilon$', 'black'],
    #
    [ll.H1_4102 , r'H$_\delta$', 'black'],
    [4305.61, 'G', 'magenta'],
    [ll.H1_4341 , r'H$_\gamma$', 'black'],
    [ll.He2_4686 , r'He II', 'darkgreen'],
    [ll.H1_4862 , r'H$_\beta$', 'black'],
    #
    [5176.7, 'MgI b', 'magenta'],
    [ll.He2_5411, r'He II', 'darkgreen'],
    [5895.6, r'NaI D$_{1,2}$', 'magenta'],
    [ll.H1_6564 , r'H$_\alpha$', 'black'],
    #
    [8500.36, 'Ca II', 'magenta'],
    [8544.44, 'Ca II', 'magenta'],
    [8664.52, 'Ca II', 'magenta'],
]
# line_list_abs = n.array([ 2249.88, 2260.78, 2344.21, 2374.46, 2382.76, 2576.88, 2586.65, 2594.50, 2600.17, 2606.46, 2796.35, 2803.53, 2852.96])
# line_list_abs_names = n.array(['FeII' , 'FeII', 'FeII', 'FeII', 'FeII', 'MnII', 'FeII', 'MnII', 'FeII', 'MnII', 'MgII', 'MgII', 'MgI'])
# line_list_em = n.array([2327, 2365.55, 2396.36, 2612.65,2626.45])
# line_list_em_names = n.array(['CII]', 'FeII*', 'FeII*', 'FeII*', 'FeII*'])
#stack_dir = join( os.environ['HOME'], "SDSS/stacks/v2" )
# location of the stitched stack FITS products and the default stack to plot
stack_dir = join( os.environ['HOME'], "SDSS/stacks" )
file_out = join(stack_dir,"X_AGN", "DR16_ELG-stitched-stack.fits")
def plot_spec( p_2_stack = file_out ):
    """Plot the median stacked spectrum stored in FITS file *p_2_stack*,
    annotate emission lines above and absorption lines below the curve,
    and save the figure next to the input as "<p_2_stack>.png"."""
    print('plots', p_2_stack)
    # fig=p.figure(7, (14.0, 14.0), frameon=False)
    # fig.add_subplot(411, ylabel=r'F$_\lambda$')
    fig=p.figure(5, (14.0, 8.0))#, frameon=False)
    fig.add_subplot(111, ylabel=r'F$_\lambda$', xlabel='Wavelength rest-frame [Angstrom]')
    stack = fits.open(p_2_stack)[1].data
    # keep only rows with a valid (positive) wavelength
    s1 = (stack['wavelength']>0)
    stack = stack[s1]
    y_min = n.min(stack['medianStack'])
    y_max = n.max(stack['medianStack'])
    delta_y = y_max - y_min
    p.xlim((n.min(stack['wavelength']), 9500 )) # n.max(stack['wavelength'])))
    p.ylim((y_min - delta_y * 0.2 , y_max + delta_y * 0.2 ))
    #p.xscale('log')
    # lines above: label each emission line slightly above the local maximum
    for elem in em_line_list:
        print(elem)
        if elem[0]>n.min(stack['wavelength'][5]) and elem[0]<n.max(stack['wavelength'][-5]) :
            xpos = n.searchsorted(stack['wavelength'], elem[0])
            ypos = n.max(stack['medianStack'][xpos-10:xpos+10]) + delta_y * 0.1
            # p.plot(n.array([elem[0], elem[0]]), em_dash_Y, ls='dashed', color='k', lw=0.5)
            p.text(elem[0], ypos, r'$^{----}$' + elem[1], rotation=90, c='darkgreen')
    # lines below: label each absorption feature slightly below the local minimum
    for elem in abs_line_list:
        print(elem)
        if elem[0]>n.min(stack['wavelength'][5]) and elem[0]<n.max(stack['wavelength'][-5]) :
            xpos = n.searchsorted(stack['wavelength'], elem[0])
            ypos = n.min(stack['medianStack'][xpos-30:xpos+30]) - delta_y * 0.2
            # p.plot(n.array([elem[0], elem[0]]), em_dash_Y, ls='dashed', color='k', lw=0.5)
            p.text(elem[0], ypos, elem[1] + r'$^{---}$', rotation=90, c='magenta')
    p.plot(stack['wavelength'], stack['medianStack'], lw=0.7)
    p.grid()
    p.tight_layout()
    #
    # print('standard deviation')
    # fig.add_subplot(412, ylabel=r'per cent')
    # stack = fits.open(p_2_stack)[1].data
    # s1 = (stack['wavelength']>0)
    # stack = stack[s1]
    # y_min = n.min( [ stack['jackknifStackErrors'], stack['NspectraPerPixel']**-0.5 ] )
    # y_max = n.max( [ stack['jackknifStackErrors'], stack['NspectraPerPixel']**-0.5 ] )
    # p.xlim((n.min(stack['wavelength']), n.max(stack['wavelength'])))
    # p.ylim(( y_min/1.1 , y_max*1.1 ))
    # p.plot(stack['wavelength'], stack['jackknifStackErrors']/stack['medianStack'], lw=0.7, label=r'$\sigma^{var}_{JK}$')
    # p.plot(stack['wavelength'], stack['NspectraPerPixel']**-0.5, lw=2, label=r'$1/\sqrt{N}$')
    # p.grid()
    # p.legend()
    # p.yscale('log')
    # p.tight_layout()
    # print('correlation coefficient')
    # fig.add_subplot(212, ylabel='Wavelength rest-frame [Angstrom]', xlabel='Wavelength rest-frame [Angstrom]')
    # CR = n.corrcoef(stack['jackknifeSpectra'])
    # WLa = n.array([ stack['wavelength'] for el in stack['wavelength'] ])
    # WLb = WLa.T
    # highCorr_sel = ( abs(CR) > 0.8 ) & (CR>0)
    # xx = WLa[highCorr_sel]
    # yy = WLb[highCorr_sel]
    # cr_val = CR[highCorr_sel]
    # p.scatter(xx, yy, c=cr_val, s=1, rasterized = True)
    # p.colorbar(shrink=0.8)
    # p.tight_layout()
    p.savefig(p_2_stack+".png")
    p.clf()
# Generate diagnostic plots for each stitched stack product.
plot_spec( join(stack_dir,"X_AGN", "ROSAT_AGNT1-stitched-stack.fits") )
plot_spec( join(stack_dir,"X_AGN", "ROSAT_AGNT2-stitched-stack.fits") )
plot_spec( join(stack_dir,"X_AGN", "ROSAT_AGNT2-highZ-stitched-stack.fits") )
plot_spec( join(stack_dir,"X_AGN", "DR16_ELG-stitched-stack.fits") )
plot_spec( join(stack_dir,"X_AGN", "ROSAT_AGNT1-DR16QSO-stitched-stack.fits") )
plot_spec( join(stack_dir,"X_AGN", "DR16LRG-stitched-stack.fits") )
|
nilq/baby-python
|
python
|
# Resolve a host name (first CLI argument) to its IPv4 address (Python 2 script).
import socket
import sys

# host name to resolve, from the command line; raises IndexError if omitted
name = sys.argv[1]
print 'Resolving Server Service Name for ' + name
# gethostbyname raises socket.gaierror when resolution fails
ipAddress = socket.gethostbyname(name)
print('IP address of host name ' + name + ' is: ' + ipAddress)
|
nilq/baby-python
|
python
|
import cStringIO as StringIO
import numpy as np
from sensor_msgs.msg import PointCloud2
#from rospy.numpy_msg import numpy_msg
#PointCloud2Np = numpy_msg(PointCloud2)
import pypcd
import pyrosmsg
# Load a PCD file from disk and convert it to a sensor_msgs/PointCloud2 message.
pc = pypcd.PointCloud.from_path('./tmp.pcd')
msg = pc.to_msg()
#msg2 = PointCloud2()
#msg2.deserialize(smsg)
def with_caster(m):
    # Author's timing note: ~38.4 us per loop.
    # Passes the message object directly through the pybind type caster.
    pyrosmsg.print_centroid(m)
def with_serial(m):
    # Author's timing note: ~117 us per loop.
    # Serializes the message to a byte string first, then hands the raw
    # buffer to the C++ side for deserialization there.
    buf = StringIO.StringIO()
    m.serialize(buf)
    smsg = buf.getvalue()
    pyrosmsg.print_centroid2(smsg)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# train MNIST with latent equilibrium model
# and track experiment using sacred
import torch
from sacred import Experiment
from sacred.observers import FileStorageObserver
# Sacred experiment definition; each run is archived under ./runs .
ex = Experiment('layeredMNIST')
ex.observers.append(FileStorageObserver('runs'))
# configuration
@ex.config
def config():
    # choose dataset
    # either mnist_784 or Fashion-MNIST
    dataset = 'mnist_784'
# remaining hyper-parameters come from the JSON defaults file
ex.add_config('defaults.json')
# hook for logging metrics to sacred
def after_epoch_hook(network, run):
# metrics to track during experiment
metrics = ['val_loss', 'val_accuracy', 'val_error',
'test_loss', 'test_accuracy', 'test_error']
logs = network.logs
for key in metrics:
if key in logs:
run.log_scalar(key, logs[key])
else:
print(f'Warning: Metric "{key}" not found in logs. Skipping...')
# initialize and train the network
@ex.automain
def run(_run, _config, _seed):
from le_layers_mnist_training import MnistTrainer
from model.network_params import LayeredParams
params = LayeredParams()
params.load_params_from_dict(_config)
import model.latent_equilibrium_layers as nn
import model.layered_torch_utils as tu
fc1 = nn.Linear(# this are the only things that should remain to be set here
28 * 28, 300,
tu.hard_sigmoid,
tu.hard_sigmoid_deriv,
params,
)
fc2 = nn.Linear(
300, 100,
tu.hard_sigmoid,
tu.hard_sigmoid_deriv,
params,
)
fc3 = nn.Linear(
100, _config['classes'],
tu.linear,
tu.linear_deriv,
params,
)
network = nn.LESequential(
[fc1, fc2, fc3],
params,
)
trainer = MnistTrainer(
classes=_config['classes'],
train_samples=_config['train_samples'],
val_samples=_config['val_samples'],
test_samples=_config['test_samples'],
n_updates=_config['n_updates'],
epoch_hook=after_epoch_hook,
)
net, test_acc = trainer.train(
network,
run=_run,
batch_size=_config['batch_size'],
epoch_qty=_config['epoch_qty'],
with_optimizer=_config['with_optimizer'],
verbose=1,
)
return test_acc
|
nilq/baby-python
|
python
|
from django.contrib import admin
from demo_api import models

# Expose the custom user model in the Django admin with the default ModelAdmin.
admin.site.register(models.CustomUser)
# Register your models here.
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.7 on 2018-08-06 16:58
from django.db import migrations, models


class Migration(migrations.Migration):
    """Schema adjustments for the `main` app: default ordering on Dac plus
    field-type changes on Dac and Msg (auto-generated migration)."""

    dependencies = [
        ('main', '0004_auto_20180804_2152'),
    ]

    operations = [
        # Newest-first default ordering for Dac queries.
        migrations.AlterModelOptions(
            name='dac',
            options={'ordering': ['-time']},
        ),
        migrations.AlterField(
            model_name='dac',
            name='data',
            field=models.SmallIntegerField(),
        ),
        # Timestamp set automatically on row creation.
        migrations.AlterField(
            model_name='dac',
            name='time',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='msg',
            name='eth_mac',
            field=models.CharField(max_length=20),
        ),
        migrations.AlterField(
            model_name='msg',
            name='wifi_mac',
            field=models.CharField(max_length=20),
        ),
    ]
|
nilq/baby-python
|
python
|
import os
from flask import Flask, redirect
from flask import request
from flask import jsonify
import hashlib

app = Flask(__name__)
c = 0               # page-hit counter (incremented in hello(), never displayed)
clients = []        # ids of clients that have announced themselves
chat = []           # rolling public chat log, most recent message last
#[from, to, status[0sent, 1accepted, 2rejected]]
requests = {}       # chat requests received, keyed by recipient id
requests_sent = {}  # chat requests sent, keyed by sender id
version = 5         # application version shown in the page footer
additive = 0        # total number of counted messages ever added (see addChat)
def getUID(ip):
return hashlib.sha256(str(ip).encode("utf8")).hexdigest()
def getUN(ip):
    """Numeric identifier: the IP string with its dots removed, as an int."""
    digits = str(ip).replace(".", "")
    return int(digits)
def addChat(toAdd, limit = True):
    """Append a sanitized message to the global chat log.

    Keeps at most the 50 most recent messages. `limit=True` also bumps the
    `additive` message counter (kept for backward compatibility).

    BUGFIX: the cap used to be driven by the ever-growing `additive` counter
    instead of the actual list length, so messages added with limit=False
    bypassed the cap entirely. The log is now trimmed by length.
    """
    global chat, additive
    if limit:
        additive = additive + 1
    print("new chat: " + toAdd)
    # crude XSS guard: strip literal script tags
    toAdd = toAdd.replace("<script>", "").replace("</script>", "")
    chat.append(toAdd)
    # cap by actual length so the log can never exceed 50 entries
    while len(chat) > 50:
        chat.pop(0)
def addClient(uID):
    # Register a client id on first sight and announce the join in the chat.
    if uID not in clients:
        clients.append(uID)
        addChat("--- " + uID + " Joined the Chat ---")
    # NOTE(review): source indentation was ambiguous; assumed this logs on
    # every call (every request), not only for newly seen clients — confirm.
    print("connection from " + str(request.remote_addr))
def removeClient(uID):
    """Drop a client id from the roster and announce the departure."""
    if uID not in clients:
        return
    clients.remove(uID)
    addChat("--- " + uID + " Left the Chat ---")
@app.route('/')
def hello():
    """Main page: registers the caller and renders the public chat log plus
    a message form."""
    global chat, version, c
    uIp = request.access_route[0]
    uID = getUID(uIp)
    addClient(uID)
    c = c + 1
    # assemble the page from fragments; "".join reproduces the old += chain
    pieces = []
    pieces.append("<title>A+</title>")
    pieces.append("<h3> Public Chat </h3>")
    pieces.append("Connected as: " + uID + " (" + uIp + ")<br \\>")
    pieces.append("Refresh the page to access the latest messages.")
    pieces.append("<br \\>-----------------------------------------------------------------------<br \\>")
    for entry in chat:
        # angle brackets stripped so stored messages cannot inject markup
        pieces.append(entry.replace("<", "").replace(">", "") + "<br \\>")
    pieces.append("<br \\>-----------------------------------------------------------------------<br \\>")
    pieces.append("note that only the latest 50 messages are stored and displayed. <br \\><br \\>")
    pieces.append("<form action=\" /post\" method=\"post\">")
    pieces.append("<input type=\"text\" name=\"msg\">")
    pieces.append("<input type=\"submit\">")
    pieces.append("</form>")
    pieces.append("<br \\><hr \\>")
    pieces.append("A+ v. " + str(version) + " | <a href=\"https://raw.githubusercontent.com/jonnelafin/A-/master/LICENSE\">LICENSE</a>")
    return("".join(pieces))
@app.route('/post', methods=['POST'])
def handle_data():
    """Accept a posted chat message and bounce back to the main page."""
    sender = getUID(request.access_route[0])
    addChat(sender + ": " + request.form['msg'])
    return redirect("/", code=302)
@app.route("/get_my_ip", methods=["GET"])
def get_my_ip():
    """Return the caller's IP and its derived anonymous id as JSON."""
    ip = request.access_route[0]
    return jsonify({'ip': ip, 'id': getUID(ip)}), 200
@app.route("/announce", methods=["GET"])
def announceThem():
    """Explicitly register the calling client and echo back its id."""
    caller = getUID(request.access_route[0])
    addClient(caller)
    return jsonify({'you': caller}), 200
@app.route("/unannounce", methods=["GET"])
def unannounceThem():
    """Deregister the calling client and echo back its id."""
    caller = getUID(request.access_route[0])
    removeClient(caller)
    return jsonify({'you': caller}), 200
@app.route("/list", methods=["GET"])
def listAnnounced():
    """Return the ids of all currently announced clients."""
    payload = {'clients': clients}
    return jsonify(payload), 200
@app.route("/req", methods=['POST'])
def requestCH():
    """Record a chat request from the caller to the user named in the
    posted `to` field, on both the sender's and the recipient's lists."""
    global requests, requests_sent
    uID = getUID(request.access_route[0])
    if "to" not in request.form:
        return jsonify({'error': "400: POST Request expected"}), 400
    to = request.form['to']
    # [from, to, status[0sent, 1accepted, 2rejected]]
    req = [uID, to, 0]
    requests.setdefault(to, []).append(req)
    requests_sent.setdefault(uID, []).append(req)
    return redirect("/", code=302)
@app.route("/status", methods=["GET"])
def sendStatus():
    """Return the chat requests sent by and received by the caller.

    Missing entries are initialized to empty lists so the response is
    always well-formed. (Removed the dead local `lis` from the original.)
    """
    global requests, requests_sent
    uID = getUID(request.access_route[0])
    requests_sent.setdefault(uID, [])
    requests.setdefault(uID, [])
    return jsonify({'sent': requests_sent[uID], 'received': requests[uID]}), 200
@app.route("/send", methods=["GET"])
def sendView():
    """Render the form used to send a chat request to another user."""
    rows = [
        "<h3> Send a Chat Request </h3>",
        "<hr \\>",
        "<form action=\" /req\" method=\"post\">",
        "<h4> To: </h4>",
        "<input type=\"text\" name=\"to\"><br \\>",
        "<input type=\"submit\">",
        "</form>",
        "<hr \\>",
    ]
    return "".join(rows), 200
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.utils.translation import gettext_lazy as _
class SitesConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "webquills.sites"
label = "sites"
verbose_name = _("Webquills sites")
def ready(self) -> None:
# Once the ORM is initialized, connect signal handlers
from . import signals
signals.connect_signals()
post_migrate.connect(signals.create_default_site, sender=self)
|
nilq/baby-python
|
python
|
import socket
import select
import threading
import json
import sys
import traceback
import os
import random
from clientInterface import ClientInterface
from constants import LOGIN, LOGOUT, CREATE_ACCOUNT, DELETE_ACCOUNT, EXIT, ACTIVE_USERS, ACTIVE_STATUS, INACTIVE_STATUS, OPEN_CHAT, CLOSE_CHAT, DELETE_MESSAGES, SEND_MESSAGE
class Client:
    """Chat client: talks to a central server for accounts and presence, and
    exchanges messages directly with other clients over P2P sockets.

    Fixes over the original: the outer exception handler of
    runAsPassive_P2P printed an undefined name (`print(e)` with the
    exception bound as `a`), raising a NameError that masked the real
    error; and scanPort's bare `except` is narrowed to OSError.
    Comments were translated from Portuguese to English.
    """

    def __init__(self, serverHost='localhost', serverPort = 5000):
        self.serverHost = serverHost          # address of the passive (server) process
        self.serverPort = serverPort          # port the server listens on
        self.myPort = None                    # port this client accepts peer connections on
        self.sock = None                      # socket used to talk to the server
        self.clientView = ClientInterface()   # UI layer of the application
        self.userName = ''                    # logged-in user name
        self.password = ''                    # logged-in password
        self.status = -1                      # -1 logged out, 0 inactive, 1 active
        self.stopWorkers = False              # master switch to stop all worker threads
        self.chatUsersIps = {}                # known users -> IP address
        self.chatUsersPorts = {}              # known users -> P2P port
        self.chatUsersStatus = {}             # known users -> status
        self.openChat = {}                    # users with an active (1) / closed (0) conversation
        self.openChatMessages = {}            # per-user conversation history, e.g. {'bob': ['me:hi', 'bob:hi']}
        self.fifoMessages = {}                # per-user queue of outgoing messages
        self.lock = threading.Lock()
        self.start()                          # acquire the socket and connect to the server

    def scanPort(self):
        """Find a free local port for this client.

        :return an unused port number; probing starts at a random port
            between 2000 and 4000 to reduce collisions between clients.
        """
        ip = socket.gethostbyname(socket.gethostname())
        for port in range(random.randrange(2000, 4000), 65535):
            try:
                serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                serv.bind((ip, port))
            except OSError:
                # bind failed -> port already taken, try the next one
                print('[OPEN] Port open :', port)
                serv.close()
                continue
            serv.close()
            return port

    def start(self):
        """Create the client socket and connect to the server; exit on failure."""
        self.sock = socket.socket()
        try:
            self.sock.connect((self.serverHost, self.serverPort))
        except Exception:
            sys.exit(1)

    def stop(self):
        """Close the server-facing socket."""
        self.sock.close()

    def finishBusiness(self):
        """Stop the worker threads and unblock the passive (P2P) accept loop
        by poking our own listening port."""
        self.stopWorkers = True
        sock = socket.socket()
        sock.connect(('localhost', self.myPort))
        sock.close()

    def handlerServerRequest(self, method, userInput):
        """Build the JSON request for *method* from the user's input, send it
        to the server and return the decoded response.

        :param method: command to execute.
        :param userInput: values typed by the user (tuple or None).
        :return the server's response as a dict.
        """
        methods = {CREATE_ACCOUNT: 'createAccount', DELETE_ACCOUNT: 'deleteAccount', LOGIN: 'authAccount', ACTIVE_USERS: 'getUsers', LOGOUT: 'logout', EXIT: 'logout', ACTIVE_STATUS: 'setMyStatus', INACTIVE_STATUS: 'setMyStatus'}
        if method in (CREATE_ACCOUNT, DELETE_ACCOUNT):
            userName, password = userInput
            request = {'method': methods[method], 'data': {'userName': userName, 'password': password}}
        if method == LOGIN:
            # logging in also picks the port other clients will connect to
            self.myPort = self.scanPort()
            userName, password = userInput
            request = {'method': methods[method], 'data': {'userName': userName, 'password': password, 'port': self.myPort}}
        elif method == ACTIVE_USERS:
            request = {'method': methods[method], 'data': None}
        elif method == ACTIVE_STATUS:
            request = {'method': methods[method], 'data': {'userName': self.userName, 'password': self.password, 'status': 1}}
        elif method == INACTIVE_STATUS:
            request = {'method': methods[method], 'data': {'userName': self.userName, 'password': self.password, 'status': 0}}
        elif method in (LOGOUT, EXIT):
            request = {'method': methods[method], 'data': {'userName': self.userName, 'password': self.password}}
        request_msg = json.dumps(request, ensure_ascii=False)
        self.sock.send(bytes(request_msg, encoding='utf-8'))
        response_msg = self.sock.recv(1024)
        response = json.loads(response_msg)
        return response

    def handleServerResponse(self, method, response, userInput):
        """Apply the server's response for *method* to the client's state.

        :param method: command that was executed.
        :param response: response dict returned by the server.
        :param userInput: values typed by the user, when relevant.
        :return None, or a response dict for the UI when method == OPEN_CHAT.
        """
        # on successful login, store credentials and start the passive P2P side
        if method == LOGIN and response['status'] == 'success':
            userName, password = userInput
            self.userName = userName
            self.clientView.username = self.userName
            self.password = password
            self.status = 1
            work = threading.Thread(target=self.runAsPassive_P2P, args=())
            work.start()
        if method == ACTIVE_STATUS and response['status'] == 'success':
            self.status = 1
        if method == INACTIVE_STATUS and response['status'] == 'success':
            self.status = 0
        # on logout, mark offline and tear down workers plus the passive loop
        if method == LOGOUT and response['status'] == 'success':
            self.status = -1
            self.finishBusiness()
        # when opening a chat, refresh the local user tables from the server
        if method == OPEN_CHAT and response['status'] == 'success':
            self.lock.acquire()
            for element in response['data']:
                self.chatUsersIps[element['userName']] = element['ip']
                self.chatUsersPorts[element['userName']] = element['port']
                self.chatUsersStatus[element['userName']] = element['status']
            self.lock.release()
            response = {'method': 'openChat', 'status': 'success', 'data': {'message': None}}
            return response
        elif method == OPEN_CHAT and response['status'] == 'error':
            response = {'method': 'openChat', 'status': 'error', 'data': {'message': None}}
            return response

    def handleInterfaceCommand(self, cmd, userInput = None, response = None):
        """Handle UI-level commands (chat open/close, message queueing, ...).

        :param cmd: command to execute.
        :param userInput: values typed by the user, when relevant.
        :param response: response dict, when relevant.
        :return a response dict for the ClientInterface, or None.
        """
        if cmd == OPEN_CHAT:
            # target user unknown -> report an error to the UI
            if userInput[5:] not in self.chatUsersStatus:
                response = {'method': 'openChat', 'status': 'error', 'data': {'message': None}}
            # conversation already active -> just confirm, no back-end changes
            elif (userInput[:5] == OPEN_CHAT and self.status == 1 and (userInput[5:] in self.openChat and self.openChat[userInput[5:]] == 1)
                    and self.chatUsersStatus[userInput[5:]] == '1'):
                response = {'method': 'openChat', 'status': 'success', 'data': {'message': None}}
                self.clientView.openChatFriendUser = userInput[5:]
            # otherwise refresh user info and start the conversation as the active peer
            else:
                response = self.handlerServerRequest(ACTIVE_USERS, None)
                response = self.handleServerResponse(OPEN_CHAT, response, None)
                if response['status'] == 'success':
                    response = self.runAsActive_P2P(userInput)
                    if response['status'] == 'success':
                        self.clientView.openChatFriendUser = userInput[5:]
        elif cmd == CLOSE_CHAT:
            self.clientView.openChatFriendUser = None
            response = {'method': 'closeChat', 'status': 'success', 'data': {'message': None}}
        # after a successful login, refresh the tables of registered users
        elif cmd == LOGIN and (response['status'] == 'success'):
            self.stopWorkers = False
            responseUpdate = self.handlerServerRequest(ACTIVE_USERS, None)
            self.handleServerResponse(OPEN_CHAT, responseUpdate, None)
            response = None
        elif cmd == DELETE_MESSAGES:
            self.openChatMessages[self.clientView.openChatFriendUser] = []
            response = {'method': 'deleteMessages', 'status': 'success', 'data': {'message': None}}
        # outgoing messages are queued; the channel thread delivers them
        elif cmd == SEND_MESSAGE:
            self.lock.acquire()
            self.openChatMessages[self.clientView.openChatFriendUser].append(self.userName + ":" + userInput)
            if self.clientView.openChatFriendUser not in self.fifoMessages:
                self.fifoMessages[self.clientView.openChatFriendUser] = []
            self.fifoMessages[self.clientView.openChatFriendUser].append(userInput)
            self.lock.release()
            response = None
        return response

    def channelMessage(self, chatUserName, clientSock):
        """Message channel for one client-to-client conversation.

        Runs in a worker thread on both the active and the passive P2P side:
        receives incoming messages and flushes the outgoing FIFO queue.

        :param chatUserName: user name of the conversation partner.
        :param clientSock: socket connected to that partner.
        """
        request_msg = None
        if chatUserName not in self.openChatMessages:
            self.openChatMessages[chatUserName] = []
        try:
            while True:
                try:
                    # receive from the peer (0.5 s timeout set by the caller)
                    request_msg = clientSock.recv(1024)
                except socket.timeout:
                    pass
                # empty read means the peer closed; also honor the stop flag
                if request_msg == b'' or self.stopWorkers:
                    self.lock.acquire()
                    self.openChat[chatUserName] = 0
                    self.lock.release()
                    clientSock.close()
                    break
                if request_msg != None:
                    # several JSON payloads may arrive concatenated; split them
                    request_msg = str(request_msg, encoding='utf-8').replace('}{', '}-#-{').split('-#-')
                    for r in request_msg:
                        request = json.loads(r)
                        if request['method'] == 'logout':
                            self.lock.acquire()
                            self.openChat[chatUserName] = 0
                            self.lock.release()
                            clientSock.close()
                            # NOTE: only exits the for-loop; the next recv on
                            # the closed socket raises and the outer except
                            # performs the final cleanup
                            break
                        self.lock.acquire()
                        # record the received message in this conversation's history
                        self.openChatMessages[chatUserName].append(chatUserName + ":" + request['data']['message'])
                        self.lock.release()
                # flush every queued outgoing message for this peer
                self.lock.acquire()
                for i in range(len(self.fifoMessages.get(chatUserName, []))):
                    msg = self.fifoMessages[chatUserName][0]
                    request = {'method': 'sendMessage', 'data': {'userName': self.userName, 'message': msg}}
                    request_msg = json.dumps(request, ensure_ascii=False)
                    clientSock.send(bytes(request_msg, encoding='utf-8'))
                    self.fifoMessages[chatUserName].pop(0)
                self.clientView.messages_queue = self.openChatMessages
                if self.clientView.openChatFriendUser == chatUserName:
                    self.clientView.printChatScreen()
                self.lock.release()
                request_msg = None
        except Exception:
            # peer vanished or the socket failed: mark the conversation closed
            self.lock.acquire()
            self.openChat[chatUserName] = 0
            self.lock.release()
            clientSock.close()

    def runAsActive_P2P(self, userInput):
        """Active side of the P2P flow: initiate a conversation with another
        client, triggered by a command such as "chat:nick".

        :param userInput: raw chat command typed by the user.
        :return an openChat response dict for the UI.
        """
        if (userInput[:5] == OPEN_CHAT and self.status == 1 and self.chatUsersStatus[userInput[5:]] == '1'
                and ((not userInput[5:] in self.openChat) or (userInput[5:] in self.openChat and self.openChat[userInput[5:]] == 0))):
            try:
                sock_active = socket.socket()
                # connect to the passive side of the target client
                sock_active.connect((self.chatUsersIps[userInput[5:]], self.chatUsersPorts[userInput[5:]]))
                sock_active.setblocking(True)
                request = {'method': 'sendMessage', 'data': {'userName': self.userName, 'message': 'hand shake'}}
                request_msg = json.dumps(request, ensure_ascii=False)
                # send the handshake and wait for the peer's confirmation
                sock_active.send(bytes(request_msg, encoding='utf-8'))
                handshakeMsg = sock_active.recv(1024)
                handshakeMsgDict = json.loads(handshakeMsg)
                # on a successful handshake, spawn the message-channel thread
                self.lock.acquire()
                if handshakeMsgDict['data']['userName'] != '' and handshakeMsgDict['data']['message'] == 'success':
                    self.openChat[userInput[5:]] = 1
                    sock_active.setblocking(False)
                    sock_active.settimeout(0.5)
                    worker = threading.Thread(target=self.channelMessage, args=(handshakeMsgDict['data']['userName'], sock_active))
                    worker.start()
                self.lock.release()
            except Exception:
                traceback.print_exc()
                sock_active.close()
                response = {'method': 'openChat', 'status': 'error', 'data': {'message': None}}
                return response
            finally:
                handshakeMsg = None
            response = {'method': 'openChat', 'status': 'success', 'data': {'message': None}}
        else:
            response = {'method': 'openChat', 'status': 'error', 'data': {'message': None}}
        return response

    def runAsPassive_P2P(self):
        """Passive side of the P2P flow: accept conversation proposals from
        other clients and spawn a message-channel thread for each one."""
        inputs = []
        sock = socket.socket()
        sock.bind(('', self.myPort))
        sock.listen(10)               # up to 10 pending connections
        sock.setblocking(False)
        inputs.append(sock)
        try:
            while (True):
                read, write, exception = select.select(inputs, [], [])
                # refresh the local user tables from the server
                response = self.handlerServerRequest(ACTIVE_USERS, None)
                self.handleServerResponse(OPEN_CHAT, response, None)
                for trigger in read:
                    # only accept while logged in and not shutting down
                    if trigger == sock and self.status == 1 and not self.stopWorkers:
                        try:
                            clientSock, ipAddress = sock.accept()
                            clientSock.setblocking(False)
                            clientSock.settimeout(0.5)
                            handshakeMsg = clientSock.recv(1024)
                            self.lock.acquire()
                            handshakeMsgDict = json.loads(handshakeMsg)
                            if ((handshakeMsgDict['data']['userName'] != '') and (self.chatUsersStatus[handshakeMsgDict['data']['userName']] == '1') and ((not handshakeMsgDict['data']['userName'] in self.openChat) or (handshakeMsgDict['data']['userName'] in self.openChat and self.openChat[handshakeMsgDict['data']['userName']] == 0))):
                                self.openChat[handshakeMsgDict['data']['userName']] = 1
                                chatUserName = handshakeMsgDict['data']['userName']
                                handshakeMsgDict = {}
                                handshakeMsgDict['method'] = 'sendMessage'
                                handshakeMsgDict['data'] = {'userName': self.userName, 'message': 'success'}
                                handshakeMsg = json.dumps(handshakeMsgDict, ensure_ascii=False)
                                # confirm the handshake and start the channel thread
                                clientSock.send(bytes(handshakeMsg, encoding='utf-8'))
                                self.lock.release()
                                self.clientView.refreshNotification(self.clientView.currScreen, f'{chatUserName} iniciou um chat com você! Digite "chat:{chatUserName} para abrir o chat."', add = True)
                                worker = threading.Thread(target=self.channelMessage, args=(chatUserName, clientSock))
                                worker.start()
                            else:
                                self.lock.release()
                                clientSock.close()
                        except Exception as e:
                            print(e)
                            clientSock.close()
                        finally:
                            handshakeMsg = None
                    else:
                        # poked by finishBusiness(): stop accepting and exit
                        sock.close()
                        return
        except Exception as a:
            # BUGFIX: the original printed the undefined name `e` here,
            # raising a NameError that masked the actual exception
            print(a)
            self.stop()

    def run(self):
        """Main client loop: read UI commands and dispatch them to the server
        or to the local command handlers until the user exits."""
        userInput, msg = self.clientView.homeScreen()
        while (userInput != EXIT):
            if userInput == CREATE_ACCOUNT:
                userInput = self.clientView.createAccountScreen()
                response = self.handlerServerRequest(CREATE_ACCOUNT, userInput)
                self.clientView.handlerResponse(response)
            elif userInput == DELETE_ACCOUNT:
                userInput = self.clientView.deleteAccountScreen()
                response = self.handlerServerRequest(DELETE_ACCOUNT, userInput)
                self.clientView.handlerResponse(response)
            elif userInput == LOGIN:
                userInput = self.clientView.authScreen()
                response = self.handlerServerRequest(LOGIN, userInput)
                self.handleInterfaceCommand(LOGIN, response=response)
                self.handleServerResponse(LOGIN, response, userInput)
                self.clientView.handlerResponse(response)
            elif userInput == ACTIVE_USERS:
                response = self.handlerServerRequest(ACTIVE_USERS, None)
                self.clientView.handlerResponse(response)
            elif userInput == ACTIVE_STATUS:
                response = self.handlerServerRequest(ACTIVE_STATUS, None)
                self.handleServerResponse(ACTIVE_STATUS, response, None)
                self.clientView.handlerResponse(response)
            elif userInput == INACTIVE_STATUS:
                response = self.handlerServerRequest(INACTIVE_STATUS, None)
                self.handleServerResponse(INACTIVE_STATUS, response, None)
                self.clientView.handlerResponse(response)
            elif userInput[0:5] == OPEN_CHAT:
                response = self.handleInterfaceCommand(OPEN_CHAT, userInput=userInput)
                self.clientView.handlerResponse(response)
            elif userInput == LOGOUT:
                response = self.handlerServerRequest(LOGOUT, None)
                self.clientView.handlerResponse(response)
                result = self.handleServerResponse(LOGOUT, response, None)
            elif userInput == CLOSE_CHAT:
                response = self.handleInterfaceCommand(CLOSE_CHAT)
                self.clientView.handlerResponse(response)
            elif userInput == DELETE_MESSAGES:
                response = self.handleInterfaceCommand(DELETE_MESSAGES)
                self.clientView.handlerResponse(response)
            elif userInput == SEND_MESSAGE:
                response = self.handleInterfaceCommand(SEND_MESSAGE, msg)
            userInput, msg = self.clientView.redirectScreen()
        self.stop()
self.stop()
if __name__ == '__main__':
    # Connect to the chat server on localhost:5000 and start the UI loop.
    client = Client(serverHost='localhost', serverPort = 5000)
    client.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2017 Lancaster University.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import sys
import optparse
import platform
import json
import shutil
import re
def system(cmd):
    """Run *cmd* through the shell; abort the whole script on failure."""
    status = os.system(cmd)
    if status != 0:
        sys.exit(1)
def build(clean):
    """Configure with CMake and compile: Ninja on Windows, Make elsewhere.
    *clean* triggers `make clean` before building (Unix only)."""
    on_windows = platform.system() == "Windows"
    if on_windows:
        # configure, then build with Ninja
        system('cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -G "Ninja"')
        system("ninja")
        return
    # configure, then build with Make
    system('cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo -G "Unix Makefiles"')
    if clean:
        system("make clean")
    system("make -j 10")
def read_json(fn):
    """Parse the JSON file at *fn* and return the resulting object.

    Simplified from the original: json.load reads the file directly instead
    of a redundant pre-initialized string plus json.loads(f.read()).
    """
    with open(fn) as f:
        return json.load(f)
def checkgit():
    # Abort when the current repository has uncommitted changes.
    # (Python 2 print statements, consistent with the rest of this script.)
    stat = os.popen('git status --porcelain').read().strip()
    if stat != "":
        print "Missing checkin in", os.getcwd(), "\n" + stat
        exit(1)
def read_config():
    """Load codal.json and the matching target description.

    Returns (codal, targetdir, target): the parsed codal.json, the target's
    directory name, and the parsed target.json.
    """
    codal = read_json("codal.json")
    target_name = codal['target']['name']
    target = read_json("libraries/" + target_name + "/target.json")
    return (codal, target_name, target)
def update():
    """Check out the pinned branch and pull every library repo, then pull
    the target repo itself."""
    (codal, target_dirname, target) = read_config()
    root = os.getcwd()
    for lib in target['libraries']:
        os.chdir(root + "/libraries/" + lib['name'])
        system("git checkout " + lib['branch'])
        system("git pull")
    os.chdir(root + "/libraries/" + target_dirname)
    system("git pull")
    os.chdir(root)
def printstatus():
    # Header with the current directory, then short-format git status.
    # (Python 2 print statement.)
    print "\n***%s" % os.getcwd()
    system("git status -s")
def status():
    """Print `git status -s` for every library repo, the target repo and
    the top-level repo."""
    (codal, target_dirname, target) = read_config()
    root = os.getcwd()
    for lib in target['libraries']:
        os.chdir(root + "/libraries/" + lib['name'])
        printstatus()
    os.chdir(root + "/libraries/" + target_dirname)
    printstatus()
    os.chdir(root)
    printstatus()
def get_next_version():
log = os.popen('git log -n 100').read().strip()
m = re.search('Snapshot v(\d+)\.(\d+)\.(\d+)', log)
if m is None:
print "Cannot determine next version from git log"
exit(1)
v0 = int(m.group(1))
v1 = int(m.group(2))
v2 = int(m.group(3))
if options.update_major:
v0 += 1
v1 = 0
v2 = 0
elif options.update_minor:
v1 += 1
v2 = 0
else:
v2 += 1
return "v%d.%d.%d" % (v0, v1, v2)
def lock():
    # Create a locked snapshot: pin every library to its current commit,
    # bump the snapshot version, write target-locked.json, then commit,
    # tag and push. Requires all repos to be clean and pushed.
    (codal, targetdir, target) = read_config()
    dirname = os.getcwd()
    for ln in target['libraries']:
        os.chdir(dirname + "/libraries/" + ln['name'])
        checkgit()
        # refuse to lock if local commits were never pushed
        stat = os.popen('git status --porcelain -b').read().strip()
        if "ahead" in stat:
            print "Missing push in", os.getcwd()
            exit(1)
        # pin the library to its current commit hash
        sha = os.popen('git rev-parse HEAD').read().strip()
        ln['branch'] = sha
        print ln['name'], sha
    os.chdir(dirname + "/libraries/" + targetdir)
    ver = get_next_version()
    print "Creating snaphot", ver
    # start from the committed lock file, not a dirty working copy
    system("git checkout target-locked.json")
    checkgit()
    target["snapshot_version"] = ver
    with open("target-locked.json", "w") as f:
        f.write(json.dumps(target, indent=4, sort_keys=True))
    system("git commit -am \"Snapshot %s\"" % ver) # must match get_next_version() regex
    sha = os.popen('git rev-parse HEAD').read().strip()
    system("git tag %s" % ver)
    system("git pull")
    system("git push")
    system("git push --tags")
    os.chdir(dirname)
    print "\nNew snapshot: %s [%s]" % (ver, sha)
def delete_build_folder(in_folder = True):
    """Recreate an empty ./build directory.

    When *in_folder* is True the cwd is the build directory itself, so step
    out first and return into the fresh directory afterwards.
    """
    inside = in_folder
    if inside:
        os.chdir("..")
    shutil.rmtree("./build")
    os.mkdir("./build")
    if inside:
        os.chdir("./build")
# Command-line entry point: parse options, handle the management commands
# (lock/update/status), then configure and build the selected target.
parser = optparse.OptionParser(usage="usage: %prog target-name [options]", description="This script manages the build system for a codal device. Passing a target-name generates a codal.json for that devices, to list all devices available specify the target-name as 'ls'.")
parser.add_option('-c', '--clean', dest='clean', action="store_true", help='Whether to clean before building. Applicable only to unix based builds.', default=False)
parser.add_option('-t', '--test-platforms', dest='test_platform', action="store_true", help='Whether to clean before building. Applicable only to unix based builds.', default=False)
parser.add_option('-l', '--lock', dest='lock_target', action="store_true", help='Create target-lock.json, updating patch version', default=False)
parser.add_option('-m', '--minor', dest='update_minor', action="store_true", help='With -l, update minor version', default=False)
parser.add_option('-M', '--major', dest='update_major', action="store_true", help='With -l, update major version', default=False)
parser.add_option('-u', '--update', dest='update', action="store_true", help='git pull target and libraries', default=False)
parser.add_option('-s', '--status', dest='status', action="store_true", help='git status target and libraries', default=False)
parser.add_option('-d', '--dev', dest='dev', action="store_true", help='enable developer mode (does not use target-locked.json)', default=False)
(options, args) = parser.parse_args()
if not os.path.exists("build"):
    os.mkdir("build")
# Management commands run in the repo root and then exit.
if options.lock_target:
    lock()
    exit(0)
if options.update:
    update()
    exit(0)
if options.status:
    status()
    exit(0)
# out of source build!
os.chdir("build")
test_json = read_json("../utils/targets.json")
# configure the target a user has specified:
if len(args) == 1:
    target_name = args[0]
    target_found = False
    # list all targets
    if target_name == "ls":
        for json_obj in test_json:
            # trailing commas keep the name, info and URL on one line (Python 2)
            print "%s: %s" % (json_obj["name"], json_obj["info"]),
            if "device_url" in json_obj.keys():
                print "(%s)" % json_obj["device_url"],
            print ""
        exit(0)
    # cycle through out targets and check for a match
    for json_obj in test_json:
        if json_obj["name"] != target_name:
            continue
        # strip display-only keys before writing the config
        del json_obj["device_url"]
        del json_obj["info"]
        # developer mode is for users who wish to contribute, it will clone and checkout commitable branches.
        if options.dev:
            json_obj["dev"] = True
        config = {
            "target":json_obj,
        }
        with open("../codal.json", 'w') as codal_json:
            json.dump(config, codal_json, indent=4)
        target_found = True
        # remove the build folder, a user could be swapping targets.
        delete_build_folder()
        break
    if not target_found:
        print("'" + target_name + "'" + " is not a valid target.")
        exit(1)
elif len(args) > 1:
    print("Too many arguments supplied, only one target can be specified.")
    exit(1)
# Normal single-target build path.
if not options.test_platform:
    if not os.path.exists("../codal.json"):
        print("No target specified in codal.json, does codal.json exist?")
        exit(1)
    build(options.clean)
    exit(0)
# -t: clean-build the tests for every known target in turn.
for json_obj in test_json:
    # ensure we have a clean build tree.
    delete_build_folder()
    # clean libs
    if os.path.exists("../libraries"):
        shutil.rmtree('../libraries')
    # configure the target and tests...
    config = {
        "target":json_obj,
        "output":".",
        "application":"libraries/"+json_obj["name"]+"/tests/"
    }
    with open("../codal.json", 'w') as codal_json:
        json.dump(config, codal_json, indent=4)
    build(True)
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
from sklearn import neighbors
import numpy as np
from ela.textproc import *
from ela.classification import KNN_WEIGHTING
# These functions were an attempt to have interactive maps with ipywidgets but proved to be a pain.
# I may revisit later on but these are parked.
def plot_lithologydata_slice_points_redo(df,
                                         slice_depth, extent, data_proj,
                                         near_field_extents, geoms, terrain):
    """Plot bore-log locations intersecting a given depth on a cartopy map.

    Rebuilds the whole figure (background tiles, geometries, ticks, north
    arrow) on every call — see note below about why nothing is reused.

    :param df: lithology records; must carry DEPTH_FROM_COL / DEPTH_TO_COL
        plus Easting/Northing columns (projected coordinates — TODO confirm CRS).
    :param slice_depth: depth (same units as the depth columns) to slice at.
    :param extent: full map extent for the axes.
    :param data_proj: cartopy CRS of the data.
    :param near_field_extents: extent to zoom the final view to.
    :param geoms: two geometry collections, drawn black and red respectively.
    :param terrain: cartopy image tiler used as the basemap.
    """
    fig,ax=plt.subplots(1,1,figsize=(15,15), subplot_kw={'projection': data_proj, 'extent': extent})
    # fig.clear()
    # ax.clear()
    ax.add_image(terrain, 11)
    ax.add_geometries(geoms[0], ccrs.PlateCarree(),facecolor='none',edgecolor='k',zorder=1)
    ax.add_geometries(geoms[1], ccrs.PlateCarree(),facecolor='none',edgecolor='r',zorder=1)
    # Force tick labels to plain coordinate values (cartopy axes hide them by default).
    for val,label in zip(ax.get_xticks(), ax.get_xticklabels()):
        label.set_text(str(val))
        label.set_position((val,0))
    for val,label in zip(ax.get_yticks(), ax.get_yticklabels()):
        label.set_text(str(val))
        label.set_position((0,val))
    plt.tick_params(bottom=True,top=True,left=True,right=True,labelbottom=True,labeltop=False,labelleft=True,labelright=False)
    ax.xaxis.set_visible(True)
    ax.yaxis.set_visible(True)
    ax.ticklabel_format(useOffset=False)
    ax.ticklabel_format(style='plain')
    ax.grid(False)
    # Simple north arrow in axes coordinates.
    ax.text(0.1, 0.9, u'\u25B2 \nN ',
            horizontalalignment='center',
            verticalalignment='center',
            fontsize=25,
            color='k',
            family='Arial',
            transform=ax.transAxes)
    ax.set_extent(near_field_extents, crs=data_proj)
    # Note that all of the above is independent of slice depth and background that would not need redoing
    # but Matplotlib befuddles (or rather the interplay with ipywidgets)
    df_slice=df.loc[(df[DEPTH_FROM_COL] <= slice_depth) & (df[DEPTH_TO_COL] >= slice_depth)]
    ax.scatter(df_slice.Easting.values,df_slice.Northing.values)
    plt.title('bore log locations at %s m depth'%(slice_depth), fontsize=20, weight='bold')
# I cannot fathom why this stuff actually plots anything
# via ipywidgets or otherwise since it returns nothing.
def create_background(extent, data_proj,
                      near_field_extents, geoms):
    """Build the static map background once, returning a reusable scatter layer.

    Intended to pair with plot_lithologydata_slice_points: the figure,
    basemap, geometries and decorations are drawn here, and the returned
    scatter layer is updated in-place later via set_offsets.

    :return: tuple (fig, scatter_layer); the scatter layer is seeded with a
        single dummy point at the lower-left corner of near_field_extents.
    """
    fig,ax=plt.subplots(1,1,figsize=(15,15), subplot_kw={'projection': data_proj,'extent': extent})
    stamen_terrain = cimgt.Stamen('terrain-background')
    ax.add_image(stamen_terrain, 11)
    ax.add_geometries(geoms[0], ccrs.PlateCarree(),facecolor='none',edgecolor='k',zorder=1)
    ax.add_geometries(geoms[1], ccrs.PlateCarree(),facecolor='none',edgecolor='r',zorder=1)
    # Force tick labels to plain coordinate values.
    for val,label in zip(ax.get_xticks(), ax.get_xticklabels()):
        label.set_text(str(val))
        label.set_position((val,0))
    for val,label in zip(ax.get_yticks(), ax.get_yticklabels()):
        label.set_text(str(val))
        label.set_position((0,val))
    plt.tick_params(bottom=True,top=True,left=True,right=True,labelbottom=True,labeltop=False,labelleft=True,labelright=False)
    ax.xaxis.set_visible(True)
    ax.yaxis.set_visible(True)
    ax.ticklabel_format(useOffset=False)
    ax.ticklabel_format(style='plain')
    ax.grid(False)
    # North arrow, axes coordinates.
    ax.text(0.1, 0.9, u'\u25B2 \nN ',
            horizontalalignment='center',
            verticalalignment='center',
            fontsize=25,
            color='k',
            family='Arial',
            transform=ax.transAxes)
    ax.set_extent(near_field_extents, crs=data_proj)
    scatter_layer = ax.scatter(near_field_extents[0], near_field_extents[2])
    return (fig, scatter_layer)
def plot_lithologydata_slice_points(df, slice_depth, scatter_layer, fig):
    """Refresh an existing scatter layer with the bores intersecting `slice_depth`.

    Filters `df` to records whose [DEPTH_FROM_COL, DEPTH_TO_COL] interval
    contains the requested depth, pushes their Easting/Northing coordinates
    into the pre-created `scatter_layer`, and redraws the figure canvas.
    Returns the figure for chaining.
    """
    depth_mask = (df[DEPTH_FROM_COL] <= slice_depth) & (df[DEPTH_TO_COL] >= slice_depth)
    slice_records = df.loc[depth_mask]
    plt.title('bore log locations at %s m depth'%(slice_depth), fontsize=20, weight='bold')
    eastings = slice_records.Easting.values
    northings = slice_records.Northing.values
    offsets = [[east, north] for east, north in zip(eastings, northings)]
    scatter_layer.set_offsets(offsets)
    fig.canvas.draw()
    fig.canvas.flush_events()
    return fig
def plot_lithologydata_slice_depth(df, slice_depth, n_neighbours, extent, data_proj, near_field_extents, geoms, gw_subareas, cmap_settings):
    """Map a KNN facies classification at a given AHD elevation.

    Slices the lithology records at `slice_depth` (metres AHD — note the
    from/to columns here are AHD elevations, so 'from' >= depth >= 'to'),
    annotates each bore with its primary lithology, fits a K-nearest-neighbour
    classifier on the bore coordinates, and overlays the class prediction on a
    regular grid covering `gw_subareas`.

    :param n_neighbours: K for the KNN classifier.
    :param cmap_settings: dict with 'cmap' and 'norm' for the class overlay.
    """
    df_slice=df.loc[(df[DEPTH_FROM_AHD_COL] >= slice_depth) & (df[DEPTH_TO_AHD_COL] <= slice_depth)]
    _,ax=plt.subplots(1,1,figsize=(15,15),subplot_kw={'projection': data_proj,'extent': extent})
    stamen_terrain = cimgt.Stamen('terrain-background')
    ax.add_image(stamen_terrain, 11)
    ax.add_geometries(geoms[0], ccrs.PlateCarree(),facecolor='none',edgecolor='k',zorder=1)
    ax.add_geometries(geoms[1], ccrs.PlateCarree(),facecolor='none',edgecolor='r',zorder=1)
    # Label each bore with its primary lithology code.
    for i, txt in enumerate(df_slice[PRIMARY_LITHO_COL].values):
        plt.annotate(txt,(df_slice.Easting.values[i],df_slice.Northing.values[i]),fontsize=8,clip_on=True)
    # Force tick labels to plain coordinate values.
    for val,label in zip(ax.get_xticks(), ax.get_xticklabels()):
        label.set_text(str(val))
        label.set_position((val,0))
    for val,label in zip(ax.get_yticks(), ax.get_yticklabels()):
        label.set_text(str(val))
        label.set_position((0,val))
    plt.tick_params(bottom=True,top=True,left=True,right=True,labelbottom=True,labeltop=False,labelleft=True,labelright=False)
    ax.xaxis.set_visible(True)
    ax.yaxis.set_visible(True)
    ax.ticklabel_format(useOffset=False)
    ax.ticklabel_format(style='plain')
    ax.grid(False)
    # North arrow.
    ax.text(0.1, 0.9, u'\u25B2 \nN ',
            horizontalalignment='center',
            verticalalignment='center',
            fontsize=25,
            color='k',
            family='Arial',
            transform=ax.transAxes)
    plt.title('KNN facies classification at %s m AHD (neighbours=%s)'%(slice_depth,n_neighbours), fontsize=20, weight='bold')
    # Drop records with an empty primary lithology before fitting.
    df_1=df_slice[df_slice.Lithology_1 != ""]
    # X = df_1.as_matrix(columns=[EASTING_COL, NORTHING_COL])
    X = df_1[[EASTING_COL, NORTHING_COL]].values
    y = np.array(df_1[PRIMARY_LITHO_NUM_COL])
    knn = neighbors.KNeighborsClassifier(n_neighbours, weights = KNN_WEIGHTING).fit(X, y)
    # Prediction grid step, in map units (presumably metres — TODO confirm).
    grid_res=100
    # max/min bounds
    x_min=gw_subareas.total_bounds[0]
    y_min=gw_subareas.total_bounds[1]
    x_max=gw_subareas.total_bounds[2]
    y_max=gw_subareas.total_bounds[3]
    xx, yy = np.meshgrid(np.arange(x_min, x_max, grid_res),np.arange(y_min, y_max, grid_res))
    predicted = knn.predict(np.c_[xx.ravel(), yy.ravel()])
    predicted = predicted.reshape(xx.shape)
    # Semi-transparent class overlay on top of the basemap.
    plt.pcolormesh(xx, yy, predicted, cmap=cmap_settings['cmap'], norm=cmap_settings['norm'], alpha=0.3)
    ax.set_extent(near_field_extents, crs=data_proj)
|
nilq/baby-python
|
python
|
import os
import six
from six.moves import cPickle
import time
import numpy as np
import modelarts.aibox.ops as ops
from modelarts.aibox.pipeline import Pipeline
import modelarts.aibox.types as types
# Per-channel pixel means/standard deviations used for input normalisation
# (values look like CIFAR-10 statistics in 0-255 scale — TODO confirm).
_R_MEAN = 125.31
_G_MEAN = 122.95
_B_MEAN = 113.87
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
_R_STD = 62.99
_G_STD = 62.09
_B_STD = 66.70
_CHANNEL_STD = [_R_STD, _G_STD, _B_STD]
# 32x32 RGB inputs, one image per inference.
INPUT_SIZE = 32
NUM_CHANNELS = 3
BATCH_SIZE = 1
# Frozen model, raw dataset location and INT8 calibration cache.
UFF_FILE = "./resnet_cifar.uff"
DATA_DIR = "/path/your_data_path"
CALIB_FILE = "./cifar_int8_cache"
# Directory where preprocessed .raw tensors are dumped for the RawReader.
FILE_ROOT = "./test"
# Pin both pipeline operators to the GPU before building the pipeline.
operator_relation_list = [
    {"OperatorName": "TensorRT", "OperatorDevice": "gpu"},
    {"OperatorName": "RawReader", "OperatorDevice": "gpu"}
]
ops.InitOp(operator_relation_list)
class InferencePipeline(Pipeline):
    """AIBox pipeline: read raw NCHW float tensors and run them through a
    TensorRT engine built from the UFF model (INT8, calibrated from cache)."""

    def __init__(self, batch_size=1, num_threads=1, device_id=0, seed=-1,
                 exec_pipelined=True, prefetch_queue_depth=1):
        super(InferencePipeline, self).__init__(batch_size,
                                                num_threads,
                                                device_id, seed, exec_pipelined, prefetch_queue_depth)
        # Reads pre-dumped .raw files from FILE_ROOT as float NCHW tensors.
        self.input = ops.RawReader(device="gpu", file_root=FILE_ROOT,
                                   dtype=types.AIBOXDataType.FLOAT,
                                   layout_type=types.AIBOXTensorLayout.NCHW,
                                   height=INPUT_SIZE, width=INPUT_SIZE, channels=NUM_CHANNELS,
                                   prefetch_queue_size=prefetch_queue_depth)
        # TensorRT engine: INT8 precision using the pre-computed calibration cache.
        self.tensorrt = ops.TensorRT(
            device="gpu",
            uffFile=UFF_FILE,
            uffInputs=["input,{},{},{}".format(NUM_CHANNELS, INPUT_SIZE, INPUT_SIZE)],
            outputs=["logits"],
            int8=True,
            calibrationCache=CALIB_FILE,
            workspaceSize=20,
            batchSize=batch_size)
        # Iteration counter (not used by define_graph; kept for bookkeeping).
        self.iter = 0

    def define_graph(self):
        # Wire reader output straight into the TensorRT operator.
        self.inputs = self.input()
        tensorrt_out = self.tensorrt(self.inputs)
        return tensorrt_out
def read_data_files(data_dir):
    """Load the CIFAR test batch and return (images, labels) as numpy arrays.

    Images come back normalised (per channel) in NCHW float32 layout;
    labels are int32.
    """
    batch_files = [os.path.join(data_dir, 'test_batch')]
    batches = []
    for batch_file in batch_files:
        with open(batch_file, 'rb') as handle:
            # Python 3 needs bytes decoding for the legacy pickle payload.
            kwargs = {} if six.PY2 else {'encoding': 'bytes'}
            batches.append(cPickle.load(handle, **kwargs))
    images = np.concatenate(
        [batch[b'data'] for batch in batches]).astype(np.float32)
    labels = np.concatenate(
        [batch[b'labels'] for batch in batches]).astype(np.int32)
    images = images.reshape(-1, 3, 32, 32)
    # NCHW -> NHWC so normlize can broadcast over the channel axis...
    images = np.transpose(images, [0, 2, 3, 1])
    images = normlize(images, np.float32)
    # ...then back to the NCHW layout the pipeline expects.
    images = np.transpose(images, [0, 3, 1, 2])
    return images, labels
def normlize(image, dtype=np.float32):
    """Standardise images channel-wise using the module's CIFAR statistics.

    :param image: array whose last axis is the 3 RGB channels (e.g. NHWC).
    :param dtype: dtype used for the mean/std constants (default float32).
    :return: a NEW standardised array; the input is left untouched.

    BUG FIX: the original implementation used in-place `-=` / `/=`, which
    silently mutated the caller's array in addition to returning it — any
    caller that kept the original buffer saw it clobbered. Computing
    `(image - mean) / std` allocates a fresh array with identical values,
    so the visible caller (`x = normlize(x, ...)`) is unaffected.
    """
    cifar10_mean = np.array(_CHANNEL_MEANS,
                            dtype=dtype)  # equals np.mean(train_set.train_data, axis=(0,1,2))/255
    cifar10_std = np.array(_CHANNEL_STD,
                           dtype=dtype)  # equals np.std(train_set.train_data, axis=(0,1,2))/255
    # Broadcasts over the trailing channel axis without modifying `image`.
    return (image - cifar10_mean) / cifar10_std
def main():
    """Dump the CIFAR test set to .raw files, run the INT8 TensorRT pipeline
    over every image, and report top-1 accuracy and average latency."""
    if not os.path.exists(FILE_ROOT):
        os.makedirs(FILE_ROOT)
    images_list, labels_list = read_data_files(DATA_DIR)
    test_count = len(images_list)
    # One .raw tensor per image so the RawReader can stream them in order.
    for i in range(test_count):
        filename = "./test/" + "%05d" % i + ".raw"
        images_list[i].tofile(filename)
    infer_pipe = InferencePipeline(BATCH_SIZE)
    infer_pipe.build()
    total_time = 0
    top1_predicts = []
    for i in range(test_count):
        start_time = time.time()
        pipe_out = infer_pipe.run()
        total_time += time.time() - start_time
        # Logits come back as (batch, classes, 1, 1); argsort()[-1] is argmax.
        predict = pipe_out[0].as_cpu().as_array()[0, :, 0, 0].argsort()[-1]
        top1_predicts.append(predict)
    # Top-1 accuracy over the whole test set.
    predict_top_1_true = 0
    for i in range(test_count):
        if labels_list[i] == top1_predicts[i]:
            predict_top_1_true += 1
    accuracy = float(predict_top_1_true) / test_count
    print(' accuracy: %.2f' % (accuracy * 100))
    # this time include H2D, the time without H2D is printed in the AIBOX
    print("avg time:{}".format(total_time * 1000 / test_count))

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import flask
from flask import Flask, render_template, request, json, redirect, session, flash
from flaskext.mysql import MySQL
from processor import Processor
from werkzeug import generate_password_hash, check_password_hash
def init_db():
    """One-time database initialisation hook (currently only logs).

    Called once inside the app context after the first connection is made;
    real schema/bootstrap work is still a TODO per the message below.
    """
    print "Init DB"
    print "mysql = ", mysql
    print "do init stuff here"
# fix import for running as script
# Resolve the ticketer package whether this module is executed directly
# (python app.py), run as a package module, or imported by another module.
if __name__ == '__main__':
    if __package__ is None:
        # Bare-script execution: add the parent directory so the absolute
        # `ticketer` import resolves.
        import sys
        from os import path
        sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
        from ticketer import name as app_name
        from ticketer import app
        from ticketer import port
        from ticketer import mysql
    else:
        # Executed with -m inside the package: use explicit relative imports.
        from ..ticketer import name as app_name
        from ..ticketer import app
        from ..ticketer import port
        from ..ticketer import mysql
else:
    # Imported normally.
    from ticketer import name as app_name
    from ticketer import app
    from ticketer import port
    from ticketer import mysql

# Eagerly open a connection/cursor on flask.g and run the init hook once.
# NOTE(review): flask.g set up here does not persist into request contexts;
# get_connection/get_cursor below recreate them per request anyway.
with app.app_context():
    print "One time init"
    flask.g.db_connection = mysql.connect()
    flask.g.db_cursor = flask.g.db_connection.cursor()
    init_db()
def get_connection():
    """
    Needs to be called from within the application context
    If a connection does not already exist, it creates one
    using the mysql global.
    :return db_connection
    """
    # Lazily cache the connection on flask.g for the current context.
    if not hasattr(flask.g, "db_connection"):
        print "Creating new db_connection"
        flask.g.db_connection = mysql.connect()
    return flask.g.db_connection
def get_cursor():
    """
    Needs to be called from within the application context
    If a cursor does not already exist, it creates one using
    get_connection()
    :return db_cursor
    """
    # Lazily cache the cursor on flask.g; reuses the cached connection.
    if not hasattr(flask.g, "db_cursor"):
        print "Creating new db_cursor"
        flask.g.db_cursor = get_connection().cursor()
    return flask.g.db_cursor
def main():
    """Start the Flask development server on the configured port."""
    print "***",app_name,"***"
    # NOTE(review): debug=True must not ship to production (enables the debugger).
    app.run(port=port, debug=True)
@app.route("/")
def home():
    """Serve the landing page."""
    landing_page = 'index.html'
    return render_template(landing_page)
@app.route("/hi")
def sayhi():
    """Trivial liveness endpoint: always answers the literal string 'hi'."""
    greeting = "hi"
    return greeting
@app.route("/showSignUp")
def showSignUp():
    """Render the account sign-up form."""
    signup_page = 'signup.html'
    return render_template(signup_page)
@app.route("/signUp",methods=['POST', 'GET'])
def signUp():
_role = 'user' # default role
_name = request.form['inputName']
_email = request.form['inputEmail']
_password = request.form['inputPassword']
_hashed_password = generate_password_hash(_password)
conn = get_connection()
cursor = get_cursor()
#validate
if _name and _email and _password and _hashed_password:
cursor.callproc('sp_createUser',(_name,_email,_hashed_password, _role))
data = cursor.fetchall()
if len(data) is 0:
print "signup happy path"
conn.commit()
return json.dumps({'status': 'ok'})
else:
if 'Username Exists' in data[0][0]:
print "User already exists"
return json.dumps({'status': 'ok'})
else:
return json.dumps({'html': '<span>Enter the required fields</span>'})
@app.route('/showSignIn')
def showSignin():
    """Render the sign-in form."""
    signin_page = 'signin.html'
    return render_template(signin_page)
@app.route('/validateLogin',methods=['POST'])
def validateLogin():
    """Validate the submitted credentials and start a session.

    Looks the user up via sp_validateLogin, compares the stored hash with the
    submitted password, and on success stores user id/email in the session
    before redirecting to /userHome. Any failure renders the error page.
    """
    try:
        _username = request.form['inputEmail']
        _password = request.form['inputPassword']
        # connect to mysql
        cursor = get_cursor()
        cursor.callproc('sp_validateLogin',(_username,))
        data = cursor.fetchall()
        # NOTE(review): security issue — this logs the plaintext password;
        # these debug prints should be removed.
        print "user : ", _username
        print "pass : ", _password
        print "data = ", str(data)
        if len(data) > 0:
            # data[0][3] is assumed to be the stored password hash — TODO confirm
            # against the sp_validateLogin column order.
            if check_password_hash(str(data[0][3]),_password):
                session['user'] = data[0][0]
                session['username'] = _username
                flash("You have logged in successfully")
                return redirect('/userHome')
            else:
                print "failed login detected"
                return render_template('error.html', error='Wrong Email address or Password : hash mismatch.')
        else:
            return render_template('error.html', error='Wrong Email address or Password : len(data) <= 0')
    except Exception as e:
        # Render any unexpected failure rather than surfacing a 500.
        return render_template('error.html', error=str(e))
@app.teardown_appcontext
def close_db(error):
    """Tear down the per-context DB cursor/connection when the app context ends.

    :param error: exception that ended the context, or None on clean teardown.
    """
    print "Closing down db cursor and connection"
    if error is not None:
        print str(error)
    # Close whichever handles this context actually created.
    if hasattr(flask.g, "db_cursor"):
        flask.g.db_cursor.close()
    if hasattr(flask.g, "db_connection"):
        flask.g.db_connection.close()
@app.route('/userHome')
def userHome():
    """Render the signed-in user's home page, or an error page if no session."""
    try:
        current_user = session['username']
    except KeyError:
        # No active session: send the visitor to the error page.
        return render_template('error.html', error="Please sign in")
    return render_template('userHome.html', user=current_user)
@app.route('/logout')
def logout():
    """Drop the user id from the session and return to the landing page."""
    session.pop('user', None)
    landing = '/'
    return redirect(landing)
@app.route('/createTicket')
def createTicket():
    """Render the ticket-creation form for the signed-in user.

    BUG FIX: previously read session['username'] unguarded, so an anonymous
    visitor triggered a KeyError (HTTP 500). Now handled the same way as
    userHome: unauthenticated visitors get the error page.
    """
    try:
        current_user = session['username']
    except KeyError:
        return render_template('error.html', error="Please sign in")
    return render_template('create_ticket.html', user=current_user)
@app.route('/doCreateTicket')
def doCreateTicket():
try:
_key = request.form['key']
_summary = request.form['summary']
_owner = request.form['owner']
cursor = get_cursor()
cursor.callproc('sp_createTicket', (_key, _summary, _owner))
data = cursor.fetchall()
if len(data) is 0:
conn.commit()
print "create ticket worked"
return json.dumps({'status': 'ok'})
else:
print "something odd happened"
print str(data)
return json.dumps({'html': '<span>Enter the required fields</span>'})
except Exception as e:
return json.dumps({'status': 'failed'})
if __name__ == '__main__':
    # NOTE(review): `p` is never used afterwards — presumably Processor() has a
    # side-effectful constructor; confirm, otherwise this line can go.
    p = Processor()
    main()
|
nilq/baby-python
|
python
|
from . import BaseAgent
from pommerman.constants import Action
from pommerman.agents import filter_action
import random
class RulesRandomAgentNoBomb(BaseAgent):
    """Random agent restricted to rule-filtered actions, never placing a bomb.

    Picks uniformly among the filtered legal actions with Bomb removed;
    falls back to Stop when nothing else remains.
    """

    def __init__(self, *args, **kwargs):
        super(RulesRandomAgentNoBomb, self).__init__(*args, **kwargs)

    def act(self, obs, action_space):
        candidates = filter_action.get_filtered_actions(obs)
        bomb = Action.Bomb.value
        if bomb in candidates:
            candidates.remove(bomb)
        if not candidates:
            # Nothing legal left once bombing is excluded: stand still.
            candidates.append(Action.Stop.value)
        return random.choice(candidates)
|
nilq/baby-python
|
python
|
class Database(object):
    """Thin backend selector that opens a DB-API connection on demand.

    `placeholder` is the SQL parameter marker for the active backend:
    '?' for sqlite3 (the default), '%s' once a MySQL connection is made.
    """

    placeholder = '?'

    def connect(self, dbtype, *args, **kwargs):
        """Open a connection for `dbtype` ('sqlite3' or 'mysql').

        Positional args are forwarded to sqlite3.connect, keyword args to
        MySQLdb.connect. Unknown backends are silently ignored.
        """
        if dbtype == 'sqlite3':
            import sqlite3 as driver
            self.connection = driver.connect(*args)
            return
        if dbtype == 'mysql':
            import MySQLdb as driver
            self.connection = driver.connect(**kwargs)
            # MySQL uses pyformat-style placeholders.
            self.placeholder = '%s'
class DBConn(object):
    """Mutable holder pairing a Database with debug/commit behaviour flags."""

    def __init__(self):
        # b_debug: echo SQL when True; b_commit: auto-commit after writes;
        # conn: the Database instance, attached later by the caller.
        self.b_debug, self.b_commit, self.conn = False, True, None
# Module-level singleton: the shared connection holder used by ORM models.
autumn_db = DBConn()
autumn_db.conn = Database()
|
nilq/baby-python
|
python
|
import re
import requests
import csv
# Read the first column of 01.csv: each row is expected to hold a blog URL
# whose tail encodes the uid and article id (see the fixed slices below).
lines = []
with open('01.csv', 'r') as csvFile:
    texts = csv.reader(csvFile)
    for text in texts:
        lines.append(text[0])
datas = []
datass = []
count = 0
# Extract the read, comment, forward and favourite counts
# (translated from the original Chinese comment).
# NOTE(review): the row count 220 and the [-21:-13]/[-11:-5] slices assume a
# fixed-length URL format and input size — confirm before reusing on new data.
for num in range(0,220):
    datas = []
    x = lines[num]
    # uid (8 chars) and article id (6 chars) sliced from the URL tail.
    w = x[-21:-13]
    q = x[-11:-5]
    start_urls1 = ['http://comet.blog.sina.com.cn/api?maintype=num&uid=' + w + '&aids=' + q]
    # NOTE(review): timeout=500 seconds is effectively no timeout — confirm intent.
    html = requests.get(start_urls1[0], timeout=500)
    # Pull every integer out of the API response; the last five are the counters.
    pattern = re.compile(r"\d+")
    reads = re.findall(pattern, html.text)
    f = reads[-5]
    d = reads[-4]
    c = reads[-3]
    z = reads[-2]
    r = reads[-1]
    datas.append(f)
    datas.append(d)
    datas.append(c)
    datas.append(z)
    datas.append(r)
    print(f,d,c,z,r)
    count = count+1
    datass.append(datas)
    print(count)
# Dump one row of five counters per article.
with open('03.csv', 'w', encoding='utf8',newline="") as file:
    write = csv.writer(file)
    write.writerows(datass)
|
nilq/baby-python
|
python
|
from connaisseur.image import Image
class ValidatorInterface:
    """Abstract base class for admission-request validators.

    Concrete validators must implement validate() and the healthy property.
    """

    def __init__(self, name: str, **kwargs):  # pylint: disable=unused-argument
        """
        Initializes a validator based on the data from the configuration file.
        """
        # Identifier used for lookup and string representation.
        self.name = name

    def validate(self, image: Image, **kwargs) -> str:
        """
        Validates an admission request, using the extra arguments from the image policy.
        Returns a list of trusted digests.
        """
        # NOTE(review): annotated -> str but documented as returning a list
        # of digests — confirm the intended contract in implementations.
        raise NotImplementedError

    @property
    def healthy(self):
        # Subclasses report whether their backing service is reachable.
        raise NotImplementedError

    def __str__(self):
        # A validator prints as its configured name.
        return self.name
nilq/baby-python
|
python
|
"""
defines:
- CalculixConverter
"""
from collections import defaultdict
from numpy import array, zeros, cross
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.bdf import BDF, LOAD # PBAR, PBARL, PBEAM, PBEAML,
from pyNastran.bdf.cards.loads.static_loads import Force, Moment
class CalculixConverter(BDF):
    """
    Converts a BDF to Calculix (inp/dat/py files).
    .. warning:: Totally inaccurate....
    How:
    * Nodes/Coordinate Systems/Elements/Properties/Materials are
      directly extracted from the BDF. All objects must reference
      each other properly.
    * Just like Nastran, extra materials/properties are allowed.
      No idea how Code_Aster handles SPOINTs or unassociated GRIDs.
    * Loads must be referenced by a single LOAD card in the Case Control deck.
      This is consistent with standard Nastran.
    Limitations:
    * All Case Control inputs must come from SUBCASE 1.
    * LOAD cards must bound FORCEx/MOMENTx/PLOAD4 cards in order for loads to be written
    * Only SOL 101 (Static)
    Supported Cards:
    * GRID, COORDx
    * LOAD, FORCEx, MOMENTx, PLOAD4
    * CBAR, CBEAM, CROD, CTUBE, CTETRA, CPENTA, CHEXA,CTRIA3/6, CQUAD4/8
    * PBAR, PBEAM, PROD, PTUBE, PSOLID, PSHELL
    * MAT1
    * GRAV (incorrect writing, but really easy to make it correct given proper format)
    .. todo::
        * PCOMP
        * SPC, SPC1, MPC
        * RBE2, RBE3
    """

    def __init__(self, language='english'):
        # NOTE(review): the `language` argument is ignored — self.language is
        # hard-coded to 'english'.
        self.language = 'english'
        BDF.__init__(self)
        # Field widths for aligned output; populated by build_maxs().
        self.max_nid_len = None
        self.max_eid_len = None
        self.max_pid_len = None
        self.max_mid_len = None

    def get_elements_by_pid(self, element_ids=None):
        """
        builds a dictionary where the key is the property ID and the value
        is a list of element IDs
        """
        if element_ids is None:
            element_ids = self.elements.keys()
        props = defaultdict(list)
        for eid in element_ids:
            element = self.elements[eid]
            pid = element.Pid()
            props[pid].append(eid)
        return props

    def get_elements_by_mid(self):
        """
        builds a dictionary where the key is the material ID and the value
        is a list of element IDs
        """
        # mid 0 collects elements without a resolvable material.
        mats = {0: []}
        for mid in self.materials:
            mats[mid] = []
        for eid, element in self.elements.items():
            try:
                mid = element.Mid()
                mats[mid].append(eid)
            except:
                mats[0].append(eid)
        return mats

    def get_elements_by_type(self, element_ids=None):
        """
        builds a dictionary where the key is the element type and the value
        is a list of element IDs
        """
        if element_ids is None:
            element_ids = self.elements.keys()
        elems = defaultdict(list)
        for eid in element_ids:
            element = self.elements[eid]
            element_type = element.type
            elems[element_type].append(eid)
        return elems

    def get_properties_by_mid(self):
        """
        builds a dictionary where the key is the material ID and the value
        is a list of property IDs
        """
        # mid 0 collects properties without a resolvable material.
        mats = {0: []}
        for mid in self.materials:
            mats[mid] = []
        for pid, property in self.properties.items():
            try:
                mid = property.Mid()
                mats[mid].append(pid)
            except:
                mats[0].append(pid)
        return mats

    def calculix_executive(self):
        """Build the (Code_Aster-style) executive control text for SOL 101."""
        inp = ''
        if self.sol == 101:
            inp += 'MECA_STATIQUE % SOL 101 - linear statics\n'
            inp += 'stat(MECA_STATIQUE(MODELE=model,CHAM_MATER=material,CARA_ELEM=elemcar,\n'
            # NOTE(review): BUG — the trailing commas below make the right-hand
            # sides tuples, so `str += tuple` raises TypeError at runtime.
            inp += 'ECIT=(_F(Charge=AllBoundaryConditions,),\n',
            inp += '      _F(Charge=AllLoads,),\n',
            inp += '      ),\n',
            inp += "TITRE='My Title'\n"
        return inp

    def calculix_nodes(self, fdat):
        """
        *NODE
        1, 0.000000, 0.000000, 0.000000
        2, 1.000000, 0.000000, 0.000000
        3, 1.000000, 1.000000, 0.000000
        """
        dat = ''
        dat += '** Calculix_Nodes\n'
        dat += '*NODE\n'
        fdat.write(dat)
        # One row per node, id padded to the widest node id.
        form = '%-' + str(self.max_nid_len) + 's %8s,%8s,%8s\n'
        for nid, node in sorted(self.nodes.items()):
            xyz = node.get_position()
            dat = form % (nid, xyz[0], xyz[1], xyz[2])
            fdat.write(dat)
        dat = '\n\n'
        dat += self.breaker()
        fdat.write(dat)

    def calculix_elements(self, fdat):
        """
        .. todo:: sort elements by Type and Material ID
        """
        dat = ''
        dat += '** Calculix_Elements\n'
        # Nastran element type -> Calculix element type.
        etype_map = {
            'CBAR'     : 'BR32R',
            'CBEAM'    : 'BR32R',
            'CTRIA3'   : 'C2D3',
            'CTRIA6'   : 'C2D6',  # 'S6' ???
            'CQUAD4'   : 'C2D4',  # 'S4' ???
            'CQUAD8'   : 'C2D8',
            'CSHEAR'   : 'S4',
            'CTRIAX'   : 'CAX6',
            'CQUADX'   : 'CAX8',
            'CTRIAX6'  : 'CAX6',
            'CQUADR'   : 'CAX8',
            'CTETRA'   : 'C3D4',
            'CTETRA4'  : 'C3D4',
            'CPYRAM'   : 'C3D5',
            'CPYRAM5'  : 'C3D5',
            'CPYRAM13' : 'C3D13',
            'CPENTA10' : 'C3D10',
            'CPENTA6'  : 'C3D6',
            'CPENTA15' : 'C3D15',
            'CHEXA'    : 'C3D8',
            'CHEXA8'   : 'C3D8',
            'CHEXA20'  : 'C3D20',
        }
        # Group elements by property id, then by element type, so each
        # (pid, etype) pair becomes one ELSET.
        pid_eids = self.get_elements_by_pid(element_ids=None)
        form_elements = '%-' + str(self.nelements) + 's, '
        elsets = []
        for pid, eids in sorted(pid_eids.items()):
            elems = self.get_elements_by_type(eids)
            for etype, eids in sorted(elems.items()):
                calculix_type = etype_map[etype]
                elset = 'pid%i_Elements%s' % (pid, etype)
                elsets.append(elset)
                dat += '** eid,n1,n2,n3,etc... for a %s\n' % etype
                dat += '*ELEMENT, TYPE=%s, ELSET=%s\n' % (calculix_type, elset)
                for eid in eids:
                    dat += form_elements % eid
                    element = self.elements[eid]
                    for nid in element.node_ids:
                        dat += '%s,' % nid
                    # Drop the trailing comma on each connectivity row.
                    dat = dat[:-1] + '\n'
        dat += self.breaker()
        #print(dat)
        fdat.write(dat)
        # Returned so calculix_properties can emit a section per ELSET.
        return elsets

    def calculix_properties(self, elsets):
        """Emit a *SHELL/*SOLID/*BEAM SECTION card for each ELSET name."""
        inp = ''
        inp += '** calculix_properties\n'
        for elset in elsets:
            #elset = 'pid%i_Elements%i' % (pid, etype)
            # Recover pid/etype from the 'pid<NN>_Elements<TYPE>' naming scheme.
            pid, etype = elset.lstrip('pid').split('_')
            pid = int(pid)
            etype = etype[8:]  # element type
            prop = self.properties[pid]
            if prop.type == 'PSHELL':
                mid = prop.mid
                inp += '*SHELL SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
                inp += '%s\n' % prop.t
                #def _write_calculix(self, marker='markerDummyProp',
                #element_set='ELsetDummyProp'):
                #msg = '*SHELL SECTION,MATERIAL=M%s_%s,ELSET=%s,OFFSET=%s\n' % (
                #marker, self.mid, element_set, self.z1)
                #msg += '** THICKNESS\n'
                #msg += '%s\n\n' % (self.t)
                #return msg
            elif prop.type == 'PSOLID':
                mid = prop.mid
                inp += '*SOLID SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
            elif prop.type == 'PBAR':
                mid = prop.mid
                inp += '*BEAM SECTION,ELSET=%s,MATERIAL=MAT%i\n' % (elset, mid.mid)
            elif prop.type == 'PBARL':
                mid = prop.mid
                #section_name = 'SQUARE'
                # NOTE(review): BUG — section_name is never assigned (only the
                # commented line above), so this branch raises NameError.
                print("what is the section_name?")
                print("  ", sorted(prop.__dict__.keys()))
                inp += '*BEAM SECTION,ELSET=eids_pid%i,MATERIAL=MAT%i,SECTION=%s\n' % (
                    prop.pid, mid.mid, section_name)
                # NOTE(review): the SQUARE/RECT checks below are two separate
                # `if`s, so SQUARE also falls into the RECT/else pair and raises.
                if section_name == 'SQUARE':
                    inp += '%s\n' % prop.dims[0]
                if section_name == 'RECT':
                    inp += '%s, %s\n' % tuple(prop.dims[0])
                else:
                    raise NotImplementedError(section_name)
            else:
                # NOTE(review): BUG — section_name is undefined on this path;
                # this raises NameError instead of reporting prop.type.
                raise NotImplementedError(section_name)
        inp += self.breaker()
        return inp

    def calculix_materials(self):
        """
        might need to make this by pid instead...
        steel=DEFI_MATERIAU(ELAS=_F(E=210000.,NU=0.3,RHO=8e-9),);
        -----MAT1-----
        *MATERIAL,NAME=EL
        210000.0, .3
        *DENSITY
        7.8E-9
        *SOLID SECTION,MATERIAL=EL,ELSET=EALL
        """
        inp = '** calculix_materials\n'
        for mid, material in sorted(self.materials.items()):
            msg = '*MATERIAL,NAME=mid%i\n' % material.mid
            if material.type == 'MAT1':
                msg += '*ELASTIC\n%s, %s\n' % (material.E(), material.Nu())
                msg += '*DENSITY\n%s\n' % material.get_density()
                msg += '*SOLID SECTION,MATERIAL=EL,ELSET=EALL\n'
            elif material.type == 'MAT4':
                msg += '*ELASTIC\n%s, %s\n' % (material.E(), material.Nu())
                msg += '*DENSITY\n%s\n' % material.rho()
                msg += '*CONDUCTIVITY\n%s\n' % material.k
                msg += '*CONVECTION\n%s\n' % material.h
                msg += '*DENSITY\n%s\n' % material.get_density()
                msg += '*SOLID SECTION,MATERIAL=EL,ELSET=EALL\n'
            elif material.type == 'MAT2':
                raise NotImplementedError(material)
                #msg = '*ELASTIC,TYPE=ORTHO\n'
                #temperature = 0.  # default value - same properties for all values
                #msg += '%s,%s,%s\n' % (self.e, self.nu, temperature)
                #D = Dplate
                #D1111 = D[0, 0]
                #D1122 = 0.
                #D2222 = D[1, 1]
                #D1133 = D[0, 2]
                #D2233 = D[1, 2]
                #D3333 = D[2, 2]
                #D1212 = D[0, 1]
                #D1313 = D[0, 2]
                #msg += '%s,%s,%s,%s,%s,%s,%s,%s\n\n' % (
                #D1111, D1122, D2222, D1133, D2233, D3333, D1212, D1313)
                #G23
                #temperature = self.tref
                #msg = '*ELASTIC,TYPE=ENGINEERING CONSTANTS ** MAT2,mid=%s\n' % (
                #self.mid)
                #msg += '** E1,E2,E3,NU12,NU13,NU23,G12,G13\n'
                #msg += '** G23,TEMPERATURE\n'
                #msg += '%s,%s,%s,%s,%s,%s,%s,%s\n' % (
                #e1, e2, e3, nu12, nu13, nu23, g12, g13)
                #msg += '%s,%s\n' % (G23, temperature)
                #if self.rho > 0.:
                #msg += '*DENSITY\n'
                #msg += '%s\n' % (self.rho)
                #if self.a > 0:
                #msg += '*EXPANSION,TYPE=ISO,ZERO=%s\n' % (self.tref)
                #msg += '** ALPHA,ALPHA*TREF\n'
                #msg += '%s,%s\n\n' % (self.a, self.a * self.tref)
                #return msg
            else:
                # NOTE(review): BUG — `mid` is an int key here, so mid.type
                # raises AttributeError; should be material.type.
                raise NotImplementedError(mid.type)
        # NOTE(review): BUG — `msg` is built per material but never appended
        # to `inp`, so material cards are silently dropped from the output.
        inp += self.breaker()
        return inp

    def calculix_loads(self):
        """writes the load cards sorted by ID"""
        inp = '** calculix_loads\n'
        #if self.language=='english':
        #inp += '** Loads\n'
        #else:
        #inp += ''
        isubcase = 1
        param_name = 'LOAD'
        #skippedLids = {}
        if self.loads:
            inp += '** LOADS\n'
            #load_keys = self.loads.keys()
            #if isubcase in self.case_control_deck:
            if self.case_control_deck.has_subcase(isubcase):
                loadcase_id = self.case_control_deck.get_subcase_parameter(
                    isubcase, param_name)[0]
                #loadcase = self.loads[loadcase_id]
                # NOTE(review): the returned force/moment array is discarded;
                # no load data actually makes it into `inp`.
                self._write_loads_p0(loadcase_id)  # bdf_file, size=8, is_double=False
        inp += self.breaker()
        return inp

    def _write_loads_p0(self, loadcase_id, p0=None):
        """Accumulate nodal forces/moments for one load case.

        :param loadcase_id: id of the LOAD set to reduce.
        :param p0: reference point (node id or xyz) for moment summation.
        :return: (nnodes, 6) array of [Fx,Fy,Fz,Mx,My,Mz] per node.
        """
        if not isinstance(loadcase_id, int):
            raise RuntimeError('loadcase_id must be an integer; loadcase_id=%r' % loadcase_id)
        if p0 is None:
            p = array([0., 0., 0.], dtype='float32')
        # NOTE(review): BUG — when p0 is None the else branch below still runs
        # and overwrites p with array(None); should be elif/else chained.
        if isinstance(p0, int):
            p = self.nodes[p0].get_position()
        else:
            p = array(p0)
        load_case = self.loads[loadcase_id]
        #for (key, load_case) in self.loads.items():
        #if key != loadcase_id:
        #continue
        # Flatten LOAD combinations into parallel (scale, load) lists.
        scale_factors2 = []
        loads2 = []
        for load in load_case:
            if isinstance(load, LOAD):
                scale_factors, loads = load.get_reduced_loads(
                    resolve_load_card=False, filter_zero_scale_factors=False)
                scale_factors2 += scale_factors
                loads2 += loads
            else:
                scale_factors2.append(1.)
                loads2.append(load)
        nnodes = self.nnodes
        #print('nnodes = %s' % nnodes)
        # Per-node accumulator: columns 0-2 forces, 3-5 moments (views below).
        force_moment = zeros((nnodes, 6), 'float64')
        #print(force_moment.shape)
        force = force_moment[:, :3]
        moment = force_moment[:, 3:]
        # Map node id -> row index; also cache node positions.
        i = 0
        xyz = {}
        nid_to_i_map = {}
        for nid, node in self.nodes.items():
            nid_to_i_map[nid] = i
            xyz[nid] = node.get_position()
        # NOTE(review): BUG — `i` is never incremented in the loop above, so
        # every node maps to row 0.
        unsupported_types = set()
        for load, scale in zip(loads2, scale_factors2):
            if isinstance(load, Force):  # FORCE, FORCE1, FORCE2
                forcei = load.mag * load.xyz
                self.log.info('%s %s' % (load, load.node))
                i = nid_to_i_map[load.node]
                force[i, :] += forcei * scale
            elif isinstance(load, Moment):  # MOMENT, MOMENT1, MOMENT2
                momenti = load.mag * load.xyz
                i = nid_to_i_map[load.node]
                moment[i, :] += momenti * scale
            elif load.type == 'PLOAD':
                nodes = load.node_ids
                nnodes = len(nodes)
                if nnodes == 3:
                    n1, n2, n3 = xyz[nodes[0]], xyz[nodes[1]], xyz[nodes[2]]
                    axb = cross(n1 - n2, n1 - n3)
                    #centroid = (n1 + n2 + n3) / 3.
                elif nnodes == 4:
                    n1, n2, n3, n4 = xyz[nodes[0]], xyz[nodes[1]], xyz[nodes[2]], xyz[nodes[3]]
                    axb = cross(n1 - n3, n2 - n4)
                    #centroid = (n1 + n2 + n3 + n4) / 4.
                else:
                    msg = 'invalid number of nodes on PLOAD card; nodes=%s' % str(nodes)
                    raise RuntimeError(msg)
                # |a x b| is twice the face area; normalise for the unit normal.
                nunit = norm(axb)
                area = 0.5 * nunit
                try:
                    n = axb / nunit
                except FloatingPointError:
                    msg = ''
                    for i, nid in enumerate(nodes):
                        msg += 'nid%i=%i node=%s\n' % (i+1, nid, xyz[nodes[i]])
                    msg += 'a x b = %s\n' % axb
                    msg += 'nunit = %s\n' % nunit
                    raise FloatingPointError(msg)
                forcei = load.pressure * area * n * scale / nnodes
                for nid in nodes:
                    i = nid_to_i_map[nid]
                    # NOTE(review): assignment (=) rather than += — overlapping
                    # pressure loads overwrite instead of accumulating; confirm.
                    force[i, :] = forcei
            elif load.type == 'PLOAD1':
                # NOTE(review): PLOAD1 is fetched but never converted to forces.
                elem = load.eid
            elif load.type == 'PLOAD2':
                # there are 4 pressures, but we assume p0
                pressure = load.pressures[0] * scale
                for eid in load.eids:
                    elem = self.elements[eid]
                    if elem.type in ['CTRIA3',
                                     'CQUAD4', 'CSHEAR']:
                        nodes = elem.node_ids
                        nnodes = len(nodes)
                        n = elem.Normal()
                        area = elem.Area()
                        forcei = pressure * n * area / nnodes
                        for nid in nodes:
                            i = nid_to_i_map[nid]
                            force[i, :] = forcei
                    else:
                        self.log.debug('case=%s etype=%r loadtype=%r not supported' % (
                            loadcase_id, elem.type, load.type))
            elif load.type == 'PLOAD4':
                # there are 4 possible pressures, but we assume p0
                pressure = load.pressures[0] * scale
                assert load.Cid() == 0, 'Cid() = %s' % (load.Cid())
                assert load.surf_or_line == 'SURF', 'surf_or_line = %s' % (load.surf_or_line)
                assert load.line_load_dir == 'NORM', 'line_load_dir = %s' % (load.line_load_dir)
                for elem in load.eids:
                    eid = elem.eid
                    if elem.type in ['CTRIA3', 'CTRIA6', 'CTRIAR',]:
                        # triangles
                        nnodes = 3
                        nodes = elem.node_ids
                        n1, n2, n3 = xyz[nodes[0]], xyz[nodes[1]], xyz[nodes[2]]
                        axb = cross(n1 - n2, n1 - n3)
                        nunit = norm(axb)
                        area = 0.5 * nunit
                        try:
                            n = axb / nunit
                        except FloatingPointError:
                            msg = ''
                            for i, nid in enumerate(nodes):
                                msg += 'nid%i=%i node=%s\n' % (i+1, nid, xyz[nodes[i]])
                            msg += 'a x b = %s\n' % axb
                            msg += 'nunit = %s\n' % nunit
                            raise FloatingPointError(msg)
                        #centroid = (n1 + n2 + n3) / 3.
                    elif elem.type in ['CQUAD4', 'CQUAD8', 'CQUAD', 'CQUADR', 'CSHEAR']:
                        # quads
                        nnodes = 4
                        nodes = elem.node_ids
                        n1, n2, n3, n4 = xyz[nodes[0]], xyz[nodes[1]], xyz[nodes[2]], xyz[nodes[3]]
                        axb = cross(n1 - n3, n2 - n4)
                        nunit = norm(axb)
                        area = 0.5 * nunit
                        try:
                            n = axb / nunit
                        except FloatingPointError:
                            msg = ''
                            for i, nid in enumerate(nodes):
                                msg += 'nid%i=%i node=%s\n' % (i+1, nid, xyz[nodes[i]])
                            msg += 'a x b = %s\n' % axb
                            msg += 'nunit = %s\n' % nunit
                            raise FloatingPointError(msg)
                        centroid = (n1 + n2 + n3 + n4) / 4.
                    elif elem.type in ['CTETRA', 'CHEXA', 'CPENTA']:
                        # NOTE(review): BUG — this branch sets `normal` (not `n`)
                        # and nnodes=None, so the forcei line below divides by
                        # None and/or uses a stale `n`.
                        area, centroid, normal = elem.get_face_area_centroid_normal(
                            load.g34_ref.nid, load.g1_ref.nid)
                        nnodes = None
                    else:
                        self.log.debug('case=%s eid=%s etype=%r loadtype=%r not supported' % (
                            loadcase_id, eid, elem.type, load.type))
                        continue
                    #r = centroid - p
                    forcei = pressure * area * n / nnodes
                    #m = cross(r, f)
                    for nid in nodes:
                        i = nid_to_i_map[nid]
                        force[i, :] = forcei
            elif load.type == 'GRAV':
                pass
                #def write_calculix_grav(self, gx, gy, gz):
                #msg = '*DLOAD\n'
                #msg += 'AllElements,GRAV,%s,%s,%s\n' % (gx, gy, gz)
                #return msg
            else:
                # we collect them so we only get one print
                unsupported_types.add(load.type)
        for load_type in unsupported_types:
            self.log.debug('case=%s load_type=%r not supported' % (loadcase_id, load_type))
        # NOTE(review): reshape() returns a new view that is discarded here;
        # this line has no effect.
        force_moment.reshape((nnodes*6, 1))
        return force_moment

    def calculix_constraints(self):
        """Emit constraint cards (delegates to SPC writer)."""
        return self.calculix_spcs()

    def calculix_spcs(self):
        """Placeholder SPC writer — emits only a header/breaker (TODO)."""
        #for spc_id, spcs in self.spcObject2.items():
        inp = ''
        inp += '** Calculix_SPCs\n'
        inp += self.breaker()
        return inp

    def breaker(self):
        """Return the standard comment separator line."""
        return '**-------------------------------------------------------------------------\n'

    def build_maxs(self):
        """Cache the digit widths of the largest node/element/property/material
        ids for column alignment in the writers."""
        self.max_nid_len = len(str(max(self.nodes)))
        self.max_eid_len = len(str(max(self.elements)))
        self.max_pid_len = len(str(max(self.properties)))
        self.max_mid_len = len(str(max(self.materials)))

    def write_as_calculix(self, fname='fem'):
        """Write the model as <fname>.dat (mesh) and <fname>.inp (analysis).

        NOTE(review): both files are opened in binary mode ('wb') but written
        with str data — this raises TypeError on Python 3; confirm the
        intended Python version or open in text mode.
        """
        inp = ''
        dat = ''
        self.build_maxs()  # gets number of nodes/elements/properties/materials
        inp += '** BEGIN BULK\n'
        #inp += 'DEBUT();\n\n'
        # NOTE(review): this preamble is Code_Aster syntax, not Calculix —
        # looks copied from the aster converter; confirm.
        inp += (
            "**'Read the mesh' - we use the 'aster' file format here.\n"
            'mesh=LIRE_MAILLAGE(UNITE=20,\n'
            "                   FORMAT='ASTER');\n\n"
            "**'MECA_STATIQUE' % SOL 101 - linear statics\n"
            "** Assigning the model for which CA will calculate the results:\n"
            "** 'Mecanique' - since we are dealing with a linear elastic model "
            "and '3D' since it's a 3D model.\n"
            'Meca=AFFE_MODELE(MAILLAGE=mesh,\n'
            "                 AFFE=_F(TOUT='OUI',\n"
            "                         PHENOMENE='MECANIQUE',\n"
            "                         MODELISATION='3D',),);\n\n")
        inp += self.breaker()
        print("writing fname=%s" % (fname + '.dat'))
        with open(fname + '.dat', 'wb') as fdat:
            self.calculix_nodes(fdat)
            elsets = self.calculix_elements(fdat)
            dat = self.calculix_materials()
            fdat.write(dat)
        print("writing fname=%s" % (fname + '.inp'))
        with open(fname + '.inp', 'wb') as finp:
            inpi = self.calculix_properties(elsets)
            inp += inpi
            inp += self.calculix_loads()
            inp += self.calculix_constraints()
            finp.write(inp)
            # Case Control Deck
            inp = '*STEP\n'
            inp += '*STATIC\n'
            inp += '*CLOAD\n'
            inp += 'LAST,2,1.\n'
            inp += '*NODE PRINT,NSET=NALL\n'
            inp += 'U\n'
            inp += '*EL PRINT,ELSET=EALL\n'
            inp += 'S\n'
            inp += '*END STEP\n'
            inp += '** END OF DATA\n'
            finp.write(inp)
def _write_mat1(material, element_set='ELSetDummyMat'):
# default value - same properties for all values
temperature = material.tref
msg = '*ELASTIC,TYPE=ISO,ELSET=%s\n' % (element_set)
msg += '** E,NU,TEMPERATURE\n'
msg += '%s,%s,%s\n' % (material.e, material.nu, temperature)
if material.rho > 0.:
msg += '*DENSITY\n'
msg += '%s\n' % (material.rho)
if material.a > 0:
msg += '*EXPANSION,TYPE=ISO,ZERO=%s\n' % (material.tref)
msg += '** ALPHA,ALPHA*TREF\n'
msg += '%s,%s\n\n' % (material.a, material.a * material.tref)
return msg
def main():  # pragma: no cover
    """Command-line entry point: convert a Nastran BDF to Calculix format."""
    import sys
    converter = CalculixConverter()
    #bdf_filename = 'solidBending.bdf'
    bdf_filename = sys.argv[1]
    converter.read_bdf(bdf_filename, xref=False)
    converter.cross_reference()
    converter.write_as_calculix(bdf_filename + '.ca')  # inp, py


if __name__ == '__main__':  # pragma: no cover
    main()
|
nilq/baby-python
|
python
|
import serial
import serial.tools.list_ports
import time
import threading as thread
class Laser:
    """Serial driver for a laser controller speaking the ';LA:<CMD> <args><CR>' protocol.

    Public settings (pulseMode, repRate, burstCount, ...) are staged on the
    instance and pushed to the hardware with :meth:`update_settings`.  All
    serial traffic is serialized through a single lock.

    NOTE(review): every write passes a str to pyserial; on Python 3 pyserial
    expects bytes — confirm whether an .encode() layer exists elsewhere.
    """

    def __init__(self):
        self.__ser = serial.Serial()
        # Staged device settings; None until the caller configures them.
        self.pulseMode = None
        self.repRate = None
        self.burstCount = None
        self.diodeCurrent = None
        self.energyMode = None
        self.pulseWidth = None
        self.diodeTrigger = None
        self.autoArm = False
        self.burstDuration = 0
        self.__kicker_control = False  # False = off, True = On. Controls kicker for shots longer than 2 seconds
        self.__startup = True
        self.__threads = []
        self.__lock = thread.Lock()

    def __kicker(self):
        # Queries for status every second in order to kick the laser's
        # watchdog timer (WDT) on shots >= 2s.
        while True:
            if self.__kicker_control:
                self.__ser.write(';LA:SS?<CR>')
            time.sleep(1)

    def __send_command(self, cmd):
        """Send one command string, or a list/tuple of commands, to the laser.

        Raises:
            TypeError: if cmd is not a str, list, or tuple.
        """
        self.__ser.readline()  # drain any stale response first
        responses = []
        # BUG FIX: the original gate demanded int/list/tuple, which raised
        # TypeError for every str command this class actually sends.
        if not isinstance(cmd, (str, list, tuple)):
            raise TypeError("Error: command must be a string, list, or tuple")
        if isinstance(cmd, (list, tuple)):
            # BUG FIX: was isinstance(list, tuple), which is always False.
            for item in cmd:
                self.__ser.write(item)
                # Poll until the device answers this command.
                while True:
                    time.sleep(0.01)
                    response = self.__ser.readline()  # read response
                    if response:
                        responses.append(response)
                        break
        else:
            self.__ser.write(cmd)
            while True:
                time.sleep(0.01)
                response = self.__ser.readline()
                if response:
                    break

    def connect(self, port_number, baud_rate=115200, timeout=5, parity=None):
        """Open the given serial port and start the watchdog-kicker thread once.

        Raises:
            ValueError: for an unavailable port or an invalid
                baud_rate/timeout/parity argument.
        """
        with self.__lock:
            if port_number not in serial.tools.list_ports.comports():
                raise ValueError(f"Error: port {port_number} is not available")
            self.__ser = serial.Serial(port=port_number)
            if baud_rate and isinstance(baud_rate, int):
                self.__ser.baudrate = baud_rate
            else:
                raise ValueError('Error: baud_rate parameter must be an integer')
            if timeout and isinstance(timeout, int):
                self.__ser.timeout = timeout
            else:
                raise ValueError('Error: timeout parameter must be an integer')
            if not parity or parity == 'none':
                self.__ser.parity = serial.PARITY_NONE
            elif parity == 'even':
                self.__ser.parity = serial.PARITY_EVEN
            elif parity == 'odd':
                self.__ser.parity = serial.PARITY_ODD
            elif parity == 'mark':
                self.__ser.parity = serial.PARITY_MARK
            elif parity == 'space':
                self.__ser.parity = serial.PARITY_SPACE
            else:
                raise ValueError("Error: parity must be None, \'none\', \'even\', \'odd\', \'mark\', \'space\'")
            if self.__startup:  # start kicking the laser's WDT
                # BUG FIX: target=self.__kicker() invoked the kicker inline
                # (blocking forever in connect); pass the bound method itself.
                # daemon=True so the endless loop cannot block interpreter exit.
                t = thread.Thread(target=self.__kicker, daemon=True)
                self.__threads.append(t)
                t.start()
                self.__startup = False

    def fire_laser(self):
        """Fire the laser for burstDuration seconds; abort and raise on failure."""
        with self.__lock:
            self.__send_command(';LA:FL 1<CR>')
            self.__send_command(';LA:SS?<CR>')
            # NOTE(review): pyserial's readline returns bytes on Python 3, so
            # comparing against a str never matches — confirm status framing.
            if self.__ser.readline() != '3075<CR>':
                self.__send_command(';LA:FL 0<CR>')  # aborts if laser fails to fire
                raise RuntimeError('Laser Failed to Fire')
            else:
                if self.burstDuration >= 2:
                    # Long shots need the WDT kicker running.
                    self.__kicker_control = True
                time.sleep(self.burstDuration)
                self.__send_command(';LA:FL 0<CR>')

    def get_status(self):
        """Query the laser's status word and return the raw serial response."""
        with self.__lock:
            self.__send_command(';LA:SS?<CR>')
            return self.__ser.read()

    def check_armed(self):
        """Ask the laser whether it is currently armed (enabled)."""
        with self.__lock:
            self.__send_command('LA:EN?<CR>')

    def arm(self):
        """Arm (enable) the laser."""
        with self.__lock:
            self.__send_command(';LA:EN 1<CR>')

    def disarm(self):
        """Disarm (disable) the laser."""
        with self.__lock:
            self.__send_command(';LA:EN 0<CR>')

    def update_settings(self):
        """Push all staged settings to the laser.

        Command format, ignoring brackets:
        ;[Address]:[Command String][Parameters]<CR>
        """
        with self.__lock:
            cmd_strings = [
                ';LA:PM ' + str(self.pulseMode) + '<CR>',
                ';LA:RR ' + str(self.repRate) + '<CR>',
                # BUG FIX: burst count was previously sent as pulseMode.
                ';LA:BC ' + str(self.burstCount) + '<CR>',
                ';LA:DC ' + str(self.diodeCurrent) + '<CR>',
                ';LA:EM ' + str(self.energyMode) + '<CR>',
                ';LA:DW ' + str(self.pulseWidth) + '<CR>',
                # BUG FIX: diode trigger was previously sent as pulseMode.
                # (A duplicated ';LA:PM' entry was also dropped.)
                ';LA:DT ' + str(self.diodeTrigger) + '<CR>',
            ]
            for cmd in cmd_strings:
                self.__send_command(cmd)
def list_available_ports():
    """Enumerate the serial ports currently visible to the operating system."""
    available = serial.tools.list_ports.comports()
    return available
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.5 on 2021-04-15 14:49
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated migration: marks Comment.is_edited and Comment.parent as
    # non-editable so they are excluded from ModelForms and the admin.

    dependencies = [
        ('comments', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='is_edited',
            field=models.BooleanField(default=False, editable=False),
        ),
        migrations.AlterField(
            model_name='comment',
            name='parent',
            # MPTT tree link to the parent comment; deleting a parent cascades
            # to its children.
            field=mptt.fields.TreeForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='comments.comment'),
        ),
    ]
|
nilq/baby-python
|
python
|
from datetime import datetime
from urllib.parse import urlparse

from flask import (Flask, abort, flash, jsonify, redirect, render_template,
                   url_for, request)
from flask_bootstrap import Bootstrap
from flask_login import (LoginManager, current_user, UserMixin,
                         login_required, login_user, logout_user)
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from werkzeug.security import check_password_hash, generate_password_hash
from wtforms import (BooleanField, DateTimeField, StringField, TextAreaField,
                     PasswordField, SubmitField)
from wtforms.validators import (DataRequired, Length, Email, EqualTo,
                                ValidationError)

from filters import do_date, do_datetime, do_duration, do_nl2br
# Creates a Python object (WSGI) app
# The __name__ argument tells Flask to look at the current
# python module to find resources associated with the app
app = Flask(__name__)
# NOTE(review): hard-coded secret key and SQLite URI — load these from the
# environment before deploying.
app.config['SECRET_KEY'] = 'secret'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///appt.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Register the custom Jinja2 filters defined in filters.py so templates can
# use {{ value|date }}, {{ value|datetime }}, etc.
app.jinja_env.filters['date'] = do_date
app.jinja_env.filters['datetime'] = do_datetime
app.jinja_env.filters['duration'] = do_duration
app.jinja_env.filters['nl2br'] = do_nl2br

# Core extensions: ORM, Bootstrap templating, schema migrations, auth.
db = SQLAlchemy(app)
bootstrap = Bootstrap(app)
migrate = Migrate(app, db)

login_manager = LoginManager()
login_manager.init_app(app)
# Unauthenticated users are redirected to the 'login' view with a flash.
login_manager.login_view = 'login'
login_manager.session_protection = 'strong'
login_manager.login_message = 'Please login to access this page'
login_manager.login_message_category = 'info'
# Models
@login_manager.user_loader
def user_id(id):
    """Flask-Login hook to load a User instance from ID."""
    # The session stores the id as text, so cast back to the integer PK.
    return User.query.get(int(id))
class User(db.Model, UserMixin):
    """A user login, with credentials and authentication."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): 20 characters is short for real email addresses — confirm.
    email = db.Column(db.String(20), unique=True)
    # Werkzeug password hash; the raw password is never stored.
    password_hash = db.Column(db.String(128))
    created = db.Column(db.DateTime, default=datetime.utcnow)

    @property
    def password(self):
        """
        Prevents password from being accessed
        """
        raise AttributeError('password is not a readable attribute.')

    @password.setter
    def password(self, password):
        """
        Set password to a hashed password
        """
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """
        Check if hashed password matches actual password
        """
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return f'<User: {self.email}>'
class Appointment(db.Model):
    """An appointment on the calendar."""

    __tablename__ = 'appointments'

    id = db.Column(db.Integer, primary_key=True)
    created = db.Column(db.DateTime, default=datetime.utcnow)
    # BUG FIX: 'modified' defaulted to UTC (utcnow) but was refreshed with
    # local time (datetime.now), mixing timezones in one column; use UTC for
    # both so created/modified are comparable.
    modified = db.Column(
        db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Owner of the appointment; the relationship is read-only and eagerly
    # joined so templates can show appt.user without extra queries.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    user = db.relationship(User, lazy='joined', join_depth=1, viewonly=True)

    title = db.Column(db.String(255))
    # start/end are NOT NULL: every appointment needs a concrete time span.
    start = db.Column(db.DateTime, nullable=False)
    end = db.Column(db.DateTime, nullable=False)
    allday = db.Column(db.Boolean, default=False)
    location = db.Column(db.String(255))
    description = db.Column(db.Text)

    @property
    def duration(self):
        """
        Calculate the length of the appointment, in seconds.
        """
        # If the datetime type were supported natively on all database
        # management systems (is not on SQLite), then this could be a
        # hybrid_property, where filtering clauses could compare
        # Appointment.duration. Without that support, we leave duration as an
        # instance property, where appt.duration is calculated for us.
        delta = self.end - self.start
        return delta.days * 24 * 60 * 60 + delta.seconds

    def __repr__(self):
        # <Appointment: 1>
        return (u'<{self.__class__.__name__}: {self.id}>'.format(self=self))
# Forms
class LoginForm(FlaskForm):
    """Credentials form for an existing user."""

    email = StringField('Email', validators=[
        DataRequired(), Email()])
    # ROBUSTNESS: require a non-empty password so blank submissions are
    # rejected at the form layer instead of reaching the hash check.
    # (The 'Password' label matches what WTForms auto-generated before.)
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
class RegisterForm(FlaskForm):
    """Sign-up form; validates email uniqueness and that the passwords match."""

    email = StringField('Email', validators=[
        DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # BUG FIX: confirm_password was collected but never compared against
    # password, so mismatched registrations were silently accepted.
    confirm_password = PasswordField('Confirm Password', validators=[
        DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """Reject registration when the email address is already taken."""
        if User.query.filter_by(email=field.data).first():
            raise ValidationError("This email already exists.")
class AppointmentForm(FlaskForm):
    """Render HTML input for Appointment model & validate submissions.

    This matches the models.Appointment class very closely. Where
    models.Appointment represents the domain and its persistence, this class
    represents how to display a form in HTML & accept/reject the results.
    """

    title = StringField('Title', validators=[
        DataRequired(),
        Length(min=1, max=255)])
    start = DateTimeField('Start', validators=[DataRequired()])
    # NOTE(review): 'end' carries no DataRequired here, yet the model column
    # is nullable=False — an empty end would fail at the database layer.
    # Confirm whether the template/JS guarantees a value.
    end = DateTimeField('End')
    allday = BooleanField('All Day')
    location = StringField('Location', validators=[Length(max=255)])
    description = TextAreaField('Description')
    submit = SubmitField('Submit')
# Views
@app.route('/')
@login_required
def appointment_list():
    """Provide HTML page listing all appointments in the database."""
    # Query: Get all Appointment objects, sorted by the appointment date.
    # Only rows owned by the current user are included.
    appts = Appointment.query.filter_by(user_id=current_user.id) \
        .order_by(Appointment.start.asc()).all()
    return render_template('appointment/index.html', appts=appts)
@app.route('/<int:appointment_id>/')
@login_required
def appointment_detail(appointment_id):
    """Provide HTML page with all details on a given appointment."""
    # Query: get Appointment object by ID.
    appt = Appointment.query.get_or_404(appointment_id)
    if appt.user_id != current_user.id:
        # Abort with Not Found.  Using 404 (not 403) avoids revealing that
        # the appointment exists but belongs to someone else.
        abort(404)
    return render_template('appointment/detail.html', appt=appt)
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def appointment_create():
    """Provide HTML form to create a new appointment record."""
    form = AppointmentForm()
    if form.validate_on_submit():
        # Copy the validated form fields into a new row owned by the
        # current user.
        appt = Appointment(
            title=form.title.data,
            start=form.start.data,
            end=form.end.data,
            allday=form.allday.data,
            location=form.location.data,
            description=form.description.data,
            user_id=current_user.id
        )
        db.session.add(appt)
        db.session.commit()
        flash('You have created a new appointment.', 'success')
        return redirect(url_for('appointment_detail', appointment_id=appt.id))
    # GET, or failed validation: (re-)render the form.
    return render_template('appointment/edit.html', form=form)
@app.route('/<int:appointment_id>/edit/', methods=['GET', 'POST'])
@login_required
def appointment_edit(appointment_id):
    """Provide HTML form to edit a given appointment."""
    appt = Appointment.query.get_or_404(appointment_id)
    if appt.user_id != current_user.id:
        # 404 (not 403) so other users' appointments are indistinguishable
        # from missing ones.
        abort(404)
    form = AppointmentForm(obj=appt)
    if form.validate_on_submit():
        form.populate_obj(appt)
        db.session.commit()
        # CONSISTENCY: every other mutating view (create, login, logout,
        # register) flashes a confirmation; edit previously did not.
        flash('You have updated the appointment.', 'success')
        return redirect(url_for('appointment_detail', appointment_id=appt.id))
    return render_template('appointment/edit.html', form=form)
@app.route('/<int:appointment_id>/delete/', methods=['DELETE'])
@login_required
def appointment_delete(appointment_id):
    """Delete a record using HTTP DELETE, respond with JSON for JavaScript."""
    # BUG FIX: get_or_404 aborts with an HTML 404, which made the JSON
    # 'Not Found' branch below unreachable and broke the JavaScript caller's
    # expectation of a JSON body; use get() so every outcome is JSON.
    appt = Appointment.query.get(appointment_id)
    if appt is None:
        # Respond with JSON indicating the appointment was not found.
        response = jsonify({'status': 'Not Found'})
        response.status_code = 404
        return response
    if appt.user_id != current_user.id:
        # Respond with JSON indicating the deletion is forbidden.
        response = jsonify({'status': 'Forbidden'})
        response.status_code = 403
        return response
    db.session.delete(appt)
    db.session.commit()
    return jsonify({'status': 'OK'})
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect to the originally requested page."""
    if current_user.is_authenticated:
        return redirect(url_for('appointment_list'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is not None and user.verify_password(form.password.data):
            login_user(user)
            next_page = request.args.get('next')
            # SECURITY FIX: only follow same-site 'next' targets.  An
            # absolute URL here (with a netloc) would be an open redirect
            # to an attacker-controlled site.
            if next_page and urlparse(next_page).netloc != '':
                next_page = None
            flash('You have been logged in', 'success')
            return redirect(next_page) if next_page else redirect(
                url_for('appointment_list'))
        flash('Invalid email or password.', 'warning')
    return render_template('appointment/login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and redirect away from protected pages."""
    logout_user()
    flash('You have been logged out.', 'success')
    # appointment_list is @login_required, so this lands on the login view.
    return redirect(url_for('appointment_list'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account and redirect to the login page."""
    if current_user.is_authenticated:
        return redirect(url_for('appointment_list'))
    form = RegisterForm()
    if form.validate_on_submit():
        # The User.password setter hashes the value; only the hash is stored.
        user = User(email=form.email.data,
                    password=form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Your account has been created. You can login', 'success')
        return redirect(url_for('login'))
    return render_template('appointment/register.html', form=form)
@app.errorhandler(404)
def error_not_found(error):
    """Render the custom 404 page for any Not Found response."""
    return render_template('404.html'), 404
|
nilq/baby-python
|
python
|
from hstest import StageTest, TestedProgram, CheckResult, dynamic_test
class Test(StageTest):
    """Hyperskill stage tests for a markdown-editor console program.

    Each dynamic test drives the program through one or two formatters,
    checks the echoed markdown, sends '!done', and finally compares the
    'output.md' file against the matching entry in `answers`.
    """

    # Expected final contents of output.md, indexed by the `attach` value
    # each test passes to check_result_in_file.
    answers = [
        '#### Hello World!\n',
        'plain text**bold text**',
        '*italic text*`code.work()`',
        '[google](https://www.google.com)\n',
        '1. first\n2. second\n3. third\n4. fourth\n',
        '* first\n* second\n* third\n* fourth\n',
    ]

    def check_result_in_file(self, attach):
        """Compare output.md against answers[attach]; wrong()/correct() verdict."""
        try:
            with open('output.md', 'r') as outfile:
                output = outfile.read()
                if output != self.answers[attach]:
                    return CheckResult.wrong('The result written to the output file is wrong.')
        except IOError:
            return CheckResult.wrong('The output file is not found.')
        return CheckResult.correct()

    @dynamic_test
    def test1(self):
        """Header formatter: level prompt, text prompt, '####' output, auto newline."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('header').strip().lower()
        if 'level' not in output:
            return CheckResult.wrong('Header formatter should prompt a user for both level and text, i.e "- Level: > "')
        output = pr.execute('4').strip().lower()
        if 'text' not in output.strip().lower():
            return CheckResult.wrong('Header formatter should prompt a user for both level and text, i.e "- Text: > "')
        output = list(map(lambda item: item.lower(), pr.execute('Hello World!').split('\n')))
        if len(output) != 3:
            return CheckResult.wrong('Please remember that header formatter switches to a new line automatically')
        if output[0].strip().split() != ['####', 'hello', 'world!']:
            return CheckResult.wrong('Level 4 for header denotes as #### in markdown')
        if output[1]:
            return CheckResult.wrong('Please check whether some redundant data is printed after a header')
        if 'formatter' not in output[2].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=0)

    @dynamic_test
    def test2(self):
        """Plain and bold formatters: as-is text, '**' enclosure, no newline."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('plain').strip().lower()
        if 'text' not in output.strip().lower():
            return CheckResult.wrong('Plain formatter should prompt a user for text, i.e "- Text: > "')
        output = list(map(lambda item: item.lower(), pr.execute('plain text').split('\n')))
        if len(output) != 2:
            return CheckResult.wrong("Plain formatter should only return the given text as is, and prompt a user for a new formatter")
        if output[0] != 'plain text':
            return CheckResult.wrong('Plain formatter returns the given text as is, without any extra symbols or tags')
        if 'formatter' not in output[1].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        output = pr.execute('bold').strip().lower()
        if 'text' not in output:
            return CheckResult.wrong('Bold formatter should prompt a user for text, i.e "- Text: > "')
        output = list(map(lambda item: item.lower(), pr.execute('bold text').split('\n')))
        if len(output) != 2:
            return CheckResult.wrong("Bold formatter should only return the given text enclosed with '**' symbols, and prompt a user for a new formatter")
        if output[0] != 'plain text**bold text**':
            return CheckResult.wrong('Plain formatter returns the given text as is, and does not switch to a new line')
        if 'formatter' not in output[1].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=1)

    @dynamic_test
    def test3(self):
        """Italic and inline-code formatters: '*' and backtick enclosures."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('italic').strip().lower()
        if 'text' not in output.strip().lower():
            return CheckResult.wrong('Italic formatter should prompt a user for text, i.e "- Text: > "')
        output = list(map(lambda item: item.lower(), pr.execute('italic text').split('\n')))
        if len(output) != 2 or output[0] != '*italic text*':
            return CheckResult.wrong("Bold formatter should only return the given text enclosed with '*' symbols, and prompt a user for a new formatter")
        if 'formatter' not in output[1].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        output = pr.execute('inline-code').strip().lower()
        if 'text' not in output:
            return CheckResult.wrong('Inline code formatter should prompt a user for text, i.e "- Text: > "')
        output = list(map(lambda item: item.lower(), pr.execute('code.work()').split('\n')))
        if len(output) != 2:
            return CheckResult.wrong("Inline code formatter should only return the given text enclosed with '`' (backtick) symbols, and prompt a user for a new formatter")
        if output[0] != '*italic text*`code.work()`':
            return CheckResult.wrong('Inline code formatter does not switch to a new line')
        if 'formatter' not in output[1].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=2)

    @dynamic_test
    def test4(self):
        """Link and new-line formatters: [Label](URL) form and state retention."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('link').strip().lower()
        if 'label' not in output:
            return CheckResult.wrong('Link formatter should prompt a user for both label and URL, i.e "- Label: > "')
        output = pr.execute('google').strip().lower()
        if 'url' not in output:
            return CheckResult.wrong('Link formatter should prompt a user for both label and URL, i.e "- URL: > "')
        output = list(map(lambda item: item.lower(), pr.execute('https://www.google.com').split('\n')))
        if len(output) != 2:
            return CheckResult.wrong('Link code formatter should only return the given label associated with a URL in the form [Label](URL), and prompt a user for a new formatter')
        if output[0] != '[google](https://www.google.com)':
            return CheckResult.wrong('Please recall that for the given label and URL the correct link formatter return will be [Label](URL)')
        if 'formatter' not in output[1].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        output = list(map(lambda item: item.lower(), pr.execute('new-line').split('\n')))
        if len(output) != 3 or output[1] != '':
            return CheckResult.wrong('New-line formatter only moves the input pointer to the next line, and prompts a user for a new formatter')
        if output[0] != '[google](https://www.google.com)':
            return CheckResult.wrong('Please make sure that the markdown state is saved')
        if 'formatter' not in output[2].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=3)

    @dynamic_test
    def test5(self):
        """Ordered list: rejects a non-positive row count, enumerates '1. ' style."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('ordered-list').strip().lower()
        if 'number' not in output:
            return CheckResult.wrong('Ordered list formatter should prompt a user for the number of rows, i.e "- Number of rows: > "')
        output = list(map(lambda item: item.lower(), pr.execute('0').split('\n')))
        if len(output) < 2 or 'number' not in output[-1].strip():
            return CheckResult.wrong('(Un)ordered list formatter should inform a user that the number of rows should be greater than zero if the input was invalid, and prompt the user for this input again, i.e "- Number of rows: > "')
        pr.execute('4')
        pr.execute('first')
        pr.execute('second')
        pr.execute('third')
        output = list(map(lambda item: item.lower(), pr.execute('fourth').split('\n')))
        if len(output) != 6:
            return CheckResult.wrong('Ordered list formatter should switch to a new line automatically')
        if output[0] != '1. first' or output[1] != '2. second' or output[2] != '3. third' or output[3] != '4. fourth':
            return CheckResult.wrong('Ordered list formatter should enumerate its rows in the following manner: "1. ", "2.", and so on, depending on the given number of rows.')
        if 'formatter' not in output[5].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=4)

    @dynamic_test
    def test6(self):
        """Unordered list: rejects a negative row count, prefixes rows with '* '."""
        pr = TestedProgram()
        pr.start()
        output = pr.execute('unordered-list').strip().lower()
        if 'number' not in output:
            return CheckResult.wrong('Unordered list formatter should prompt a user for the number of rows, i.e "- Number of rows: > "')
        output = list(map(lambda item: item.lower(), pr.execute('-7').split('\n')))
        if len(output) < 2 or 'number' not in output[-1].strip():
            return CheckResult.wrong('(Un)ordered list formatter should inform a user that the number of rows should be greater than zero if the input was invalid, and prompt the user for this input again, i.e "- Number of rows: > "')
        pr.execute('4')
        pr.execute('first')
        pr.execute('second')
        pr.execute('third')
        output = list(map(lambda item: item.lower(), pr.execute('fourth').split('\n')))
        if len(output) != 6:
            return CheckResult.wrong('Unordered list formatter should switch to a new line automatically')
        if output[0] != '* first' or output[1] != '* second' or output[2] != '* third' or output[3] != '* fourth':
            return CheckResult.wrong('Unordered list formatter should begin each of the rows with a star "*" symbol')
        if 'formatter' not in output[5].strip():
            return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
        pr.execute('!done')
        if not pr.is_finished():
            return CheckResult.wrong('Your program should finish its execution whenever !done is an input')
        return self.check_result_in_file(attach=5)
# Allow running the stage tests directly from the command line.
if __name__ == '__main__':
    Test().run_tests()
# class SumTest(StageTest):
#
# answers = [
# '#### Hello World!\n',
# 'plain text**bold text**',
# '*italic text*`code.work()`',
# '[google](https://www.google.com)\n',
# '1. first\n2. second\n3. third\n4. fourth\n',
# '* first\n* second\n* third\n* fourth\n',
# ]
#
# def generate(self):
# return [
# TestCase(
# stdin=[
# 'header',
# lambda output:
# '4'
# if 'level' in output.strip().lower()
# else CheckResult.wrong('Header formatter should prompt a user for both level and text, i.e "- Level: > "'),
# lambda output:
# 'Hello World!'
# if 'text' in output.strip().lower()
# else CheckResult.wrong('Header formatter should prompt a user for both level and text, i.e "- Text: > "'),
# self.check_header_test1
# ],
# attach=0
# ),
# TestCase(
# stdin=[
# 'plain',
# lambda output:
# 'plain text'
# if 'text' in output.strip().lower()
# else CheckResult.wrong('Plain formatter should prompt a user for text, i.e "- Text: > "'),
# self.check_plain_test2,
# lambda output:
# 'bold text'
# if 'text' in output.strip().lower()
# else CheckResult.wrong('Bold formatter should prompt a user for text, i.e "- Text: > "'),
# self.check_bold_test2
# ],
# attach=1
# ),
# TestCase(
# stdin=[
# 'italic',
# lambda output:
# 'italic text'
# if 'text' in output.strip().lower()
# else CheckResult.wrong('Italic formatter should prompt a user for text, i.e "- Text: > "'),
# self.check_italic_test3,
# lambda output:
# 'code.work()'
# if 'text' in output.strip().lower()
# else CheckResult.wrong('Inline code formatter should prompt a user for text, i.e "- Text: > "'),
# self.check_inline_code_test3
# ],
# attach=2
# ),
# TestCase(
# stdin=[
# 'link',
# lambda output:
# 'google'
# if 'label' in output.strip().lower()
# else CheckResult.wrong('Link formatter should prompt a user for both label and URL, i.e "- Label: > "'),
# lambda output:
# 'https://www.google.com'
# if 'url' in output.strip().lower()
# else CheckResult.wrong('Link formatter should prompt a user for both label and URL, i.e "- URL: > "'),
# self.check_link_test4,
# self.check_new_line_test4
# ],
# attach=3
# ),
# TestCase(
# stdin=[
# 'ordered-list',
# lambda output:
# '0'
# if 'number' in output.strip().lower()
# else CheckResult.wrong('Ordered list formatter should prompt a user for the number of rows, i.e "- Number of rows: > "'),
# self.check_list_invalid_number_test,
# 'first',
# 'second',
# 'third',
# 'fourth',
# self.check_ordered_list_test5,
# ],
# attach=4
# ),
# TestCase(
# stdin=[
# 'unordered-list',
# lambda output:
# '-7'
# if 'number' in output.strip().lower()
# else CheckResult.wrong('Unordered list formatter should prompt a user for the number of rows, i.e "- Number of rows: > "'),
# self.check_list_invalid_number_test,
# 'first',
# 'second',
# 'third',
# 'fourth',
# self.check_unordered_list_test6,
# ],
# attach=5
# )
# ]
#
# def check_header_test1(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 3:
# return CheckResult.wrong('Please remember that header formatter switches to a new line automatically')
#
# if output[0].strip().split() != ['####', 'hello', 'world!']:
# return CheckResult.wrong('Level 4 for header denotes as #### in markdown')
#
# if output[1]:
# return CheckResult.wrong('Please check whether some redundant data is printed after a header')
#
# if 'formatter' not in output[2].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check_plain_test2(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 2:
# return CheckResult.wrong("Plain formatter should only return the given text as is, and prompt a user for input again")
#
# if output[0] != 'plain text':
# return CheckResult.wrong('Plain formatter returns the given text as is, without any extra symbols or tags')
#
# if 'formatter' not in output[1].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return 'bold'
#
# def check_bold_test2(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 2:
# return CheckResult.wrong("Bold formatter should only return the given text enclosed with '**' symbols, and prompt a user for input again")
#
# if output[0] != 'plain text**bold text**':
# return CheckResult.wrong('Plain formatter returns the given text as is, and does not switch to a new line')
#
# if 'formatter' not in output[1].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check_italic_test3(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 2 or output[0] != '*italic text*':
# return CheckResult.wrong("Bold formatter should only return the given text enclosed with '*' symbols, and prompt a user for a new formatter")
#
# if 'formatter' not in output[1].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return 'inline-code'
#
# def check_inline_code_test3(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 2:
# return CheckResult.wrong("Inline code formatter should only return the given text enclosed with '`' (backtick) symbols, and prompt a user for a new formatter")
#
# if output[0] != '*italic text*`code.work()`':
# return CheckResult.wrong('Inline code formatter does not switch to a new line')
#
# if 'formatter' not in output[1].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check_link_test4(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 2:
# return CheckResult.wrong('Link code formatter should only return the given label associated with a URL in the form [Label](URL), and prompt a user for a new formatter')
#
# if output[0] != '[google](https://www.google.com)':
# return CheckResult.wrong('Please recall that for the given label and URL the correct link formatter return will be [Label](URL)')
#
# if 'formatter' not in output[1].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return 'new-line'
#
# def check_new_line_test4(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 3 or output[1] != '':
# return CheckResult.wrong('New-line formatter only moves the input pointer to the next line, and prompts a user for a new formatter')
#
# if output[0] != '[google](https://www.google.com)':
# return CheckResult.wrong('Please make sure that the markdown state is saved')
#
# if 'formatter' not in output[2].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check_list_invalid_number_test(selfs, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) < 2 or 'number' not in output[-1].strip():
# return CheckResult.wrong('(Un)ordered list formatter should inform a user that the number of rows should be greater than zero if the input was invalid, and prompt the user for this input again, i.e "- Number of rows: > "')
#
# return '4'
#
# def check_ordered_list_test5(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 6:
# return CheckResult.wrong('Ordered list formatter should switch to a new line automatically')
#
# if output[0] != '1. first' or output[1] != '2. second' or output[2] != '3. third' or output[3] != '4. fourth':
# return CheckResult.wrong('Ordered list formatter should enumerate its rows in the following manner: "1. ", "2.", and so on, depending on the given number of rows.')
#
# if 'formatter' not in output[5].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check_unordered_list_test6(self, output):
# output = list(map(lambda item: item.lower(), output.split('\n')))
#
# if len(output) != 6:
# return CheckResult.wrong('Unordered list formatter should switch to a new line automatically')
#
# if output[0] != '* first' or output[1] != '* second' or output[2] != '* third' or output[3] != '* fourth':
# return CheckResult.wrong('Unordered list formatter should begin each of the rows with a star "*" symbol')
#
# if 'formatter' not in output[5].strip():
# return CheckResult.wrong('A user should be prompted for input again, i.e "- Choose a formatter: > "')
#
# return '!done'
#
# def check(self, reply, attach):
# try:
# with open('output.md', 'r') as outfile:
# output = outfile.read()
# if output != self.answers[attach]:
# return CheckResult.wrong('The result written to the output file is wrong.')
# except IOError:
# return CheckResult.wrong('The output file is not found.')
#
# return CheckResult.correct()
|
nilq/baby-python
|
python
|
import pytest
from pasee.__main__ import load_conf
from pasee import MissingSettings
import mocks
def test_load_conf():
    """Exercise load_conf: a missing file raises, a valid file parses, overrides work."""
    # A nonexistent settings file must raise MissingSettings.
    with pytest.raises(MissingSettings):
        load_conf("nonexistant.toml")
    # A valid settings file is parsed into a mapping with the expected host.
    settings = load_conf("tests/test-settings.toml")
    assert settings["host"] == "0.0.0.0"
    # Explicit host/port keyword overrides are accepted without error.
    load_conf(settings_path="tests/test-settings.toml", host="127.0.0.1", port=4242)
def test_load_conf__none_in_parameter(monkeypatch):
    """load_conf(None) falls back to the default settings location."""
    # Redirect os.path.join inside pasee.__main__ so the default path
    # resolves to the mocked test settings file.
    monkeypatch.setattr("pasee.__main__.os.path.join", mocks.join)
    assert load_conf(None)["host"] == "0.0.0.0"
def test_load_conf__missing_variables_in_conf():
    """A settings file missing required keys raises MissingSettings.

    The unused ``monkeypatch`` fixture and the unused ``config`` local were
    removed: ``load_conf`` must raise before returning anything.
    """
    with pytest.raises(MissingSettings):
        load_conf("tests/test-settings-missing-values.toml")
|
nilq/baby-python
|
python
|
# Script: move a UR10 robot arm to a saved "home" pose after operator confirmation.
from UR10 import *
# Controller for the UR10 arm at this (presumably lab-internal) IP — TODO confirm.
u = UR10Controller('10.1.1.6')
# Manager for named, previously-recorded robot poses.
x = URPoseManager()
# Load the pose set recorded in this file.
x.load('t1.urpose')
# Show the joint values of the 'home' pose so the operator can sanity-check it.
print(x.getPosJoint('home'))
resp = input('hit y if you want to continue')
if resp == 'y':
    # Move to 'home'; third argument is the motion duration/speed parameter
    # (presumably seconds) — TODO confirm units against UR10.moveUR.
    x.moveUR(u,'home',30)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <gerald@wireshark.org>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''Update the "manuf" file.
Make-manuf creates a file containing ethernet OUIs and their company
IDs. It merges the databases at IEEE with entries in our template file.
Our file in turn contains entries from
http://www.cavebear.com/archive/cavebear/Ethernet/Ethernet.txt along
with our own.
The script reads the comments at the top of "manuf.tmpl" and writes them
to "manuf". It then joins the manufacturer listing in "manuf.tmpl" with
the listing in "oui.txt", "iab.txt", etc, with the entries in
"manuf.tmpl" taking precedence.
'''
import csv
import io
import os
import re
import sys
# urllib was reorganized between Python 2 and 3; pick the right flavor at import time.
if sys.version_info[0] >= 3:
    import urllib.request, urllib.error, urllib.parse
    import codecs
else:
    import urllib2

# Optional dependency: PyICU enables grapheme-cluster-aware truncation of
# manufacturer names; without it we fall back to plain code-point slicing.
have_icu = False
try:
    # Use the grapheme or segments module instead?
    import icu
    have_icu = True
except ImportError:
    pass
def exit_msg(msg=None, status=1):
    """Write *msg* (if given) plus this script's usage text to stderr, then exit."""
    out = sys.stderr
    if msg is not None:
        out.write(msg + '\n\n')
    # Always echo the module docstring as usage help before exiting.
    out.write(__doc__ + '\n')
    sys.exit(status)
def open_url(url):
    '''Open a URL.
    Returns a tuple containing the body and response dict. The body is a
    str in Python 3 and bytes in Python 2 in order to be compatible with
    csv.reader.
    '''
    req_headers = { 'User-Agent': 'Wireshark make-manuf' }
    try:
        if sys.version_info[0] >= 3:
            req = urllib.request.Request(url, headers=req_headers)
            response = urllib.request.urlopen(req)
            # Registry files are UTF-8; replace undecodable bytes rather than fail.
            body = response.read().decode('UTF-8', 'replace')
        else:
            req = urllib2.Request(url, headers=req_headers)
            response = urllib2.urlopen(req)
            body = response.read()
    # A bare "except:" also swallowed SystemExit/KeyboardInterrupt; catch only
    # real errors, and include the reason in the message before exiting.
    except Exception as err:
        exit_msg('Error opening {}: {}'.format(url, err))
    return (body, dict(response.info()))
def shorten(manuf):
    '''Convert a long manufacturer name to abbreviated and short names.

    Returns the short name alone when the input was already short and
    simple, otherwise "short<TAB>mixed-case original".
    '''
    # Normalize whitespace.
    manuf = ' '.join(manuf.split())
    orig_manuf = manuf
    # Add exactly one space on each end so the word-boundary substitutions
    # below can anchor on surrounding whitespace.
    manuf = u' {} '.format(manuf)
    # Convert to consistent case
    manuf = manuf.title()
    # Remove any punctuation
    # XXX Use string.punctuation? Note that it includes '-' and '*'.
    manuf = re.sub(u"[',.()]", ' ', manuf)
    # & isn't needed when Standalone
    manuf = manuf.replace(" & ", " ")
    # Remove any "the", "inc", "plc" ... (raw string: '\W' is an invalid
    # escape in a plain literal on modern Python).
    manuf = re.sub(r'\W(the|incorporated|inc|plc|systems|corporation|corp|s/a|a/s|ab|ag|kg|gmbh|company|co|limited|ltd|holding|spa)(?= )', '', manuf, flags=re.IGNORECASE)
    # Remove all spaces
    manuf = re.sub(r'\s+', '', manuf)

    # Truncate names to a reasonable length, say, 8 characters. If
    # the string contains UTF-8, this may be substantially more than
    # 8 bytes. It might also be less than 8 visible characters. Plain
    # Python slices Unicode strings by code point, which is better
    # than raw bytes but not as good as grapheme clusters. PyICU
    # supports grapheme clusters. https://bugs.python.org/issue30717
    #
    # In our case plain Python truncates 'Savroni̇k Elektroni̇k'
    # to 'Savroni̇', which is 7 visible characters, 8 code points,
    # and 9 bytes.
    # Truncate by code points unless PyICU is available.
    trunc_len = 8
    try:
        # Optional dependency: truncate by grapheme clusters so that
        # multi-code-point characters are not cut in half.
        import icu
        bi_ci = icu.BreakIterator.createCharacterInstance(icu.Locale('en_US'))
        bi_ci.setText(manuf)
        bounds = list(bi_ci)[0:8]
        trunc_len = bounds[-1]
    except ImportError:
        pass
    manuf = manuf[:trunc_len]

    if manuf.lower() == orig_manuf.lower():
        # Original manufacturer name was short and simple.
        return manuf

    mixed_manuf = orig_manuf
    # At least one entry has whitespace in front of a period.
    mixed_manuf = re.sub(r'\s+\.', '.', mixed_manuf)
    # If company is all caps, convert to mixed case (so it doesn't look like
    # we're screaming the company name).
    if mixed_manuf.upper() == mixed_manuf:
        mixed_manuf = mixed_manuf.title()

    return u'{}\t{}'.format(manuf, mixed_manuf)
def prefix_to_oui(prefix):
    '''Convert a hex-digit registry prefix to a colon-separated OUI string.

    A 6-digit (24-bit) prefix maps to a plain "AA:BB:CC" OUI; longer
    prefixes are zero-padded to 48 bits and suffixed with a "/bits" mask.
    '''
    # Each hex digit covers 4 bits; integer math avoids the float that
    # len(prefix) * 8 / 2 produced on Python 3.
    pfx_len = len(prefix) * 4
    if pfx_len == 24:
        # 24-bit OUI assignment, no mask
        return ':'.join(hi + lo for hi, lo in zip(prefix[0::2], prefix[1::2]))
    # Other lengths require a mask: pad to a full 48-bit address first.
    oui = prefix.ljust(12, '0')
    oui = ':'.join(hi + lo for hi, lo in zip(oui[0::2], oui[1::2]))
    return '{}/{:d}'.format(oui, pfx_len)
def main():
    """Merge the manuf.tmpl template with the IEEE registry databases and
    write out the combined "manuf" file, reporting per-database statistics.
    """
    this_dir = os.path.dirname(__file__)
    template_path = os.path.join(this_dir, '..', 'manuf.tmpl')
    manuf_path = os.path.join(this_dir, '..', 'manuf')

    header_l = []
    in_header = True

    # Per-database download URLs and the minimum entry counts we use as a
    # sanity check that the download wasn't truncated.
    ieee_d = {
        'OUI':   { 'url': "http://standards-oui.ieee.org/oui/oui.csv", 'min_entries': 1000 },
        'CID':   { 'url': "http://standards-oui.ieee.org/cid/cid.csv", 'min_entries': 75 },
        'IAB':   { 'url': "http://standards-oui.ieee.org/iab/iab.csv", 'min_entries': 1000 },
        'OUI28': { 'url': "http://standards-oui.ieee.org/oui28/mam.csv", 'min_entries': 1000 },
        'OUI36': { 'url': "http://standards-oui.ieee.org/oui36/oui36.csv", 'min_entries': 1000 },
    }
    oui_d = {}
    hp = "[0-9a-fA-F]{2}"
    # Raw string: '\s'/'\S' are invalid escapes in a plain literal.
    manuf_re = re.compile(r'^({}:{}:{})\s+(\S.*)$'.format(hp, hp, hp))

    min_total = 35000  # 35830 as of 2018-09-05
    tmpl_added = 0
    total_added = 0

    # Write out the header and populate the OUI list with our entries.
    try:
        tmpl_fd = io.open(template_path, 'r', encoding='UTF-8')
    except OSError:
        exit_msg("Couldn't open template file for reading ({}) ".format(template_path))
    for tmpl_line in tmpl_fd:
        tmpl_line = tmpl_line.strip()
        m = manuf_re.match(tmpl_line)
        if not m and in_header:
            header_l.append(tmpl_line)
        elif m:
            in_header = False
            oui = m.group(1).upper()
            oui_d[oui] = m.group(2)
            tmpl_added += 1
    tmpl_fd.close()
    total_added += tmpl_added

    # Add IEEE entries from each of their databases
    ieee_db_l = list(ieee_d.keys())
    ieee_db_l.sort()
    for db in ieee_db_l:
        db_url = ieee_d[db]['url']
        ieee_d[db]['skipped'] = 0
        ieee_d[db]['added'] = 0
        ieee_d[db]['total'] = 0
        print('Merging {} data from {}'.format(db, db_url))
        (body, response_d) = open_url(db_url)
        ieee_csv = csv.reader(body.splitlines())
        # Header names are capitalized differently between the py2 and py3
        # urllib response dicts.
        if sys.version_info[0] >= 3:
            ieee_d[db]['last-modified'] = response_d['Last-Modified']
            ieee_d[db]['length'] = response_d['Content-Length']
        else:
            ieee_d[db]['last-modified'] = response_d['last-modified']
            ieee_d[db]['length'] = response_d['content-length']

        # Pop the title row.
        next(ieee_csv)
        for ieee_row in ieee_csv:
            # Registry,Assignment,Organization Name,Organization Address
            # IAB,0050C2DD6,Transas Marine Limited,Datavagen 37 Askim Vastra Gotaland SE 436 32
            oui = prefix_to_oui(ieee_row[1].upper())
            if sys.version_info[0] >= 3:
                manuf = ieee_row[2].strip()
            else:
                manuf = ieee_row[2].strip().decode('UTF-8')
            if oui in oui_d:
                # Template entries win over IEEE entries.
                print(u'{} - Skipping IEEE "{}" in favor of "{}"'.format(oui, manuf, oui_d[oui]))
                ieee_d[db]['skipped'] += 1
            else:
                oui_d[oui] = shorten(manuf)
                ieee_d[db]['added'] += 1
            ieee_d[db]['total'] += 1

        if ieee_d[db]['total'] < ieee_d[db]['min_entries']:
            # Fixed: this previously referenced the undefined name "ieee_db",
            # raising NameError on the very error path it was reporting.
            exit_msg("Too few {} entries ({})".format(db, ieee_d[db]['total']))
        total_added += ieee_d[db]['total']

    if total_added < min_total:
        exit_msg("Too few total entries ({})".format(total_added))

    # Write the output file.
    try:
        manuf_fd = io.open(manuf_path, 'w', encoding='UTF-8')
    except OSError:
        # Fixed: message said "for reading" although the file is opened for writing.
        exit_msg("Couldn't open manuf file for writing ({}) ".format(manuf_path))
    manuf_fd.write(u"# This file was generated by running ./tools/make-manuf.py.\n")
    manuf_fd.write(u"# Don't change it directly, change manuf.tmpl instead.\n#\n")
    manuf_fd.write('\n'.join(header_l))

    for db in ieee_db_l:
        manuf_fd.write(
            u'''\
# {url}:
# Content-Length: {length}
# Last-Modified: {last-modified}

'''.format(**ieee_d[db]))

    oui_l = list(oui_d.keys())
    oui_l.sort()
    for oui in oui_l:
        manuf_fd.write(u'{}\t{}\n'.format(oui, oui_d[oui]))
    manuf_fd.close()

    print('{:<20}: {}'.format('Original entries', tmpl_added))
    for db in ieee_d:
        print('{:<20}: {}'.format('IEEE ' + db + ' added', ieee_d[db]['added']))
    print('{:<20}: {}'.format('Total added', total_added))
    print()
    for db in ieee_d:
        print('{:<20}: {}'.format('IEEE ' + db + ' total', ieee_d[db]['total']))
    print()
    for db in ieee_d:
        print('{:<20}: {}'.format('IEEE ' + db + ' skipped', ieee_d[db]['skipped']))

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from unittest import mock
import json
import time
from aiohttp import web
from aiohttp.web_middlewares import _Handler
from aiohttp.test_utils import TestClient
from typing import Any, Dict
from aiohttp_session import get_session, SimpleCookieStorage
from aiohttp_session import setup as setup_middleware
from typedefs import _TAiohttpClient
def make_cookie(client: TestClient, data: Dict[Any, Any]) -> None:
    """Inject a pre-serialized session cookie into the test client's cookie jar."""
    payload = json.dumps({
        'session': data,
        'created': int(time.time()),
    })
    # Ignoring type until aiohttp#4252 is released
    client.session.cookie_jar.update_cookies(
        {'AIOHTTP_SESSION': payload}  # type: ignore
    )
def create_app(handler: _Handler) -> web.Application:
    """Build a minimal app with 10-second cookie sessions and *handler* at '/'."""
    application = web.Application()
    # SimpleCookieStorage keeps the session payload in the cookie itself.
    setup_middleware(application, SimpleCookieStorage(max_age=10))
    application.router.add_route('GET', '/', handler)
    return application
async def test_max_age_also_returns_expires(
    aiohttp_client: _TAiohttpClient
) -> None:
    """A max_age-limited session cookie must also carry an "expires" attribute.

    time.time is patched to 0.0 (the epoch) so the expected expiry is a
    fixed, assertable timestamp: epoch + max_age (10 s).
    """
    async def handler(request: web.Request) -> web.StreamResponse:
        # Ignoring type since time.time is mocked in this context
        # Re-pin the mocked clock inside the handler: the session machinery
        # reads time.time again when serializing the response cookie.
        time.time.return_value = 0.0  # type: ignore[attr-defined]
        session = await get_session(request)
        # Mutate the session so a Set-Cookie header is emitted.
        session['c'] = 3
        return web.Response(body=b'OK')

    with mock.patch('time.time') as m_clock:
        # The clock must be frozen BEFORE the client/app are created so the
        # session "created" timestamp is also 0.
        m_clock.return_value = 0.0
        client = await aiohttp_client(create_app(handler))
        make_cookie(client, {'a': 1, 'b': 2})
        resp = await client.get('/')
        assert resp.status == 200
        # max_age=10 from create_app => expires at epoch + 10 seconds.
        assert 'expires=Thu, 01-Jan-1970 00:00:10 GMT' in \
            resp.headers['SET-COOKIE']
|
nilq/baby-python
|
python
|
import json
import os
import dotenv
class ConfigError(Exception):
    """Raised when a required environment variable is absent or empty."""

    def __init__(self, field):
        # Embed the variable name in the message so logs identify the culprit.
        message = f'Missing environment variable {field}'
        super().__init__(message)
def get_env_var(name: str, default: str = None, prefix='', allow_empty=False):
    """Read environment variable *name*, optionally namespaced as PREFIX_NAME.

    Falls back to *default* when the variable is unset or empty. Raises
    ConfigError for a missing/empty value unless *allow_empty* is True.
    """
    env = prefix + '_' + name if prefix else name
    value = os.getenv(env)
    # Both unset and empty-string env values count as "missing". Compare the
    # default against None (not truthiness) so falsy defaults such as "" or
    # "0" are still applied instead of being silently ignored.
    if not value and default is not None:
        value = default
    if not allow_empty and not value:
        raise ConfigError(env)
    return value
def set_env_var(name: str, value):
    """Persist *value* under key *name* in the local .env file.

    Lists, dicts and tuples are serialized to compact (space-free) JSON
    before being written.
    """
    if isinstance(value, (list, dict, tuple)):
        serialized = json.dumps(value).replace(" ", "")
    else:
        serialized = value
    dotenv.set_key(".env", key_to_set=name, value_to_set=serialized, quote_mode="")
|
nilq/baby-python
|
python
|
"""Library for Byte-pair-encoding (BPE) tokenization.
Authors
* Abdelwahab Heba 2020
* Loren Lugosch 2020
"""
import os.path
import torch
import logging
import csv
import json
import sentencepiece as spm
from speechbrain.dataio.dataio import merge_char
from speechbrain.utils import edit_distance
import speechbrain as sb
# Module-level logger, namespaced to this module.
logger = logging.getLogger(__name__)
class SentencePiece:
    """BPE class call the SentencePiece unsupervised text tokenizer from Google.
    Reference: https://github.com/google/sentencepiece
    SentencePiece lib is an unsupervised text tokenizer and detokenizer.
    It implements subword units like Byte-pair-encoding (BPE),
    Unigram language model and char/word tokenizer.
    Arguments
    ---------
    model_dir : str
        The directory where the model will be saved (or already stored).
    vocab_size : int, None, optional
        Vocab size for the chosen tokenizer type (BPE, Unigram).
        The vocab_size is optional for char, and mandatory for BPE & unigram
        tokenization.
    annotation_train : str
        Path of the annotation file which is used to learn the tokenizer. It
        can be in JSON or csv format.
    annotation_read : str
        The data entry which contains the word sequence in the annotation file.
    model_type : str
        (bpe, char, unigram).
        If "bpe", train unsupervised tokenization of piece of words. see:
        https://www.aclweb.org/anthology/P16-1162/
        If "word" take the vocabulary from the input text.
        If "unigram" do piece of word tokenization using unigram language
        model, see: https://arxiv.org/abs/1804.10959
    char_format_input : bool
        Whether the read entry contains characters format input.
        (default: False)
        (e.g., a p p l e _ i s _ g o o d)
    character_coverage : int
        Amount of characters covered by the model, good defaults
        are: 0.9995 for languages with a rich character set like Japanese or
        Chinese and 1.0 for other languages with small character set.
        (default: 1.0)
    user_defined_symbols : string
        String contained a list of symbols separated by a comma.
        User-defined symbols are handled as one piece in any context.
        (default: None)
    max_sentencepiece_length : int
        Maximum number of characters for the tokens. (default: 10)
    bos_id : int
        If -1 the bos_id = unk_id = 0. otherwise, bos_id = int. (default: -1)
    eos_id : int
        If -1 the bos_id = unk_id = 0. otherwise, bos_id = int. (default: -1)
    split_by_whitespace : bool
        If False, allow the sentencepiece to extract piece crossing multiple
        words. This feature is important for : Chinese/Japanese/Korean.
        (default: True)
    num_sequences : int
        If not none, use at most this many sequences to train the tokenizer
        (for large datasets). (default: None)
    annotation_list_to_check : list,
        List of the annotation file which is used for checking the accuracy of
        recovering words from the tokenizer.
    annotation_format : str
        The format of the annotation file. JSON or csv are the formats supported.
    Example
    -------
    >>> import torch
    >>> dict_int2lab = {1: "HELLO", 2: "MORNING"}
    >>> model_dir = "tests/unittests/tokenizer_data/"
    >>> # Example with csv
    >>> annotation_train = "tests/unittests/tokenizer_data/dev-clean.csv"
    >>> annotation_read = "wrd"
    >>> model_type = "bpe"
    >>> bpe = SentencePiece(model_dir,100, annotation_train, annotation_read,
    ...                     model_type)
    >>> batch_seq = torch.Tensor([[1, 2, 2, 1],[1, 2, 1, 0]])
    >>> batch_lens = torch.Tensor([1.0, 0.75])
    >>> encoded_seq_ids, encoded_seq_pieces = bpe(
    ...     batch_seq, batch_lens, dict_int2lab, task="encode"
    ... )
    >>> # Example using JSON
    >>> annotation_train = "tests/unittests/tokenizer_data/dev-clean.json"
    >>> annotation_read = "wrd"
    >>> bpe = SentencePiece(model_dir,100, annotation_train, annotation_read,
    ...                     model_type, annotation_format = 'json')
    >>> encoded_seq_ids, encoded_seq_pieces = bpe(
    ...     batch_seq, batch_lens, dict_int2lab, task="encode"
    ... )
    """

    def __init__(
        self,
        model_dir,
        vocab_size,
        annotation_train=None,
        annotation_read=None,
        model_type="unigram",
        char_format_input=False,
        character_coverage=1.0,
        user_defined_symbols=None,
        max_sentencepiece_length=10,
        bos_id=-1,
        eos_id=-1,
        pad_id=-1,
        unk_id=0,
        split_by_whitespace=True,
        num_sequences=None,
        annotation_list_to_check=None,
        annotation_format="csv",
    ):
        if model_type not in ["unigram", "bpe", "char"]:
            raise ValueError("model_type must be one of : [unigram, bpe, char]")
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        if not isinstance(vocab_size, int):
            raise ValueError("vocab_size must be integer.")

        self.annotation_train = annotation_train
        self.annotation_read = annotation_read
        self.annotation_format = annotation_format

        if self.annotation_train is not None:
            ext = os.path.splitext(self.annotation_train)[1]
            self.text_file = self.annotation_train.replace(ext, ".txt")

        self.prefix_model_file = os.path.join(
            model_dir, str(vocab_size) + "_" + model_type
        )
        self.vocab_size = str(vocab_size)
        self.model_type = model_type
        self.char_format_input = char_format_input
        self.character_coverage = str(character_coverage)
        self.max_sentencepiece_length = str(max_sentencepiece_length)
        self.bos_id = str(bos_id)
        self.eos_id = str(eos_id)
        self.pad_id = str(pad_id)
        self.unk_id = str(unk_id)
        self.num_sequences = num_sequences
        self.split_by_whitespace = split_by_whitespace
        self.user_defined_symbols = user_defined_symbols

        if not os.path.isfile(self.prefix_model_file + ".model"):
            logger.info("Train tokenizer with type:" + self.model_type)
            if not os.path.isfile(self.text_file):
                # Only the main DDP process converts the annotation file; all
                # processes synchronize on the barrier afterwards.
                try:
                    if sb.utils.distributed.if_main_process():
                        if annotation_format == "csv":
                            self._csv2text()
                        elif annotation_format == "json":
                            self._json2text()
                        else:
                            raise ValueError(
                                "Annotation format not supported. Supported formats are csv and json. Got "
                                + annotation_format
                            )
                finally:
                    sb.utils.distributed.ddp_barrier()
            # Likewise, only the main process trains the tokenizer.
            try:
                if sb.utils.distributed.if_main_process():
                    self._train_BPE()
            finally:
                sb.utils.distributed.ddp_barrier()
        else:
            logger.info("Tokenizer is already trained.")
        logger.info("==== Loading Tokenizer ===")
        logger.info("Tokenizer path: " + self.prefix_model_file + ".model")
        logger.info("Tokenizer vocab_size: " + str(self.vocab_size))
        logger.info("Tokenizer type: " + self.model_type)
        self.sp = spm.SentencePieceProcessor()
        self.sp.load(self.prefix_model_file + ".model")

        try:
            if sb.utils.distributed.if_main_process():
                if annotation_list_to_check is not None:
                    self._check_coverage_from_bpe(annotation_list_to_check)
        finally:
            sb.utils.distributed.ddp_barrier()

    def _csv2text(self):
        """Read CSV file and convert specific data entries into text file.
        """
        if not os.path.isfile(os.path.abspath(self.annotation_train)):
            raise ValueError(
                self.annotation_train
                + " is not a file. please provide annotation file for training."
            )
        logger.info(
            "Extract "
            + self.annotation_read
            + " sequences from:"
            + self.annotation_train
        )
        annotation_file = open(self.annotation_train, "r")
        reader = csv.reader(annotation_file)
        headers = next(reader, None)
        if self.annotation_read not in headers:
            raise ValueError(
                self.annotation_read + " must exist in:" + self.annotation_train
            )
        index_label = headers.index(self.annotation_read)
        text_file = open(self.text_file, "w+")
        row_idx = 0
        for row in reader:
            if self.num_sequences is not None and row_idx > self.num_sequences:
                print(
                    "Using %d sequences to train the tokenizer."
                    % self.num_sequences
                )
                break
            row_idx += 1
            sent = row[index_label]
            if self.char_format_input:
                # Collapse space-separated characters back into words.
                (sent,) = merge_char([sent.split()])
                sent = " ".join(sent)
            text_file.write(sent + "\n")
        text_file.close()
        annotation_file.close()
        logger.info("Text file created at: " + self.text_file)

    def _json2text(self):
        """Read JSON file and convert specific data entries into text file.
        """
        if not os.path.isfile(os.path.abspath(self.annotation_train)):
            raise ValueError(
                self.annotation_train
                + " is not a file. please provide annotation file for training."
            )
        logger.info(
            "Extract "
            + self.annotation_read
            + " sequences from:"
            + self.annotation_train
        )
        # Read JSON
        with open(self.annotation_train, "r") as f:
            out_json = json.load(f)
        # Save text file
        text_file = open(self.text_file, "w+")
        row_idx = 0
        for snt_id in out_json.keys():
            if self.num_sequences is not None and row_idx > self.num_sequences:
                print(
                    "Using %d sequences to train the tokenizer."
                    % self.num_sequences
                )
                break
            row_idx += 1
            sent = out_json[snt_id][self.annotation_read]
            if self.char_format_input:
                # Collapse space-separated characters back into words.
                (sent,) = merge_char([sent.split()])
                sent = " ".join(sent)
            text_file.write(sent + "\n")
        text_file.close()
        logger.info("Text file created at: " + self.text_file)

    def _train_BPE(self):
        """Train tokenizer with unsupervised techniques (BPE, Unigram) using
        SentencePiece Library. If you use "char" mode, the SentencePiece
        creates a char dict so the vocab_size attribute is not needed.
        """
        query = (
            "--input="
            + self.text_file
            + " --model_prefix="
            + self.prefix_model_file
            + " --model_type="
            + self.model_type
            + " --bos_id="
            + self.bos_id
            + " --eos_id="
            + self.eos_id
            + " --pad_id="
            + self.pad_id
            + " --unk_id="
            + self.unk_id
            + " --max_sentencepiece_length="
            + self.max_sentencepiece_length
            + " --character_coverage="
            + self.character_coverage
        )
        if self.model_type not in ["char"]:
            # include vocab_size
            query += " --vocab_size=" + str(self.vocab_size)
        if self.user_defined_symbols is not None:
            query += " --user_defined_symbols=" + self.user_defined_symbols
        if not self.split_by_whitespace:
            query += " --split_by_whitespace=false"
        # Train tokenizer
        spm.SentencePieceTrainer.train(query)

    def _check_coverage_from_bpe(self, list_annotation_files=None):
        """Logging the accuracy of the BPE model to recover words from the training text.
        Arguments
        ---------
        list_annotation_files : list,
            List of the annotation file which is used for checking the accuracy of recovering words from the tokenizer.
        """
        # Avoid the mutable-default-argument pitfall; None means "no files".
        if list_annotation_files is None:
            list_annotation_files = []
        for annotation_file in list_annotation_files:
            if os.path.isfile(os.path.abspath(annotation_file)):
                logger.info(
                    "==== Accuracy checking for recovering text from tokenizer ==="
                )
                # csv reading
                if self.annotation_format == "csv":
                    fannotation_file = open(annotation_file, "r")
                    reader = csv.reader(fannotation_file)
                    headers = next(reader, None)
                    if self.annotation_read not in headers:
                        raise ValueError(
                            self.annotation_read
                            + " must exist in:"
                            + annotation_file
                        )
                    index_label = headers.index(self.annotation_read)
                # json reading
                else:
                    # Fixed: previously this opened self.annotation_train, so
                    # the *training* file was checked instead of the file
                    # requested in list_annotation_files.
                    with open(annotation_file, "r") as f:
                        reader = json.load(f)
                    index_label = self.annotation_read
                wrong_recover_list = []
                for row in reader:
                    if self.annotation_format == "csv":
                        row = row[index_label]
                    else:
                        row = reader[row][index_label]
                    if self.char_format_input:
                        (row,) = merge_char([row.split()])
                        row = " ".join(row)
                    row = row.split("\n")[0]
                    encoded_id = self.sp.encode_as_ids(row)
                    decode_text = self.sp.decode_ids(encoded_id)
                    (details,) = edit_distance.wer_details_for_batch(
                        ["utt1"],
                        [row.split(" ")],
                        [decode_text.split(" ")],
                        compute_alignments=True,
                    )
                    if details["WER"] > 0:
                        for align in details["alignment"]:
                            if align[0] != "=" and align[1] is not None:
                                if align[1] not in wrong_recover_list:
                                    wrong_recover_list.append(align[1])
                if self.annotation_format == "csv":
                    fannotation_file.close()
                logger.info("recover words from: " + annotation_file)
                if len(wrong_recover_list) > 0:
                    # logger.warning: .warn() is a deprecated alias.
                    logger.warning(
                        "Wrong recover words: " + str(len(wrong_recover_list))
                    )
                    logger.warning(
                        "Tokenizer vocab size: " + str(self.sp.vocab_size())
                    )
                    logger.warning(
                        "accuracy recovering words: "
                        + str(
                            1
                            - float(len(wrong_recover_list))
                            / self.sp.vocab_size()
                        )
                    )
                else:
                    logger.info("Wrong recover words: 0")
                    logger.warning("accuracy recovering words: " + str(1.0))
            else:
                logger.info(
                    "No accuracy recover checking for" + annotation_file
                )

    def __call__(
        self, batch, batch_lens=None, ind2lab=None, task="encode",
    ):
        """This __call__ function implements the tokenizer encoder and decoder
        (restoring the string of word) for BPE, Regularized BPE (with unigram),
        and char (speechbrain/nnet/RNN.py).
        Arguments
        ----------
        batch : tensor.IntTensor or list
            List if ( batch_lens = None and task = "decode_from_list")
            Contains the original labels. Shape: [batch_size, max_length]
        batch_lens : tensor.LongTensor
            Containing the relative length of each label sequences. Must be 1D
            tensor of shape: [batch_size]. (default: None)
        ind2lab : dict
            Dictionary which maps the index from label sequences
            (batch tensor) to string label.
        task : str
            ("encode", "decode", "decode_from_list)
            "encode": convert the batch tensor into sequence of tokens.
                the output contain a list of (tokens_seq, tokens_lens)
            "decode": convert a tensor of tokens to a list of word sequences.
            "decode_from_list": convert a list of token sequences to a list
                of word sequences.
        """
        if task == "encode" and ind2lab is None:
            raise ValueError("Tokenizer encoder must have the ind2lab function")

        if task == "encode":
            # Convert list of words/chars to bpe ids
            bpe = []
            max_bpe_len = 0
            batch_lens = (batch_lens * batch.shape[1]).int()
            for i, utt_seq in enumerate(batch):
                tokens = [
                    ind2lab[int(index)] for index in utt_seq[: batch_lens[i]]
                ]
                if self.char_format_input:
                    (words_list,) = merge_char([tokens])
                    sent = " ".join(words_list)
                else:
                    sent = " ".join(tokens)
                bpe_encode = self.sp.encode_as_ids(sent)
                bpe.append(bpe_encode)
                # save the longest bpe sequence
                # it help to compute the relative length of each utterance
                if len(bpe_encode) > max_bpe_len:
                    max_bpe_len = len(bpe_encode)
            # Create bpe tensor
            bpe_tensor = torch.zeros(
                (batch.shape[0], max_bpe_len), device=batch.device
            )
            bpe_lens = torch.zeros((batch.shape[0]), device=batch.device)
            for i, bpe_utt in enumerate(bpe):
                bpe_tensor[i, : len(bpe_utt)] = torch.Tensor(bpe_utt)
                bpe_lens[i] = len(bpe_utt) / max_bpe_len
            return bpe_tensor, bpe_lens
        elif task == "decode_from_list":
            # From list of hyps (not padded outputs)
            # do decoding
            return [self.sp.decode_ids(utt_seq).split(" ") for utt_seq in batch]
        elif task == "decode":
            # From a batch tensor and a length tensor
            # find the absolute batch lengths and do decoding
            batch_lens = (batch_lens * batch.shape[1]).int()
            return [
                self.sp.decode_ids(
                    utt_seq[: batch_lens[i]].int().tolist()
                ).split(" ")
                for i, utt_seq in enumerate(batch)
            ]
|
nilq/baby-python
|
python
|
from ExceptionHandler import ExceptionHandler
class InputController:
    """Thin facade that reads user input through an exception-handling wrapper."""

    def __init__(self, inputReader, exceptionHandler):
        # Keep the original public attribute names; other code may rely on them.
        self.InputReader = inputReader
        self.ExceptionHandler = exceptionHandler

    def pollUserInput(self):
        """Delegate one input poll to the handler's guarded execution."""
        return self.ExceptionHandler.executeFunc()
|
nilq/baby-python
|
python
|
import math
def factors(n):
    """Return the set of all positive divisors of n (n >= 1).

    Trial division up to the integer square root. Uses pure integer
    arithmetic: the original int(n / i) and int(math.sqrt(n)) go through
    floats and silently lose precision once n exceeds 2**53.
    """
    results = set()
    for i in range(1, math.isqrt(n) + 1):
        if n % i == 0:
            results.add(i)
            results.add(n // i)
    return results
# Search over triangular numbers (0, 1, 3, 6, 10, ...): x accumulates i each
# iteration, and we print every triangular number with more than 500 divisors
# (Project Euler problem 12 style).
x = 0
i = 0
while True:
    x += i
    if len(factors(x)) > 500:
        # NOTE(review): this loop never terminates; presumably only the first
        # printed value is wanted -- confirm whether a break belongs here.
        print(x)
    i += 1
|
nilq/baby-python
|
python
|
"""
MesoNet
Authors: Brandon Forys and Dongsheng Xiao, Murphy Lab
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
The method "vxm_data_generator" is adapted from VoxelMorph:
Balakrishnan, G., Zhao, A., Sabuncu, M. R., Guttag, J., & Dalca, A. V. (2019). VoxelMorph: A Learning Framework for
Deformable Medical Image Registration. IEEE Transactions on Medical Imaging, 38(8), 1788–1800.
https://doi.org/10.1109/TMI.2019.2897538
VoxelMorph is distributed under the Apache License 2.0.
"""
from mesonet.mask_functions import *
import voxelmorph as vxm
from skimage.color import rgb2gray
def vxm_data_generator(x_data, template, batch_size=1):
    """
    Generator that takes in data of size [N, H, W], and yields data for
    our custom vxm model. Note that we need to provide numpy data for each
    input, and each output.
    inputs:  moving [bs, H, W, 1], fixed image [bs, H, W, 1]
    outputs: moved image [bs, H, W, 1], zero-gradient [bs, H, W, 2]
    """
    # Single-sample batches arrive as RGB images: collapse to grayscale and
    # add a leading batch axis so indexing below is uniform.
    if batch_size == 1:
        x_data = rgb2gray(x_data)
        template = rgb2gray(template)
        x_data = np.expand_dims(x_data, axis=0)
        template = np.expand_dims(template, axis=0)

    vol_shape = x_data.shape[1:]  # spatial shape of one sample
    ndims = len(vol_shape)

    # Zero array matching the deformation-field shape; serves as the dummy
    # "true" flow so the Grad loss penalizes any non-zero deformation.
    zero_phi = np.zeros([batch_size, *vol_shape, ndims])

    while True:
        # Sample a moving image from the template stack and a fixed image
        # from the input data, each with a trailing channel axis.
        moving_idx = np.random.randint(0, template.shape[0], size=batch_size)
        moving_images = template[moving_idx, ..., np.newaxis]
        fixed_idx = np.random.randint(0, x_data.shape[0], size=batch_size)
        fixed_images = x_data[fixed_idx, ..., np.newaxis]

        # We don't have a ground-truth moved image; the fixed image plus the
        # zero flow act as the comparison targets. NOTE: these outputs are
        # not used in our analyses -- inputs go straight into predict().
        yield [moving_images, fixed_images], [fixed_images, zero_phi]
def init_vxm_model(img_path, model_path):
    """
    Initializes a VoxelMorph model to be applied.
    :param img_path: (required) The path to the image to be aligned using VoxelMorph.
    :param model_path: (required) The path to the VoxelMorph model to be used.
    :return:
    """
    # U-Net feature counts for the dense VoxelMorph network.
    unet_features = [
        [32, 32, 32, 32],          # encoder features
        [32, 32, 32, 32, 32, 16],  # decoder features
    ]
    # Input is a 2D image, so the shape comes from the first two dimensions.
    inshape = img_path.shape[0:2]
    model = vxm.networks.VxmDense(inshape, unet_features, int_steps=0)

    # Image-similarity (MSE) plus smoothness (Grad-l2) losses, with the
    # regularizer down-weighted by lambda.
    lambda_param = 0.05
    model.compile(
        optimizer="Adam",
        loss=[vxm.losses.MSE().loss, vxm.losses.Grad("l2").loss],
        loss_weights=[1, lambda_param],
    )
    model.load_weights(model_path)
    return model
def vxm_transform(x_data, flow_path):
    """
    Carried out a VoxelMorph transformation.
    :param x_data: (required) The image data to be transformed.
    :param flow_path: (required) If we already have a deformation field that we want
    to apply to all data, use the deformation field specified at this path.
    :return:
    """
    # Reuse a previously computed deformation field instead of deriving one.
    flow_field = np.load(flow_path)

    # Grayscale, then shape to [1, H, W, 1] as the Transform network expects.
    batched = np.expand_dims(rgb2gray(x_data), axis=0)[..., np.newaxis]
    vol_size = batched.shape[1:-1]

    warped = vxm.networks.Transform(
        vol_size, interp_method="linear", nb_feats=batched.shape[-1]
    ).predict([batched, flow_field])

    # Strip batch and channel axes to return a plain 2D image.
    return warped[0, :, :, 0]
def voxelmorph_align(model_path, img_path, template, exist_transform, flow_path):
    """
    Carries out a VoxelMorph alignment procedure, and returns the output image and corresponding flow field.
    :param model_path: (required) The path to a VoxelMorph model to use. By default, this is in the MesoNet repository >
    mesonet/models/voxelmorph.
    :param img_path: (required) The path to an image to be aligned using VoxelMorph.
    :param template: (required) The path to a VoxelMorph template image to which the input image will be aligned,
    creating the transformation to be applied to the output image.
    :param exist_transform: (required) If True, uses an existing VoxelMorph flow field (the .npy file saved alongside
    each VoxelMorph transformed image) to carry out the transformations (instead of computing a new flow field).
    :param flow_path: (required) The path to the directory to which the VoxelMorph flow field from the current
    transformation should be saved.
    :return:
    """
    if exist_transform:
        # Apply a saved flow field; no new deformation is produced.
        print("using existing transform")
        output_img = vxm_transform(img_path, flow_path)
        flow_img = ""
    else:
        # Compute a fresh alignment of the image against the template.
        vxm_model = init_vxm_model(img_path, model_path)
        sample_inputs, _ = next(vxm_data_generator(img_path, template))
        predictions = vxm_model.predict(sample_inputs)
        # predictions[0]: moved image batch -> take the single 2D mask.
        output_img = predictions[0][0, :, :, 0]
        # predictions[1]: the dense flow field.
        flow_img = predictions[1]
    print("Results saved!")
    return output_img, flow_img
|
nilq/baby-python
|
python
|
from py.test import raises
from pypy.conftest import gettestobjspace
class AppTestUnicodeData:
    """App-level tests for PyPy's unicodedata module (Python 2 era code).

    NOTE(review): PyPy "AppTest" method bodies are extracted and executed at
    app level inside the spawned object space — presumably why each test
    imports unicodedata locally and uses ``unichr``; do not restructure.
    """

    def setup_class(cls):
        # Build an object space with the unicodedata module enabled.
        space = gettestobjspace(usemodules=('unicodedata',))
        cls.space = space

    def test_hangul_syllables(self):
        import unicodedata
        # Test all leading, vowel and trailing jamo
        # but not every combination of them.
        for code, name in ((0xAC00, 'HANGUL SYLLABLE GA'),
                           (0xAE69, 'HANGUL SYLLABLE GGAEG'),
                           (0xB0D2, 'HANGUL SYLLABLE NYAGG'),
                           (0xB33B, 'HANGUL SYLLABLE DYAEGS'),
                           (0xB5A4, 'HANGUL SYLLABLE DDEON'),
                           (0xB80D, 'HANGUL SYLLABLE RENJ'),
                           (0xBA76, 'HANGUL SYLLABLE MYEONH'),
                           (0xBCDF, 'HANGUL SYLLABLE BYED'),
                           (0xBF48, 'HANGUL SYLLABLE BBOL'),
                           (0xC1B1, 'HANGUL SYLLABLE SWALG'),
                           (0xC41A, 'HANGUL SYLLABLE SSWAELM'),
                           (0xC683, 'HANGUL SYLLABLE OELB'),
                           (0xC8EC, 'HANGUL SYLLABLE JYOLS'),
                           (0xCB55, 'HANGUL SYLLABLE JJULT'),
                           (0xCDBE, 'HANGUL SYLLABLE CWEOLP'),
                           (0xD027, 'HANGUL SYLLABLE KWELH'),
                           (0xD290, 'HANGUL SYLLABLE TWIM'),
                           (0xD4F9, 'HANGUL SYLLABLE PYUB'),
                           (0xD762, 'HANGUL SYLLABLE HEUBS'),
                           (0xAE27, 'HANGUL SYLLABLE GYIS'),
                           (0xB090, 'HANGUL SYLLABLE GGISS'),
                           (0xB0AD, 'HANGUL SYLLABLE NANG'),
                           (0xB316, 'HANGUL SYLLABLE DAEJ'),
                           (0xB57F, 'HANGUL SYLLABLE DDYAC'),
                           (0xB7E8, 'HANGUL SYLLABLE RYAEK'),
                           (0xBA51, 'HANGUL SYLLABLE MEOT'),
                           (0xBCBA, 'HANGUL SYLLABLE BEP'),
                           (0xBF23, 'HANGUL SYLLABLE BBYEOH'),
                           (0xD7A3, 'HANGUL SYLLABLE HIH')):
            # Name lookup and reverse lookup must round-trip.
            assert unicodedata.name(unichr(code)) == name
            assert unicodedata.lookup(name) == unichr(code)
        # Test outside the range
        raises(ValueError, unicodedata.name, unichr(0xAC00 - 1))
        raises(ValueError, unicodedata.name, unichr(0xD7A3 + 1))

    def test_cjk(self):
        import sys
        if sys.maxunicode < 0x10ffff:
            skip("requires a 'wide' python build.")
        import unicodedata
        cases = ((0x3400, 0x4DB5),
                 (0x4E00, 0x9FA5))
        # Unicode 4.1 extended the CJK ranges and added the SIP plane block.
        if unicodedata.unidata_version >= "4.1":
            cases = ((0x3400, 0x4DB5),
                     (0x4E00, 0x9FBB),
                     (0x20000, 0x2A6D6))
        for first, last in cases:
            # Test at and inside the boundary
            for i in (first, first + 1, last - 1, last):
                charname = 'CJK UNIFIED IDEOGRAPH-%X'%i
                assert unicodedata.name(unichr(i)) == charname
                assert unicodedata.lookup(charname) == unichr(i)
            # Test outside the boundary
            for i in first - 1, last + 1:
                charname = 'CJK UNIFIED IDEOGRAPH-%X'%i
                # NOTE(review): a ValueError here is ignored, but a name()
                # call that does NOT raise also passes silently — presumably
                # an assertion was intended; confirm upstream behavior.
                try:
                    unicodedata.name(unichr(i))
                except ValueError:
                    pass
                raises(KeyError, unicodedata.lookup, charname)
|
nilq/baby-python
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords
from mindconverter.graph_based_converter.mapper.base import AtenToMindSporeMapper
class FlattenMapper(AtenToMindSporeMapper):
    """Mapper translating an aten flatten node into a MindSpore Reshape call."""

    @staticmethod
    def _operation_name_in_ms(*args, **kwargs):
        """Return the MindSpore operator name used to realize flatten."""
        return "P.Reshape"

    @staticmethod
    def _convert_params(**kwargs):
        """Reshape takes no constructor parameters; nothing to convert."""
        return dict()

    @staticmethod
    def _convert_trained_weights(**kwargs):
        """Flatten carries no trainable weights; nothing to convert."""
        return dict()

    @staticmethod
    def _generate_snippet_template(**kwargs):
        """Build the init/construct code template for the Reshape snippet."""
        template, exchange_msg, outputs_list, outputs_mapping = AtenToMindSporeMapper._generate_snippet_template(
            **kwargs)
        params = kwargs.get("raw_params")
        # Without raw parameters there is no output shape to bake in; fall
        # back to the base-class template unchanged.
        if not params:
            return template, exchange_msg, outputs_list, outputs_mapping

        operation = kwargs["operation"]
        trainable_weights = kwargs["weights"]
        slot = "var_0"
        args = {"shape": params.get("output_shape")}
        # The doubled braces survive f-string evaluation as literal braces,
        # leaving {var_0}/{shape} placeholders for the later template fill.
        init_stmt = f"self.{{{slot}}} = {operation}()"
        shape_stmt = f"self.{{{slot}}}_shape = tuple({{shape}})"
        construct_stmt = f"opt_{{{slot}}} = self.{{{slot}}}" \
                         f"({{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}, " \
                         f"self.{{{slot}}}_shape)"
        template = {
            slot: {
                TemplateKeywords.INIT.value: [init_stmt, shape_stmt],
                TemplateKeywords.CONSTRUCT.value: [construct_stmt]
            }
        }
        exchange_msg = AtenToMindSporeMapper._generate_exchange_msg(
            variable_slot=slot, op=operation, args=args, weights=trainable_weights)
        return template, exchange_msg, outputs_list, outputs_mapping
|
nilq/baby-python
|
python
|
class A:
def foo<caret><error descr="Method must have a first parameter, usually called 'self'">()</error>: # Add 'self'
pass
|
nilq/baby-python
|
python
|
import copy
import math
import random
import typing as t
import hypemaths as hm
from ..exceptions import (
InvalidMatrixError,
MatrixDimensionError,
MatrixNotSquare,
)
from ..mixins import CopyMixin
class Matrix(CopyMixin):
    """A 2D matrix of numbers.

    Supports elementwise arithmetic, matrix multiplication, and common
    linear-algebra helpers (transpose, trace, determinant, Frobenius norm),
    plus conversion to and from ``hm.Vector``.
    """

    def __init__(
            self,
            matrix: t.Union[int, float, list] = None
    ) -> None:
        """
        Parameters
        ----------
        matrix : t.Union[int, float, list]
            This is the nested 2D lists which will be converted into an efficient `Matrix` object capable of several
            calculations and features. Defaults to `None`.
        """
        # Test `is None` / empty-list explicitly rather than plain truthiness,
        # so the scalar 0 (a valid 1x1 matrix value) is not rejected.
        if matrix is None or matrix == []:
            raise ValueError("You need to pass the 2D for the matrix object!")
        self.matrix = self._cleaned_matrix(matrix)

    @property
    def rows(self) -> int:
        """
        Returns
        -------
        int
            The number of rows in the 2D matrix created.
        """
        return len(self.matrix)

    @property
    def cols(self) -> int:
        """
        Returns
        -------
        int
            The number of the columns in the 2D matrix created.
        """
        return len(self.matrix[0])

    @property
    def dims(self) -> tuple:
        """
        Returns
        -------
        tuple
            The tuple containing the shape or the rows and columns in the matrix created.
        """
        return tuple(self._get_mat_dimension(self.matrix))

    @property
    def size(self) -> int:
        """
        Returns
        -------
        int
            The integer which is the total number of items in the matrix
        """
        # rows * cols directly; no need to build a throwaway Matrix copy.
        return self.rows * self.cols

    def __hash__(self) -> int:
        # Lists are unhashable, so hash an immutable tuple-of-tuples view of
        # the data (the previous `hash(self.matrix)` always raised TypeError).
        return hash(tuple(tuple(row) for row in self.matrix))

    def __repr__(self) -> str:
        return "{}([{}])".format(
            self.__class__.__name__,
            ",\n        ".join([str(x) for x in self.matrix])
        )

    def __eq__(self, other: "Matrix") -> bool:
        if not isinstance(other, Matrix):
            raise TypeError(
                f"Equality comparison with Matrix can only be performed with another Matrix, got {type(other)}"
            )
        return self.matrix == other.matrix

    def __getitem__(self, index: t.Union[int, tuple]) -> t.Union[int, float, list]:
        # An int index returns a whole row; a (row, col) tuple returns one value.
        if isinstance(index, int):
            return self.matrix[index]
        else:
            return self.matrix[index[0]][index[1]]

    def __setitem__(self, index: t.Union[int, tuple], value: t.Union[int, float]) -> None:
        # An int index replaces a whole row slot; a (row, col) tuple sets one value.
        if isinstance(value, (int, float)):
            if isinstance(index, int):
                self.matrix[index] = value
            else:
                self.matrix[index[0]][index[1]] = value
        else:
            raise TypeError(
                f"All values must be integers or floats, but value[{value}] is {type(value)}."
            )

    def __add__(self, other: "Matrix") -> "Matrix":
        cls = self.__class__
        if not isinstance(other, cls):
            raise TypeError(f"Matrix can only be added with other matrix. Not {type(other)}")
        if not (self.rows, self.cols) == (other.rows, other.cols):
            raise MatrixDimensionError("These matrices cannot be added due to wrong dimensions.")
        matrix = [[self[row][cols] + other[row][cols] for cols in range(self.cols)] for row in range(self.rows)]
        return cls(matrix)

    def __sub__(self, other: "Matrix") -> "Matrix":
        cls = self.__class__
        if not isinstance(other, cls):
            raise TypeError(f"Matrix can only be subtracted with other matrix. Not {type(other)}")
        if not (self.rows, self.cols) == (other.rows, other.cols):
            raise MatrixDimensionError("These matrices cannot be subtracted due to wrong dimensions.")
        matrix = [[self[row][cols] - other[row][cols] for cols in range(self.cols)] for row in range(self.rows)]
        return cls(matrix)

    def __mul__(self, other: t.Union[int, float, "Matrix"]) -> "Matrix":
        cls = self.__class__
        # Scalars scale every element; matrices use the standard matrix product.
        if isinstance(other, (int, float)):
            matrix = [[element * other for element in row] for row in self]
            return cls(matrix)
        if not isinstance(other, cls):
            raise TypeError(f"Matrix can only be multiplied with other matrix. Not {type(other)}")
        if self.cols != other.rows:
            raise MatrixDimensionError("These matrices cannot be multiplied due to wrong dimensions.")
        matrix = [[
            sum(a * b for a, b in zip(self_row, other_col)) for other_col in zip(*other)] for self_row in self
        ]
        return cls(matrix)

    def __truediv__(self, other: t.Union[int, float, "Matrix"]) -> "Matrix":
        cls = self.__class__
        # Scalars divide every element; matrix "division" mirrors the product
        # with / in place of * (the class's established convention).
        if isinstance(other, (int, float)):
            matrix = [[element / other for element in row] for row in self]
            return cls(matrix)
        if not isinstance(other, cls):
            raise TypeError(f"Matrix can only be divided with other matrix. Not {type(other)}")
        if self.cols != other.rows:
            raise MatrixDimensionError("These matrices cannot be divided due to wrong dimensions.")
        matrix = [[
            sum(a / b for a, b in zip(self_row, other_col)) for other_col in zip(*other)] for self_row in self
        ]
        return cls(matrix)

    def __radd__(self, other: "Matrix") -> "Matrix":
        return self.__add__(other)

    def __rmul__(self, other: "Matrix") -> "Matrix":
        return self.__mul__(other)

    def __matmul__(self, other: "Matrix") -> "Matrix":
        return self.__mul__(other)

    def __abs__(self) -> "Matrix":
        cls = self.__class__
        matrix = [
            [abs(self[row][cols]) for cols in range(self.cols)]
            for row in range(self.rows)
        ]
        return cls(matrix)

    def __round__(self, n: t.Optional[int] = None) -> "Matrix":
        cls = self.__class__
        matrix = [
            [round(self[row][cols], ndigits=n) for cols in range(self.cols)] for row in range(self.rows)
        ]
        return cls(matrix)

    def __int__(self) -> "Matrix":
        cls = self.__class__
        matrix = [
            [int(self[row][cols]) for cols in range(self.cols)]
            for row in range(self.rows)
        ]
        return cls(matrix)

    def __float__(self) -> "Matrix":
        cls = self.__class__
        matrix = [
            [float(self[row][cols]) for cols in range(self.cols)]
            for row in range(self.rows)
        ]
        return cls(matrix)

    @classmethod
    def get_filled_matrix(cls, dims: tuple, fill: t.Union[int, float]) -> "Matrix":
        """
        Create a Matrix object with dimension specified containing fill value specified.

        Parameters
        ----------
        dims : tuple
            This is the dimensions of the fill matrix, created when the `matrix` parameter is not specified and only
            this value and the fill value is provided. Defaults to `None`.
        fill : t.Union[int, float]
            This is the fill value, which works with the `dims` parameter to create a filled matrix with the given
            value. Defaults to `None`.

        Returns
        -------
        Matrix
            Returns filled matrix object with the dimensions and fill value passed.

        Examples
        --------
        Create a matrix of dimensions : (2, 2) with the fill value of 5.

        >>> matrix = Matrix.get_filled_matrix((2, 2), 5)
        >>> matrix
        Matrix([[5, 5], [5, 5]])

        Create a matrix of dimensions : (4, 3) with the fill value 9

        >>> matrix = Matrix.get_filled_matrix((4, 3), 9)
        >>> matrix
        Matrix([[9, 9, 9], [9, 9, 9], [9, 9, 9], [9, 9, 9]])
        """
        return cls(cls._create_filled_matrix(dims, fill))

    @classmethod
    def get_randomized_matrix(
            cls, dims: tuple, min_value: int, max_value: int, seed: int = None, round_digits: t.Optional[int] = 2
    ) -> "Matrix":
        """
        Generate a random matrix object with the specified parameters.

        Parameters
        ----------
        dims: tuple
            The dimensions for the matrix to be generated.
        min_value: int
            The minimum value for random number generation
        max_value: int
            The maximum value for random number generation
        seed: int
            The seed for random numer generation which can be recreated later.
        round_digits: int
            The number of digits to be in the number after decimal. Pass `None` for integer values.

        Returns
        -------
        Matrix
            The random matrix generated from the function.

        Examples
        --------
        Generate a matrix with random integer values

        >>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, round_digits=None)
        >>> matrix
        Matrix([[4, 9], [9, 2]])

        Generate a reproducible matrix with seed of 7

        >>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, seed=7)
        >>> matrix
        Matrix([[3.91, 2.36], [6.86, 1.65]])

        Generate a float matrix with 5 digits after decimal

        >>> matrix = Matrix.get_randomized_matrix((2, 2), 1, 10, round_digits=5)
        >>> matrix
        Matrix([[5.82294, 4.2912], [1.52199, 5.56692]])
        """
        def is_float_or_int(value: t.Any) -> bool:
            # Raises rather than returning False, so bad bounds fail loudly.
            if not isinstance(value, (int, float)):
                raise TypeError(
                    f"The values or value must be integer or float, but the given fill value is {type(value)}."
                )
            return True

        if len(dims) != 2:
            raise ValueError("You must pass the 2 DIMENSIONS for the Matrix fill.")
        if is_float_or_int(min_value) and is_float_or_int(max_value):
            if seed is not None:
                random.seed(seed)
            # NOTE(review): `not round_digits` also treats round_digits=0 as
            # "round to integer", which happens to coincide with the 0-digit
            # rounding a caller would expect — confirm this is intentional.
            if not round_digits:
                matrix = [
                    [round(random.uniform(min_value, max_value)) for _ in range(dims[1])] for _ in range(dims[0])
                ]
                return cls(matrix)
            else:
                matrix = [
                    [
                        round(random.uniform(min_value, max_value), ndigits=round_digits) for _ in range(dims[1])
                    ] for _ in range(dims[0])
                ]
                return cls(matrix)

    @staticmethod
    def _cleaned_matrix(matrix: list) -> list:
        """
        Checks if a matrix passed is valid or not and returns the processed and cleaned matrix.

        Parameters
        ----------
        matrix : list
            The matrix passed to this function for processing, validation and cleaning.

        Returns
        -------
        list
            The list consisting the validated and cleaned matrix after passing the checks.

        Raises
        ------
        TypeError
            If the matrix contains any datatype other than `int` or `float`.
        InvalidMatrixError
            If the matrix has invalid size or cannot be validated.
        """
        def contains_sublist(mat: list) -> bool:
            # True when every element of `mat` is itself a list (already 2D).
            return all(isinstance(element, list) for element in mat)

        def value_check(mat: list) -> bool:
            # Raises TypeError on the first non-numeric cell; True otherwise.
            for row, row_values in enumerate(mat):
                for col, value in enumerate(row_values):
                    if not isinstance(value, (int, float)):
                        raise TypeError(
                            f"All values must be integers or floats, but value[{row}][{col}] is {type(value)}"
                        )
            return True

        # A bare scalar becomes a 1x1 matrix; a flat row becomes a 1xN matrix.
        if isinstance(matrix, (int, float)):
            return [[matrix]]
        matrix = [matrix] if not contains_sublist(matrix) else matrix
        if value_check(matrix):
            # All rows must be the same length (value_check already ran once;
            # the previous duplicate call here was redundant).
            len_set = set([len(x) for x in matrix])
            if len(len_set) > 1:
                raise InvalidMatrixError(
                    "Matrix sizes are invalid! Must have same number of element in each sub list."
                )
            return matrix

    @staticmethod
    def _create_filled_matrix(dims: tuple, fill: t.Union[int, float] = None) -> list:
        """
        Parameters
        ----------
        dims: tuple
            The dimensions for the matrix to be initialized. Only 2 dimensions (X, Y) are allowed.
        fill: t.Union[int, float]
            The value to be filled across the matrix of the specified dimension.

        Returns
        -------
        list
            The 2D python list, to be converted into `Matrix` object.

        Raises
        ------
        ValueError
            If the number of dimensions don't equal to 2.
        TypeError
            If the fill value isn't either `int` or `float`.
        """
        if len(dims) != 2:
            raise ValueError(
                "You must pass the 2 DIMENSIONS for the Matrix fill.")
        if not fill:
            fill = 0
        if not isinstance(fill, (int, float)):
            raise TypeError(
                f"The fill value must be integer or float, but the given fill value is {type(fill)}."
            )
        matrix_structure = []
        first_row = [fill] * dims[1]
        for _ in range(dims[0]):
            # copy() so rows are independent lists, not aliases of each other.
            matrix_structure.append(first_row.copy())
        return matrix_structure

    def _get_mat_dimension(self, matrix: list) -> list:
        """
        Parameters
        ----------
        matrix : list
            The matrix whose dimensions are to be figured out.

        Returns
        -------
        list
            A list containing the dimensions of the matrix passed.
        """
        # Recurse down the first element of each nesting level.
        if not isinstance(matrix, list):
            return []
        return [len(matrix)] + self._get_mat_dimension(matrix[0])

    def clone(self) -> "Matrix":
        """
        Returns the copy of the matrix.

        Returns
        -------
        Matrix
            The copy of the present matrix.

        Examples
        --------
        Getting the copy instead of directly assigning, when you want to modify the matrix without disturbing the first
        one.

        >>> matrix = Matrix([[1, 2], [3, 4]])
        >>> matrix.clone()
        Matrix([[1, 2], [3, 4]])
        """
        return copy.deepcopy(self)

    def trace(self) -> t.Union[int, float]:
        """
        Returns the sum of the diagonals of the matrix

        Returns
        -------
        t.Union[int, float]
            The sum of the diagonals of the current `Matrix`

        Raises
        ------
        MatrixNotSquare
            If the number of columns and rows are not equal in the `Matrix`.

        Examples
        --------
        Getting the sum of the diagonal of the specified matrix.

        >>> matrix = Matrix([[5, 5], [3, 4]])
        >>> matrix.trace()
        9
        """
        if self.rows != self.cols:
            raise MatrixNotSquare("Cannot retrieve the sum of diagonals as the row and column count are not same.")
        total = 0
        for i in range(self.rows):
            total += self[i, i]
        return total

    def transpose(self) -> "Matrix":
        """
        Transposes the matrix.

        This converts the matrix elements order, by converting the rows into columns and vice versa.

        Returns
        -------
        Matrix
            The transposed matrix.

        Examples
        --------
        >>> mat = Matrix([[1, 2], [3, 4]])
        >>> mat.transpose()
        Matrix([[1, 3], [2, 4]])
        """
        cls = self.__class__
        matrix = [[self[cols][row] for cols in range(self.rows)] for row in range(self.cols)]
        return cls(matrix)

    def frobenius_norm(self) -> float:
        """
        Calculate the frobenius norm of the matrix.

        The frobenius norm is computed by taking square root of the sums the squares of each entry of the matrix.
        This can be used to calculate the 2-norm of a column vector.

        Returns
        -------
        float:
            The computed frobenius norm.
        """
        sum_of_squares = 0
        for column in self.matrix:
            for elem in column:
                sum_of_squares += elem ** 2
        return math.sqrt(sum_of_squares)

    def determinant(self) -> float:
        """
        Get the determinant of a matrix.

        In linear algebra, the determinant is a scalar value that can be computed from the elements of a square
        matrix and encodes certain properties of the linear transformation described by the matrix. The determinant of
        a matrix ``A`` is denoted det, det ``A``, or ``|A|``.

        Returns
        -------
        float:
            The determinant of the matrix.
        """
        # Gaussian elimination without row swaps: reduce to upper-triangular
        # form, then the determinant is the product of the diagonal entries.
        matrix_size = len(self.matrix)
        matrix_copy = self.clone()
        for fd in range(matrix_size):  # FD - The focus diagonal.
            for i in range(fd + 1, matrix_size):
                # NOTE(review): substituting a tiny value for a zero pivot
                # avoids ZeroDivisionError but sacrifices accuracy — confirm
                # this is acceptable for the intended use.
                if matrix_copy[fd][fd] == 0:
                    matrix_copy[fd][fd] = 1.0e-18
                current_row_scaler = matrix_copy[i][fd] / matrix_copy[fd][fd]
                for j in range(matrix_size):
                    matrix_copy[i][j] = matrix_copy[i][j] - current_row_scaler * matrix_copy[fd][j]
        product = 1.0
        for i in range(matrix_size):
            product *= matrix_copy[i][i]
        return product

    @classmethod
    def from_vector(cls, vector: "hm.Vector") -> "Matrix":
        """
        Convert a `Vector` into a `Matrix` object.

        Parameters
        ----------
        vector: Vector
            The vector which is going to be converted into Matrix.

        Returns
        -------
        Matrix
            The matrix formed after conversion of vector.

        Examples
        --------
        >>> from hypemaths import Vector
        >>> vec = Vector(1, 2, 3, 4)
        >>> vec
        Vector([1, 2, 3, 4])
        >>> Matrix.from_vector(vec)
        Matrix([[1], [2], [3], [4]])
        """
        # Each vector entry becomes its own single-element row (column matrix).
        matrix_list = [[value] for value in vector]
        return cls(matrix_list)

    def flatten(self) -> "hm.Vector":
        """
        Return a flattened version of the matrix.

        All elements of the matrix are placed into a single row.

        Returns
        -------
        hm.Vector
            A vector containing the elements of the matrix passed.

        Examples
        --------
        >>> m = hm.Matrix([[1,2], [3,4]])
        >>> m.flatten()
        Vector([1, 2, 3, 4])
        """
        flat_list = []
        for element in self.matrix:
            if type(element) is list:
                # If the element is of type list, iterate through the sublist
                for item in element:
                    flat_list.append(item)
            else:
                flat_list.append(element)
        return hm.Vector(flat_list)

    def sum(self, axis: int = None) -> t.Union[int, float, "hm.Vector", "hm.Matrix"]:
        """
        Returns the sum of the entire matrix or along a specific axis

        Parameters
        ----------
        axis: {-1, 0, 1}, Optional
            The axis to sum along: 0 for column sums, 1 (or -1, the last
            axis of a 2D matrix) for row sums, `None` for the total sum.

        Returns
        -------
        int, float
            The sum of the elements of the matrix

        Examples
        --------
        >>> m = Matrix([[1, 2], [4, 2], [7, 2]])
        >>> m.sum()
        18
        >>> m.sum(0)
        Matrix([[12, 6]])
        >>> m.sum(1)
        Matrix([[3], [6], [9]])
        """
        if axis is None:
            return sum(self.flatten())
        if axis not in (-1, 0, 1):
            raise TypeError(f"Axis {axis} is out of bounds for array of 2nd dimension.")
        if axis == 0:
            # Column sums. (No float() coercion: keeps ints as ints, matching
            # the documented example and the axis-1 branch.)
            return Matrix([sum(column) for column in zip(*self)])
        # axis == 1 or axis == -1: row sums. (axis=-1 previously fell through
        # every branch and silently returned None.)
        return Matrix([[sum(row)] for row in self])
|
nilq/baby-python
|
python
|
#
# Qutebrowser Config
#
from cconfig import CConfig
# Custom state full config options
cc = CConfig(config)
cc.redirect = True
# ==================== General Settings ==================================
c.hints.chars = 'dfghjklcvbnm'
c.hints.uppercase = True
c.confirm_quit = ['never']
c.content.fullscreen.window = True
c.spellcheck.languages = ["de-DE", "en-GB", "en-US"]
c.tabs.show = 'never'
c.tabs.tabs_are_windows = True
c.new_instance_open_target = 'window'
c.url.default_page = 'about:blank'
c.url.start_pages = ['about:blank']
c.zoom.default = 150
c.content.autoplay = False
c.content.mute = True
c.fonts.web.size.minimum = 14
c.editor.command = ['st', '-e', 'nvim', '-f', '{file}', '-c',
'normal{line}G{column0}l']
c.content.default_encoding = 'utf-8'
# ==================== Privacy & Security ================================
c.content.javascript.enabled = False
c.content.cookies.accept = 'never'
c.content.plugins = False
c.content.geolocation = False
c.content.pdfjs = False
c.content.webgl = False
c.content.javascript.can_access_clipboard = False
c.content.headers.referer = 'same-domain'
c.content.dns_prefetch = False
c.content.canvas_reading = True # some websites break when disabled
c.content.headers.do_not_track = False # can be used to fingerprint
c.content.webrtc_ip_handling_policy = 'disable-non-proxied-udp'
c.content.hyperlink_auditing = False
# ==================== Adblock ===========================================
c.content.blocking.enabled = True
c.content.blocking.method = 'both'
c.content.blocking.hosts.block_subdomains = True
c.content.blocking.adblock.lists = [
"https://easylist.to/easylist/easylist.txt",
"https://easylist.to/easylist/easyprivacy.txt",
"https://easylist.to/easylist/fanboy-social.txt",
"https://secure.fanboy.co.nz/fanboy-cookiemonster.txt",
"https://secure.fanboy.co.nz/fanboy-annoyance.txt",
"https://easylist-downloads.adblockplus.org/antiadblockfilters.txt",
"https://curben.gitlab.io/malware-filter/urlhaus-filter-online.txt",
"https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/legacy.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters-2020.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/filters-2021.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/badware.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/privacy.txt",
"https://github.com/uBlockOrigin/uAssets/raw/master/filters/resource-abuse.txt",
]
c.content.blocking.hosts.lists = [
"https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts",
]
# ==================== Dark Mode =========================================
c.colors.webpage.preferred_color_scheme = 'dark'
c.colors.webpage.darkmode.enabled = True
c.colors.webpage.darkmode.algorithm = 'lightness-cielab'
c.colors.webpage.darkmode.policy.images = 'always'
c.colors.webpage.darkmode.grayscale.images = 0.5
c.colors.webpage.darkmode.threshold.background = 150
c.colors.webpage.darkmode.threshold.text = 120
c.colors.webpage.bg = '#000000'
c.content.user_stylesheets = ['~/.config/qutebrowser/css/custom-dark.css']
# ==================== Downloads =========================================
c.downloads.location.directory = '~/Downloads'
c.downloads.location.prompt = True
c.downloads.location.remember = True
c.downloads.location.suggestion = 'both'
c.downloads.open_dispatcher = "xdg-open '{}'"
c.downloads.position = 'bottom'
c.downloads.prevent_mixed_content = True
c.downloads.remove_finished = -1
# ==================== Aliases ===========================================
c.aliases = {
'w': 'session-save',
'q': 'quit',
'wq': 'quit --save',
'cs': 'config-source',
'au': 'adblock-update',
'qr': 'spawn --userscript qr',
}
# ==================== Bindings ==========================================
# Javascript
config.bind('ess', 'set -p -t content.javascript.enabled true ;; reload')
config.bind('eSs', 'set -p content.javascript.enabled true ;; reload')
config.bind('esh', 'set -p -t -u *://{url:host}/* content.javascript.enabled true ;; reload')
config.bind('eSh', 'set -p -u *://{url:host}/* content.javascript.enabled true ;; reload')
config.bind('esH', 'set -p -t -u *://*.{url:host}/* content.javascript.enabled true ;; reload')
config.bind('eSH', 'set -p -u *://*.{url:host}/* content.javascript.enabled true ;; reload')
config.bind('esu', 'set -p -t -u {url} content.javascript.enabled true ;; reload')
config.bind('eSu', 'set -p -u {url} content.javascript.enabled true ;; reload')
config.unbind('d')
config.bind('dss', 'set -p -t content.javascript.enabled false ;; reload')
config.bind('dSs', 'set -p content.javascript.enabled false ;; reload')
config.bind('dsh', 'set -p -t -u *://{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dSh', 'set -p -u *://{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dsH', 'set -p -t -u *://*.{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dSH', 'set -p -u *://*.{url:host}/* content.javascript.enabled false ;; reload')
config.bind('dsu', 'set -p -t -u {url} content.javascript.enabled false ;; reload')
config.bind('dSu', 'set -p -u {url} content.javascript.enabled false ;; reload')
config.bind('tss', 'config-cycle -p -t content.javascript.enabled ;; reload')
config.bind('tSs', 'config-cycle -p content.javascript.enabled ;; reload')
# Cookies
config.bind('ecc', 'set -p -t content.cookies.accept no-3rdparty ;; reload')
config.bind('eCc', 'set -p content.cookies.accept no-3rdparty ;; reload')
config.bind('ech', 'set -p -t -u *://{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('eCh', 'set -p -u *://{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('ecH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('eCH', 'set -p -u *://*.{url:host}/* content.cookies.accept no-3rdparty ;; reload')
config.bind('ecu', 'set -p -t -u {url} content.cookies.accept no-3rdparty ;; reload')
config.bind('eCu', 'set -p -u {url} content.cookies.accept no-3rdparty ;; reload')
config.bind('ecac', 'set -p -t content.cookies.accept all ;; reload')
config.bind('eCac', 'set -p content.cookies.accept all ;; reload')
config.bind('ecah', 'set -p -t -u *://{url:host}/* content.cookies.accept all ;; reload')
config.bind('eCah', 'set -p -u *://{url:host}/* content.cookies.accept all ;; reload')
config.bind('ecaH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept all ;; reload')
config.bind('eCaH', 'set -p -u *://*.{url:host}/* content.cookies.accept all ;; reload')
config.bind('ecau', 'set -p -t -u {url} content.cookies.accept all ;; reload')
config.bind('eCau', 'set -p -u {url} content.cookies.accept all ;; reload')
config.bind('dcc', 'set -p -t content.cookies.accept never ;; reload')
config.bind('dCc', 'set -p content.cookies.accept never ;; reload')
config.bind('dch', 'set -p -t -u *://{url:host}/* content.cookies.accept never ;; reload')
config.bind('dCh', 'set -p -u *://{url:host}/* content.cookies.accept never ;; reload')
config.bind('dcH', 'set -p -t -u *://*.{url:host}/* content.cookies.accept never ;; reload')
config.bind('dCH', 'set -p -u *://*.{url:host}/* content.cookies.accept never ;; reload')
config.bind('dcu', 'set -p -t -u {url} content.cookies.accept never ;; reload')
config.bind('dCu', 'set -p -u {url} content.cookies.accept never ;; reload')
config.bind('tcc', 'config-cycle -p -t content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCc', 'config-cycle -p content.cookies.accept no-3rdparty never ;; reload')
config.bind('tch', 'config-cycle -p -t -u *://{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCh', 'config-cycle -p -u *://{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcH', 'config-cycle -p -t -u *://*.{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCH', 'config-cycle -p -u *://*.{url:host}/* content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcu', 'config-cycle -p -t -u {url} content.cookies.accept no-3rdparty never ;; reload')
config.bind('tCu', 'config-cycle -p -u {url} content.cookies.accept no-3rdparty never ;; reload')
config.bind('tcac', 'config-cycle -p -t content.cookies.accept all never ;; reload')
config.bind('tCac', 'config-cycle -p content.cookies.accept all never ;; reload')
config.bind('tcah', 'config-cycle -p -t -u *://{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tCah', 'config-cycle -p -u *://{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tcaH', 'config-cycle -p -t -u *://*.{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tCaH', 'config-cycle -p -u *://*.{url:host}/* content.cookies.accept all never ;; reload')
config.bind('tcau', 'config-cycle -p -t -u {url} content.cookies.accept all never ;; reload')
config.bind('tCau', 'config-cycle -p -u {url} content.cookies.accept all never ;; reload')
# AdBlocker
config.bind('ebb', 'set -p -t content.blocking.enabled true ;; reload')
config.bind('eBb', 'set -p content.blocking.enabled true ;; reload')
config.bind('ebh', 'set -p -t -u *://{url:host}/* content.blocking.enabled true ;; reload')
config.bind('eBh', 'set -p -u *://{url:host}/* content.blocking.enabled true ;; reload')
config.bind('ebH', 'set -p -t -u *://*.{url:host}/* content.blocking.enabled true ;; reload')
config.bind('eBH', 'set -p -u *://*.{url:host}/* content.blocking.enabled true ;; reload')
config.bind('ebu', 'set -p -t -u {url} content.blocking.enabled true ;; reload')
config.bind('eBu', 'set -p -u {url} content.blocking.enabled true ;; reload')
config.bind('dbb', 'set -p -t content.blocking.enabled false ;; reload')
config.bind('dBb', 'set -p content.blocking.enabled false ;; reload')
config.bind('dbh', 'set -p -t -u *://{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dBh', 'set -p -u *://{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dbH', 'set -p -t -u *://*.{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dBH', 'set -p -u *://*.{url:host}/* content.blocking.enabled false ;; reload')
config.bind('dbu', 'set -p -t -u {url} content.blocking.enabled false ;; reload')
config.bind('dBu', 'set -p -u {url} content.blocking.enabled false ;; reload')
config.bind('tbb', 'config-cycle -p -t content.blocking.enabled ;; reload')
config.bind('tBb', 'config-cycle -p content.blocking.enabled ;; reload')
config.bind('tbh', 'config-cycle -p -t -u *://{url:host}/* content.blocking.enabled ;; reload')
config.bind('tBh', 'config-cycle -p -u *://{url:host}/* content.blocking.enabled ;; reload')
config.bind('tbH', 'config-cycle -p -t -u *://*.{url:host}/* content.blocking.enabled ;; reload')
config.bind('tBH', 'config-cycle -p -u *://*.{url:host}/* content.blocking.enabled ;; reload')
config.bind('tbu', 'config-cycle -p -t -u {url} content.blocking.enabled ;; reload')
config.bind('tBu', 'config-cycle -p -u {url} content.blocking.enabled ;; reload')
# Images
config.bind('eii', 'set -p -t content.images true ;; reload')
config.bind('eIi', 'set -p content.images true ;; reload')
config.bind('eih', 'set -p -t -u *://{url:host}/* content.images true ;; reload')
config.bind('eIh', 'set -p -u *://{url:host}/* content.images true ;; reload')
config.bind('eiH', 'set -p -t -u *://*.{url:host}/* content.images true ;; reload')
config.bind('eIH', 'set -p -u *://*.{url:host}/* content.images true ;; reload')
config.bind('eiu', 'set -p -t -u {url} content.images true ;; reload')
config.bind('eIu', 'set -p -u {url} content.images true ;; reload')
config.bind('dii', 'set -p -t content.images false ;; reload')
config.bind('dIi', 'set -p content.images false ;; reload')
config.bind('dih', 'set -p -t -u *://{url:host}/* content.images false ;; reload')
config.bind('dIh', 'set -p -u *://{url:host}/* content.images false ;; reload')
config.bind('diH', 'set -p -t -u *://*.{url:host}/* content.images false ;; reload')
config.bind('dIH', 'set -p -u *://*.{url:host}/* content.images false ;; reload')
config.bind('diu', 'set -p -t -u {url} content.images false ;; reload')
config.bind('dIu', 'set -p -u {url} content.images false ;; reload')
# Plugins: generate the enable/disable bindings for content.plugins.
# Same key grammar as the other content.* sections (see Images above in the
# original layout): e/d prefix, p/P tab-vs-global, p/h/H/u scope suffix.
for _state, _value in (('e', 'true'), ('d', 'false')):
    for _scope, _pattern in (('p', ''),
                             ('h', '*://{url:host}/* '),
                             ('H', '*://*.{url:host}/* '),
                             ('u', '{url} ')):
        _url = '-u ' + _pattern if _pattern else ''
        config.bind(_state + 'p' + _scope,
                    'set -p -t ' + _url + 'content.plugins ' + _value + ' ;; reload')
        config.bind(_state + 'P' + _scope,
                    'set -p ' + _url + 'content.plugins ' + _value + ' ;; reload')
# Tor proxy: enable/disable/cycle routing through the local Tor SOCKS port.
_tor_proxy = 'socks://127.0.0.1:9050'
config.bind('et', 'set -p -t content.proxy ' + _tor_proxy)
config.bind('dt', 'set -p -t content.proxy none')
config.bind('tt', 'config-cycle -p -t content.proxy none ' + _tor_proxy)
# Mute (content.mute): e…=set true, d…=set false, t…=config-cycle.
# Lowercase second letter adds -t; third letter picks the -u pattern:
# m=no pattern, h=*://{url:host}/*, H=*://*.{url:host}/*, u={url}.
config.bind('emm', 'set -p -t content.mute true')
config.bind('eMm', 'set -p content.mute true')
config.bind('emh', 'set -p -t -u *://{url:host}/* content.mute true')
config.bind('eMh', 'set -p -u *://{url:host}/* content.mute true')
config.bind('emH', 'set -p -t -u *://*.{url:host}/* content.mute true')
config.bind('eMH', 'set -p -u *://*.{url:host}/* content.mute true')
config.bind('emu', 'set -p -t -u {url} content.mute true')
config.bind('eMu', 'set -p -u {url} content.mute true')
config.bind('dmm', 'set -p -t content.mute false')
config.bind('dMm', 'set -p content.mute false')
config.bind('dmh', 'set -p -t -u *://{url:host}/* content.mute false')
config.bind('dMh', 'set -p -u *://{url:host}/* content.mute false')
# FIX: pattern was '*://*.{url:host}/' (missing trailing '*'), so this
# binding never matched subdomain pages; aligned with the other H bindings.
config.bind('dmH', 'set -p -t -u *://*.{url:host}/* content.mute false')
config.bind('dMH', 'set -p -u *://*.{url:host}/* content.mute false')
config.bind('dmu', 'set -p -t -u {url} content.mute false')
config.bind('dMu', 'set -p -u {url} content.mute false')
config.bind('tmm', 'config-cycle -p -t content.mute')
config.bind('tMm', 'config-cycle -p content.mute')
config.bind('tmh', 'config-cycle -p -t -u *://{url:host}/* content.mute')
config.bind('tMh', 'config-cycle -p -u *://{url:host}/* content.mute')
config.bind('tmH', 'config-cycle -p -t -u *://*.{url:host}/* content.mute')
config.bind('tMH', 'config-cycle -p -u *://*.{url:host}/* content.mute')
config.bind('tmu', 'config-cycle -p -t -u {url} content.mute')
config.bind('tMu', 'config-cycle -p -u {url} content.mute')
# Local Storage (content.local_storage): e…=true, d…=false, t…=cycle,
# each followed by a reload. Lowercase second letter adds -t; third letter
# picks the -u pattern (l=none, h=host, H=host and subdomains, u=exact URL).
config.bind('ell', 'set -p -t content.local_storage true ;; reload')
config.bind('eLl', 'set -p content.local_storage true ;; reload')
config.bind('elh', 'set -p -t -u *://{url:host}/* content.local_storage true ;; reload')
config.bind('eLh', 'set -p -u *://{url:host}/* content.local_storage true ;; reload')
config.bind('elH', 'set -p -t -u *://*.{url:host}/* content.local_storage true ;; reload')
config.bind('eLH', 'set -p -u *://*.{url:host}/* content.local_storage true ;; reload')
config.bind('elu', 'set -p -t -u {url} content.local_storage true ;; reload')
config.bind('eLu', 'set -p -u {url} content.local_storage true ;; reload')
config.bind('dll', 'set -p -t content.local_storage false ;; reload')
config.bind('dLl', 'set -p content.local_storage false ;; reload')
config.bind('dlh', 'set -p -t -u *://{url:host}/* content.local_storage false ;; reload')
config.bind('dLh', 'set -p -u *://{url:host}/* content.local_storage false ;; reload')
# FIX: pattern was '*://*.{url:host}/' (missing trailing '*'), so this
# binding never matched subdomain pages; aligned with the other H bindings.
config.bind('dlH', 'set -p -t -u *://*.{url:host}/* content.local_storage false ;; reload')
config.bind('dLH', 'set -p -u *://*.{url:host}/* content.local_storage false ;; reload')
config.bind('dlu', 'set -p -t -u {url} content.local_storage false ;; reload')
config.bind('dLu', 'set -p -u {url} content.local_storage false ;; reload')
config.bind('tll', 'config-cycle -p -t content.local_storage ;; reload')
config.bind('tLl', 'config-cycle -p content.local_storage ;; reload')
config.bind('tlh', 'config-cycle -p -t -u *://{url:host}/* content.local_storage ;; reload')
config.bind('tLh', 'config-cycle -p -u *://{url:host}/* content.local_storage ;; reload')
config.bind('tlH', 'config-cycle -p -t -u *://*.{url:host}/* content.local_storage ;; reload')
config.bind('tLH', 'config-cycle -p -u *://*.{url:host}/* content.local_storage ;; reload')
config.bind('tlu', 'config-cycle -p -t -u {url} content.local_storage ;; reload')
config.bind('tLu', 'config-cycle -p -u {url} content.local_storage ;; reload')
# Clipboard access for JavaScript (content.javascript.can_access_clipboard):
# e…=true, d…=false, t…=cycle, each followed by a reload. Lowercase second
# letter adds -t; third letter picks the -u pattern (y=none, h=host,
# H=host and subdomains, u=exact URL).
config.bind('eyy', 'set -p -t content.javascript.can_access_clipboard true ;; reload')
config.bind('eYy', 'set -p content.javascript.can_access_clipboard true ;; reload')
config.bind('eyh', 'set -p -t -u *://{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eYh', 'set -p -u *://{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eyH', 'set -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eYH', 'set -p -u *://*.{url:host}/* content.javascript.can_access_clipboard true ;; reload')
config.bind('eyu', 'set -p -t -u {url} content.javascript.can_access_clipboard true ;; reload')
config.bind('eYu', 'set -p -u {url} content.javascript.can_access_clipboard true ;; reload')
config.bind('dyy', 'set -p -t content.javascript.can_access_clipboard false ;; reload')
config.bind('dYy', 'set -p content.javascript.can_access_clipboard false ;; reload')
config.bind('dyh', 'set -p -t -u *://{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dYh', 'set -p -u *://{url:host}/* content.javascript.can_access_clipboard false ;; reload')
# FIX: pattern was '*://*.{url:host}/' (missing trailing '*'), so this
# binding never matched subdomain pages; aligned with the other H bindings.
config.bind('dyH', 'set -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dYH', 'set -p -u *://*.{url:host}/* content.javascript.can_access_clipboard false ;; reload')
config.bind('dyu', 'set -p -t -u {url} content.javascript.can_access_clipboard false ;; reload')
config.bind('dYu', 'set -p -u {url} content.javascript.can_access_clipboard false ;; reload')
config.bind('tyy', 'config-cycle -p -t content.javascript.can_access_clipboard ;; reload')
config.bind('tYy', 'config-cycle -p content.javascript.can_access_clipboard ;; reload')
config.bind('tyh', 'config-cycle -p -t -u *://{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tYh', 'config-cycle -p -u *://{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tyH', 'config-cycle -p -t -u *://*.{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tYH', 'config-cycle -p -u *://*.{url:host}/* content.javascript.can_access_clipboard ;; reload')
config.bind('tyu', 'config-cycle -p -t -u {url} content.javascript.can_access_clipboard ;; reload')
config.bind('tYu', 'config-cycle -p -u {url} content.javascript.can_access_clipboard ;; reload')
# redirect
# Toggle the external 'redirect' userscript on/off.
config.bind('er', 'spawn --userscript redirect True')
config.bind('dr', 'spawn --userscript redirect False')
# rebinds
config.bind('q', 'close')  # q closes the window instead of recording a macro
config.bind('O', 'set-cmd-text -s :open -w')  # open in a new window
config.bind('F', 'hint all window ')  # hinted links open in a new window
config.bind('I', 'hint -f inputs normal ')
config.bind('m', 'tab-mute')
config.bind('gc', 'tab-clone')
# config.bind('<Escape>', 'mode-leave ;; jseval -q document.activeElement.blur()', mode='insert')
config.bind('<Ctrl+Escape>', 'fake-key <Escape>')  # send a literal Escape to the page
# Leader-key bindings: <Space>-prefixed shortcuts for external tools.
leader = '<Space>'
# Double-tapping the leader sends a literal space key to the page.
config.bind(f'{leader}{leader}', f'fake-key {leader}')
config.bind(f'{leader}o', 'set-cmd-text -s :open -p')
# mpv on a hinted link / a rapid series of links / the current page
config.bind(f'{leader}vv', 'hint links spawn --detach mpv "{hint-url}"')
config.bind(f'{leader}vr', 'hint -r links spawn --detach mpv "{hint-url}"')
config.bind(f'{leader}vu', 'spawn --detach mpv "{url}"')
# ytdl on a hinted link / rapid links / the current page
config.bind(f'{leader}dd', 'hint links spawn ytdl "{hint-url}"')
config.bind(f'{leader}dr', 'hint -r links spawn --detach ytdl "{hint-url}"')
config.bind(f'{leader}du', 'spawn --detach ytdl "{url}"')
# img helper on a hinted image / rapid images / the current URL
config.bind(f'{leader}ii', 'hint images spawn --detach img -u "{hint-url}"')
config.bind(f'{leader}ir', 'hint -r images spawn --detach img -u "{hint-url}"')
config.bind(f'{leader}iu', 'spawn --detach img -u "{url}"')
# Open in other browsers.
config.bind(f'{leader}cc', 'hint links spawn --detach chromium "{hint-url}"')
config.bind(f'{leader}cr', 'hint -r links spawn --detach chromium "{hint-url}"')
config.bind(f'{leader}cu', 'spawn --detach chromium "{url}"')
config.bind(f'{leader}ff', 'hint links spawn --detach firefox "{hint-url}"')
config.bind(f'{leader}fr', 'hint -r links spawn --detach firefox "{hint-url}"')
config.bind(f'{leader}fu', 'spawn --detach firefox "{url}"')
# tm helper (-a) on a hinted link / rapid links / the current page
config.bind(f'{leader}tt', 'hint links spawn --detach tm -a "{hint-url}"')
config.bind(f'{leader}tr', 'hint -r links spawn --detach tm -a "{hint-url}"')
config.bind(f'{leader}tu', 'spawn --detach tm -a "{url}"')
# qr userscript on a hinted link / the current page / rapid links
config.bind(f'{leader}qq', 'hint links userscript qr')
config.bind(f'{leader}qu', 'spawn --userscript qr')
config.bind(f'{leader}qr', 'hint -r links userscript qr')
# ==================== Search Engines ====================================
# Prefix a query with the key in :open, e.g. ":open d cats".
c.url.searchengines = {
    'DEFAULT': 'https://search.simonhugh.xyz/searx/search?q={}',  # Searx instance
    'd': 'https://duckduckgo.com/?q={}',                # DuckDuckGo
    'g': 'http://www.google.com/search?q={}',           # Google
    'm': 'https://www.google.com/maps/search/{}',       # Google Maps
    'y': 'https://www.youtube.com/results?search_query={}',  # YouTube
    'a': 'https://www.amazon.co.uk/s?k={}',             # Amazon
}
# redirect urls
# URL-redirect rules live in a separate module sourced into this config.
config.source('redirect.py')
# load autoconfig.yml
# True merges settings previously saved via :set alongside this file.
config.load_autoconfig(True)
|
nilq/baby-python
|
python
|
"""Misc funcs for backtester"""
import pandas as pd
from io import StringIO
from . import fb_amzn
def load_example():
    """Load the bundled FB/AMZN example dataset as a DataFrame.

    Parses the CSV text embedded in the fb_amzn module and converts the
    'date' column to timezone-aware timestamps (US/Central).
    """
    frame = pd.read_csv(StringIO(fb_amzn.data))
    frame['date'] = pd.to_datetime(frame['date']).dt.tz_localize('US/Central')
    return frame
|
nilq/baby-python
|
python
|
import pprint
import sys
import numpy as np
def pbatch(source, dic):
    """Print a one-line preview of each of the first 10 rows of the
    transposed batch.

    Each token id in *source* is looked up in *dic*; the special tokens
    SOS/EOS/ZERO/UNK are rendered as single marker characters ({ } _ |)
    so a whole sequence fits compactly on one line.

    FIX: the original used Python-2-only ``print`` statements (a
    SyntaxError on Python 3) and duplicated the token-translation chain;
    both replaced with parenthesized prints and a lookup table.
    """
    # Single-character stand-ins for the special vocabulary tokens.
    markers = {"SOS": "{", "EOS": "}", "ZERO": "_", "UNK": "|"}
    ss = np.transpose(source)
    for line in ss[:10]:
        for word in line:
            token = dic[word]
            sys.stdout.write(markers.get(token, token))
        print(" ")  # terminate the row (works on Python 2 and 3)
    print("")
def pbatch_many(source, dic, n_x):
ss = np.transpose(source)
iis = [0, 20, n_x-8,n_x-1]
for ii in iis:
line = ss[ii]
for word in line:
a = dic[word]
b = a
if a == "SOS":
b = "{"
elif a == "EOS":
b = "}"
elif a == "ZERO":
b = "_"
elif a == "UNK":
b = "|"
sys.stdout.write(b)
print " "
print ""
|
nilq/baby-python
|
python
|
from cryptofield.fieldmatrix import *
import unittest
class TestFMatrix(unittest.TestCase):
    """Exercise row/column access and inversion of FMatrix over GF(2^n)."""

    def testMatrixGetRow1(self):
        # Row 1 of a 3x3 identity is (0, 1, 0).
        field = FField(4)
        mat = FMatrix(field, 3, 3)
        mat.ident()
        expected = [FElement(field, bit) for bit in (0, 1, 0)]
        self.assertEqual(mat.getRow(1), expected)

    def testMatrixGetColumn1(self):
        # Column 0 of a 5x5 identity is (1, 0, 0, 0, 0).
        field = FField(4)
        mat = FMatrix(field, 5, 5)
        mat.ident()
        expected = [FElement(field, bit) for bit in (1, 0, 0, 0, 0)]
        self.assertEqual(mat.getColumn(0), expected)

    def testMatrixInverse1(self):
        # A permutation-like 4x4 matrix over GF(2) and its known inverse.
        field = FField(2)
        mat = FMatrix(field, 4, 4)
        for i, bits in enumerate(((0, 0, 0, 1), (0, 0, 1, 0),
                                  (0, 1, 0, 0), (1, 0, 0, 1))):
            mat.setRow(i, [FElement(field, b) for b in bits])
        expected = FMatrix(field, 4, 4)
        for i, bits in enumerate(((1, 0, 0, 1), (0, 0, 1, 0),
                                  (0, 1, 0, 0), (1, 0, 0, 0))):
            expected.setRow(i, [FElement(field, b) for b in bits])
        self.assertEqual(mat.inverse(), expected)
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.5 on 2019-10-28 17:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare Rating.service so deleting a
    Service cascades to its ratings, reachable via ``service.ratings``.

    Do not edit by hand beyond comments; Django tracks migration state.
    """

    dependencies = [
        ('service', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='rating',
            name='service',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='service.Service'),
        ),
    ]
|
nilq/baby-python
|
python
|
def binary_search(input_array, value):
    """Return the index of the first occurrence of *value* in the sorted
    sequence *input_array*, or -1 if it is not present.

    Runs in O(log n). FIX: the original finished the search and then
    returned ``input_array.index(value)``, an O(n) linear rescan that
    defeated the point of bisection; this leftmost-bisection form returns
    the same index (the first occurrence) without the rescan.
    """
    low, high = 0, len(input_array)
    # Invariant: everything before `low` is < value; everything at or
    # after `high` is >= value.
    while low < high:
        middle = (low + high) // 2
        if input_array[middle] < value:
            low = middle + 1
        else:
            high = middle
    if low < len(input_array) and input_array[low] == value:
        return low
    return -1
|
nilq/baby-python
|
python
|
from rest_framework import permissions
class IsBuyerOrSellerUser(permissions.BasePermission):
    """Grant access only to authenticated users flagged as buyer-or-seller."""

    def has_permission(self, request, view):
        # Both conditions must hold; bool() normalizes the result exactly
        # as the original explicit True/False branches did.
        user = request.user
        return bool(user.is_authenticated and user.is_buyer_or_seller)
|
nilq/baby-python
|
python
|
from copy import deepcopy
from dbt.contracts.graph.manifest import WritableManifest
from dbt.contracts.results import CatalogArtifact
def edit_catalog(
    catalog: CatalogArtifact, manifest: WritableManifest
) -> CatalogArtifact:
    """Return a copy of *catalog* pruned down to the nodes and sources
    that still exist in *manifest*; the input catalog is left untouched.
    """
    pruned = deepcopy(catalog)
    # Snapshot the key lists so entries can be popped while walking them.
    for node_name in list(pruned.nodes):
        if node_name not in manifest.nodes:
            pruned.nodes.pop(node_name)
    for source_name in list(pruned.sources):
        if source_name not in manifest.sources:
            pruned.sources.pop(source_name)
    return pruned
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
encryption test_services module.
"""
import pytest
import pyrin.security.encryption.services as encryption_services
import pyrin.configuration.services as config_services
from pyrin.security.encryption.handlers.aes128 import AES128Encrypter
from pyrin.security.encryption.handlers.rsa256 import RSA256Encrypter
from pyrin.security.encryption.exceptions import DuplicatedEncryptionHandlerError, \
InvalidEncryptionHandlerTypeError, EncryptionHandlerNotFoundError, DecryptionError
def test_register_encryption_handler_duplicate():
    """Registering a handler name that is already present must raise."""
    with pytest.raises(DuplicatedEncryptionHandlerError):
        encryption_services.register_encryption_handler(AES128Encrypter())
def test_register_encryption_handler_invalid_type():
    """Registering something that is not an encryption handler must raise."""
    with pytest.raises(InvalidEncryptionHandlerTypeError):
        encryption_services.register_encryption_handler(25)
def test_register_encryption_handler_duplicate_with_replace():
    """Re-registering an existing handler with replace=True must succeed."""
    encryption_services.register_encryption_handler(RSA256Encrypter(), replace=True)
def test_encrypt_default():
    """The default handler's name must be embedded in its ciphertext."""
    encrypted = encryption_services.encrypt('confidential')
    assert encrypted is not None
    default_handler = config_services.get('security', 'encryption',
                                          'default_encryption_handler')
    assert default_handler in encrypted
def test_encrypt_aes128():
    """Encrypting with the aes128 handler must tag the output with 'AES128'."""
    encrypted = encryption_services.encrypt('confidential', handler_name='AES128')
    assert encrypted is not None
    assert 'AES128' in encrypted
def test_encrypt_rsa256():
    """Encrypting with the rsa256 handler must tag the output with 'RSA256'."""
    encrypted = encryption_services.encrypt('confidential', handler_name='RSA256')
    assert encrypted is not None
    assert 'RSA256' in encrypted
def test_encrypt_invalid_handler():
    """Encrypting with an unknown handler name must raise."""
    with pytest.raises(EncryptionHandlerNotFoundError):
        encryption_services.encrypt('confidential', handler_name='missing_handler')
def test_decrypt_default():
    """A value encrypted with the default handler must round-trip intact."""
    secret = 'confidential'
    encrypted = encryption_services.encrypt(secret)
    assert encryption_services.decrypt(encrypted) == secret
def test_decrypt_aes128():
    """A value encrypted with the aes128 handler must round-trip intact."""
    secret = 'confidential'
    encrypted = encryption_services.encrypt(secret, handler_name='AES128')
    assert encryption_services.decrypt(encrypted) == secret
def test_decrypt_rsa256():
    """A value encrypted with the rsa256 handler must round-trip intact."""
    secret = 'confidential'
    encrypted = encryption_services.encrypt(secret, handler_name='RSA256')
    assert encryption_services.decrypt(encrypted) == secret
def test_decrypt_invalid_value():
    """Tampering with the ciphertext must make decryption fail."""
    with pytest.raises(DecryptionError):
        tampered = encryption_services.encrypt('confidential').replace('o', 'b')
        encryption_services.decrypt(tampered)
def test_decrypt_invalid_handler():
    """Decrypting a token whose handler tag is unknown must fail."""
    with pytest.raises(EncryptionHandlerNotFoundError):
        token = encryption_services.encrypt('confidential')
        default_handler = config_services.get('security', 'encryption',
                                              'default_encryption_handler')
        # Swap the real handler tag for a nonexistent one.
        encryption_services.decrypt(token.replace(default_handler, 'missing handler'))
def test_decrypt_mismatch_handler():
    """Decrypting with a handler other than the encrypting one must fail."""
    with pytest.raises(DecryptionError):
        handler = 'AES128'
        mismatch_handler = 'RSA256'
        token = encryption_services.encrypt('confidential', handler_name=handler)
        # Relabel the ciphertext so the wrong handler is selected.
        encryption_services.decrypt(token.replace(handler, mismatch_handler))
def test_generate_key_aes128():
    """The aes128 handler must produce a non-empty key."""
    key = encryption_services.generate_key('AES128')
    assert key is not None and len(key) > 0
def test_generate_key_rsa256():
    """The rsa256 handler must produce a non-empty public/private key pair."""
    public, private = encryption_services.generate_key('RSA256')
    assert public is not None and private is not None
    assert len(public) > 0 and len(private) > 0
def test_generate_key_invalid_handler():
    """Requesting a key from an unknown handler must raise."""
    with pytest.raises(EncryptionHandlerNotFoundError):
        encryption_services.generate_key('missing handler')
def test_encrypter_is_singleton():
    """Each encrypter class must hand out a single shared instance."""
    assert AES128Encrypter() == AES128Encrypter()
    assert RSA256Encrypter() == RSA256Encrypter()
|
nilq/baby-python
|
python
|
"""
HoNCore. Python library providing connectivity and functionality
with HoN's chat server.
Packet ID definitions.
Updated 23-7-11.
Client version 2.40.2
"""
""" Server -> Client """
HON_SC_AUTH_ACCEPTED = 0x1C00
HON_SC_PING = 0x2A00
HON_SC_CHANNEL_MSG = 0x03
HON_SC_JOINED_CHANNEL = 0x04
HON_SC_ENTERED_CHANNEL = 0x05
HON_SC_LEFT_CHANNEL = 0x06
HON_SC_WHISPER = 0x08
HON_SC_WHISPER_FAILED = 0x09
HON_SC_INITIAL_STATUS = 0x0B
HON_SC_UPDATE_STATUS = 0x0C
HON_SC_CLAN_MESSAGE = 0x13
HON_SC_LOOKING_FOR_CLAN = 0x18
HON_SC_PM = 0x1C
HON_SC_PM_FAILED = 0x1D
HON_SC_WHISPER_BUDDIES = 0x20
HON_SC_MAX_CHANNELS = 0x21
HON_SC_USER_INFO_NO_EXIST = 0x2B
HON_SC_USER_INFO_OFFLINE = 0x2C
HON_SC_USER_INFO_ONLINE = 0x2D
HON_SC_USER_INFO_IN_GAME = 0x2E
HON_SC_CHANNEL_UPDATE = 0x2F
HON_SC_CHANNEL_UPDATE_TOPIC = 0x30
HON_SC_CHANNEL_KICK = 0x31
HON_SC_CHANNEL_BAN = 0x32
HON_SC_CHANNEL_UNBAN = 0x33
HON_SC_CHANNEL_BANNED = 0x34
HON_SC_CHANNEL_SILENCED = 0x35
HON_SC_CHANNEL_SILENCE_LIFTED = 0x36
HON_SC_CHANNEL_SILENCE_PLACED = 0x37
HON_SC_MESSAGE_ALL = 0x39
HON_SC_CHANNEL_PROMOTE = 0x3A
HON_SC_CHANNEL_DEMOTE = 0x3B
HON_SC_CHANNEL_AUTH_ENABLE = 0x3E
HON_SC_CHANNEL_AUTH_DISABLE = 0x3F
HON_SC_CHANNEL_AUTH_ADD = 0x40
HON_SC_CHANNEL_AUTH_DELETE = 0x41
HON_SC_CHANNEL_AUTH_LIST = 0x42
HON_SC_CHANNEL_PASSWORD_CHANGED = 0x43
HON_SC_CHANNEL_AUTH_ADD_FAIL = 0x44
HON_SC_CHANNEL_AUTH_DEL_FAIL = 0x45
HON_SC_JOIN_CHANNEL_PASSWORD = 0x46
HON_SC_CLAN_MEMBER_ADDED = 0x4E
HON_SC_NAME_CHANGE = 0x5A
HON_SC_CHANNEL_EMOTE = 0x65
HON_SC_TOTAL_ONLINE = 0x68
HON_SC_REQUEST_NOTIFICATION = 0xB2
HON_SC_NOTIFICATION = 0xB4
"Reverse-engineered"
HON_SC_GAME_INVITE = 0x25
""" GameServer -> Client """
HON_GSC_PACKET_RECV = "HON_GSC_PACKET_RECV"
HON_GSC_PING = 0x4c
HON_GSC_AUTH_ACCEPTED = 0x5c
HON_GSC_CHANNEL_MSG = 0x6c
HON_GSC_TIMEOUT = 0x5101
HON_GSC_SERVER_STATE = 0x03
HON_GSC_SERVER_INFO = 0x01
""" Client -> Server """
HON_CS_PONG = 0x2A01
HON_CS_CHANNEL_MSG = 0x03
HON_CS_WHISPER = 0x08
HON_CS_AUTH_INFO = 0x0C00
HON_CS_BUDDY_ADD_NOTIFY = 0x0D
HON_CS_JOIN_GAME = 0x10
HON_CS_CLAN_MESSAGE = 0x13
HON_CS_PM = 0x1C
HON_CS_JOIN_CHANNEL = 0x1E
HON_CS_WHISPER_BUDDIES = 0x20
HON_CS_LEAVE_CHANNEL = 0x22
HON_CS_USER_INFO = 0x2A
HON_CS_UPDATE_TOPIC = 0x30
HON_CS_CHANNEL_KICK = 0x31
HON_CS_CHANNEL_BAN = 0x33
HON_CS_CHANNEL_UNBAN = 0x32
HON_CS_CHANNEL_SILENCE_USER = 0x38
HON_CS_CHANNEL_PROMOTE = 0x3A
HON_CS_CHANNEL_DEMOTE = 0x3B
HON_CS_CHANNEL_AUTH_ENABLE = 0x3E
HON_CS_CHANNEL_AUTH_DISABLE = 0x3F
HON_CS_CHANNEL_AUTH_ADD = 0x40
HON_CS_CHANNEL_AUTH_DELETE = 0x41
HON_CS_CHANNEL_AUTH_LIST = 0x42
HON_CS_CHANNEL_SET_PASSWORD = 0x43
HON_CS_JOIN_CHANNEL_PASSWORD = 0x46
HON_CS_CLAN_ADD_MEMBER = 0x47
HON_CS_CHANNEL_EMOTE = 0x65
HON_CS_BUDDY_ACCEPT = 0xB3
HON_CS_START_MM_GROUP = 0x0C0A
HON_CS_INVITE_TO_MM = 0x0C0D
"Reverse-engineered"
HON_CS_GAME_INVITE = 0x24
HON_CS_GAME_SERVER_IP = 0xf
HON_CS_GAME_SERVER_INFO = 0x1000
""" Client -> GameServer """
HON_CGS_PONG = 0
HON_CGS_AUTH_INFO = 0xc001
HON_CGS_AUTH_MAGIC_PACKET = 0xc901cbcf
# Dummy Events / Custom events?
HON_SC_PACKET_RECV = "HON_SC_PACKET_RECV"
HON_GSC_PACKET_RECV = "HON_GSC_PACKET_RECV"
""" User Flags"""
HON_FLAGS_NONE = 0x00
HON_FLAGS_OFFICER = 0x01
HON_FLAGS_LEADER = 0x02
HON_FLAGS_ADMINISTRATOR = 0x03
HON_FLAGS_STAFF = 0x04
HON_FLAGS_PREPURCHASED = 0x40
""" User States"""
HON_STATUS_OFFLINE = 0
HON_STATUS_ONLINE = 3
HON_STATUS_INLOBBY = 4
HON_STATUS_INGAME = 5
""" Login Modes"""
HON_MODE_NORMAL = 0x00
HON_MODE_INVISIBLE = 0x03
""" Game Server"""
GAME_SERVER_TYPE = 90
MAXIMUM_SERVER_PING = 90
""" Team Slots"""
TEAM_SLOTS = {
'BLUE' : (1, 0),
'TEAL' : (1, 1),
'PURPLE' : (1, 2),
'YELLOW' : (1, 3),
'ORANGE' : (1, 4),
'PINK' : (2, 0),
'GREY' : (2, 1),
'LIGHTBLUE' : (2, 2),
'GREEN' : (2, 3),
'BROWN' : (2, 4),
'SPECTATOR' : (3, 0),
'REFEREE' : (4, 0)
}
HON_SERVER_VERSION = "2.6.10"
#HON_HOST_ID = 1542367444
HON_CONNECTION_ID = 52175
HON_HOST_ID = 1253506080
#HON_CONNECTION_ID = 24938
|
nilq/baby-python
|
python
|
"""A quantum tic tac toe running in command line"""
from qiskit import Aer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import CompositeGate
from qiskit import execute
import numpy as np
from composite_gates import cry,cnx,any_x,bus_or,x_bus
class Move():
    """Record of one game move: the board cell(s) it targets, which player
    made it, and (for quantum moves) the qubits that decide where it lands.
    """

    def __init__(self, indices, player, q1=None, q2=None):
        # q1/q2 stay None for purely classical, clash-free moves.
        self.indices, self.player = indices, player
        self.q1, self.q2 = q1, q2

    def __str__(self):
        return str([self.indices, self.player, self.q1, self.q2])
class Board():
    """Quantum tic-tac-toe board: tracks moves, their qubits, and a cell
    grid used purely as a textual graphic of the game state."""

    def __init__(self,x,y,print_info=False):
        """Set up an empty x-by-y board backed by a fresh quantum circuit."""
        #quantum register, classical register, quantum circuit.
        self.print_info=print_info
        self.q = QuantumRegister(1)
        self.c = ClassicalRegister(1)
        self.qc = QuantumCircuit(self.q, self.c)
        # Attach the custom composite-gate helpers to the circuit; they are
        # plain functions that take the circuit as their first argument.
        self.qc.cry = cry
        self.qc.x_bus = x_bus
        self.qc.cnx = cnx
        self.qc.any_x = any_x
        self.qc.bus_or = bus_or
        #the dimensions of the board
        self.x=x
        self.y=y
        #To keep track of what is in each cell, no entanglement etc.
        #Provides a graphic of the game state.
        self.cells = np.empty((x,y),dtype=object)
        self.cells[:]='' #Initially game is empty.
        self.game_full = False   # set when the user ends the game
        self.moves = []          # history of Move objects, in play order

    def __str__(self):
        """Textual board graphic (cell labels such as 'X1 O2')."""
        return str(self.cells)
    def add_move(self,indices,player):
        """Adds a move if it is non-clashing, otherwise passes it on.

        Returns 'ok', 'overfull', or 'Index out of range'; on success the
        textual cell grid is labelled with e.g. 'X3' (player letter plus
        move number) in every targeted cell.
        """
        # Reject out-of-bounds cells up front.
        for index in indices:
            if index[0] >= self.x:
                return 'Index out of range'
            if index[1] >= self.y:
                return 'Index out of range'
        status = self._add_move(indices,player)
        if status=='ok':
            # NOTE(review): char is only assigned for players 0/1; any other
            # player id would raise NameError here — confirm callers.
            if player==0:
                char = 'X'
            elif player==1:
                char = 'O'
            char+=str(len(self.moves))
            for index in indices:
                s = self.cells[index[0],index[1]]
                if s: #If the cell has some text
                    #Add char with a comma
                    self.cells[index[0],index[1]]+=' '+char
                else: #cell is empty so just add char
                    self.cells[index[0],index[1]]+=char
        # The board graphic is printed after every attempt, ok or not.
        print(self.cells)
        return status
    def _add_move(self,indices,player):
        """Actually adds the move if not clashing,
        otherwise passes it to _add_clashing_move.

        Returns 'ok' on success, or 'overfull' when a targeted cell is
        already occupied by a certain (single-cell) move.
        """
        # A two-cell move on the same cell twice collapses to one cell.
        if len(indices)==2:
            if indices[0]==indices[1]:
                indices = [indices[0]]
        num=len(indices)
        caught_clashes = False #turns true if all moves are safe clashes
        for existing_move in self.moves:
            for index in indices:
                if index in existing_move.indices:
                    if len(existing_move.indices)==1:
                        return 'overfull'
                        #This move will ALWAYS be there, if it can.
                        #hence, overfull.
                    else:
                        #captures any clash
                        caught_clashes = True
        if caught_clashes:
            return self._add_clashing_move(indices,player)
        else:
            #Reach this section if there are no clashes at all
            if num==1:
                self.moves.append(Move(indices,player)) #No control needed
                return 'ok'
            else:
                # Two free cells: one indicator qubit plus one move qubit,
                # prepared anti-correlated so the move lands in exactly one.
                self.q.size+=2 #indicator qubit, and move qubit
                q1 = self.q[self.q.size-2] #To make this readable...
                q2 = self.q[self.q.size-1]
                self.qc.h(q1) #the last qubit in register.
                self.qc.x(q2)
                self.qc.cx(q1,q2)
                self.moves.append(Move(indices,player,q1,q2))
                return 'ok'
    def _add_clashing_move(self,indices,player):
        """Adds a clashing move.

        The new move's qubit(s) are anti-correlated with every uncertain
        move already occupying the targeted cell(s), so the move happens
        only where none of its competitors do. Always returns 'ok'.
        """
        if len(indices)==1: #100% of qubit is on one clashing spot.
            #This spot COULD be occupied.
            self.q.size+=1 #Only one bit needed, move happens or not.
            index = indices[0]
            # Collect the qubits of every existing move that may occupy
            # this cell ("bus" of control qubits).
            bus = []
            for existing_move in self.moves:
                if index in existing_move.indices:
                    if index==existing_move.indices[0]:
                        bus.append(existing_move.q1)
                    elif index==existing_move.indices[1]:
                        bus.append(existing_move.q2)
            #Now if any entry on the bus is true, our qubit is false.
            self.qc.x(self.q[self.q.size-1]) # make it 1
            self.qc.any_x(self.qc,*bus,self.q[self.q.size-1])
            #negate is any dependents are true.
            #So the new move can happen if none of the others happen.
            self.moves.append(Move(indices,player,self.q[self.q.size-1]))
            return 'ok'
        elif len(indices)==2:
            #Check first spot is not occupied, then second spot if first
            #is not occupied.
            self.q.size+=2 #Two bits needed (maybe) for each index.
            #This can be optimized, in effect only one qubit is needed,
            #and its result indicates the selected qubit.
            #However, then some control qubit is needed too.
            #Since there are moves that could possibly be erased completely!
            # One control bus per targeted cell.
            bus0 = []
            bus1 = []
            for existing_move in self.moves:
                if indices[0] in existing_move.indices:
                    if indices[0]==existing_move.indices[0]:
                        bus0.append(existing_move.q1)
                    elif indices[0]==existing_move.indices[1]:
                        bus0.append(existing_move.q2)
                if indices[1] in existing_move.indices:
                    if indices[1]==existing_move.indices[0]:
                        bus1.append(existing_move.q1)
                    elif indices[1]==existing_move.indices[1]:
                        bus1.append(existing_move.q2)
            #Now if any entry on the bus is true, our first qubit is false.
            q1 = self.q[self.q.size-2] #a bit easier to look at (:
            q2 = self.q[self.q.size-1]
            if bus0:
                self.qc.x(q1)
                self.qc.cnx(self.qc,*bus0,q1)
            else: self.qc.h(q1)
            #And now the second qubit is 1 only if none of its competitors
            #are 1, and likewise if the previous qubit is zero.
            self.qc.x(q2)
            self.qc.bus_or(self.qc,q2,bus1,[q1])
            self.moves.append(Move(indices,player,q1,q2))
            return 'ok'
    def run(self):
        """Game loop: alternate player prompts until 'q' or 'end', then
        simulate the circuit to compute the winner(s) if the game ended."""
        self.running=True
        if self.print_info:
            print("Welcome to Quantum tic tac toe!")
            print("At each turn choose if to make one or two moves.")
            print("Playing one move at a time is a classic tic tac toe game.")
            print("At each turn the game state is printed;")
            print("X3 is the third move, played by X. When a move is made in a super position,")
            print("You will see its label, say X3, appear in several places.")
            print("This means your move is in a superposition of two classical moves!")
            print("You can make a move in a possibly occupied spot.")
            print("Then the new move will be anti-correlated with the move already in that spot.")
            print("And so the game branches out into many possible states.")
            print("The outcome is then computed by simulation...")
            print("so don't make too many quantum moves or it will take long to compute!")
            print("Enter 'q' at any time to quit")
            print("Enter 'end' to end the game, and compute the winner(s).")
            print("Good luck!")
        while self.running:
            self.ask_player(0)
            self.ask_player(1)
        # game_full is only set by the 'end' command (see question()).
        if self.game_full:
            self.compute_winner()
    def ask_player(self,player):
        """Ask a player for move details.

        Loops until the player enters a valid, accepted move, or until the
        game stops running (after 'q'/'end' was typed at any prompt).
        """
        asking=False
        if self.running:
            asking = True
        while asking:
            if player==0:
                player_name = 'X'
            elif player==1:
                player_name = 'O'
            print("PLAYER "+player_name+" :")
            cells = self.question('Play in 1 or 2 cells?')
            if cells=='1':
                x = int(self.question('x index:'))
                y = int(self.question('y index:'))
                # Board indices are stored (row, column) = [y, x].
                status = self.add_move([[y,x]],player)
                if status == 'ok':
                    asking = False
                else: print(status)
            elif cells=='2':
                x1 = int(self.question('x1 index:'))
                y1 = int(self.question('y1 index:'))
                x2 = int(self.question('x2 index:'))
                y2 = int(self.question('y2 index:'))
                status = self.add_move([[y1,x1],[y2,x2]],player)
                if status == 'ok':
                    asking = False
                else: print(status)
            # 'q'/'end' during any prompt stops the asking loop too.
            if not self.running:
                asking=False
    def question(self,text):
        """Ask the user a question and return the raw answer string.

        Returns None (and flips the game flags) when the user enters the
        control words: 'q' quits, 'end' stops play and requests scoring.
        """
        if self.running:
            answer = input(text)
            if answer=='q':
                self.running=False
                return None
            elif answer=='end':
                self.game_full = True
                self.running = False
                # Falls through: implicitly returns None.
            else:
                return answer
        else: return None
    def compute_winner(self):
        """Find overall game winner, by finding winners of each outcome.

        Measures every qubit, simulates the circuit (100 shots), then for
        each observed bitstring reconstructs the collapsed classical board
        and prints its X/O line counts together with the shot weight.
        """
        self.c.size = self.q.size #Make them the same
        self.qc.measure(self.q, self.c) #Measure
        backend = Aer.get_backend('qasm_simulator')
        job_sim = execute(self.qc, backend=backend, shots=100)
        sim_result = job_sim.result()
        print("simulation: ", sim_result)
        print(sim_result.get_counts(self.qc))
        self.counts = sim_result.get_counts(self.qc)
        for count in self.counts: #Takes key names
            c = list(count)[:-1] #splits key '1011' => ['1','0','1','1']
            c = c[::-1] #invert it so it goes 0 up...
            #Ignore the last bit since I dont know how to get rid of it
            #It is zero always.
            #The reason it is included is that I create a quantum register and
            #then start adding operations, quantum registers need at least one bit.
            counter = 0          # read position within the bitstring
            weight = self.counts[count]
            # Collapsed classical board for this outcome.
            empty = np.zeros((self.x,self.y),dtype=str)
            for m in self.moves:
                if m.player == 0:
                    char = 'x'
                elif m.player==1:
                    char = 'o'
                # Collect this move's measured bit(s), in register order.
                result = []
                if m.q1:
                    result.append(c[counter])
                    counter+=1
                if m.q2:
                    result.append(c[counter])
                    counter+=1
                #print(result)
                if len(result) == len(m.indices):
                    #print(m)
                    if result[0]=='1':
                        empty[m.indices[0][0],m.indices[0][1]] = char
                    if len(result)>1:
                        if result[1]=='1':
                            if result[0]=='1':
                                # Sanity check: both qubits of one move measured 1.
                                print('problem! a move appeard in two places.')
                                print(m)
                            empty[m.indices[1][0],m.indices[1][1]] = char
                elif not result: #Then it was a classical move
                    empty[m.indices[0][0],m.indices[0][1]] = char
            xscore,oscore=self.winners(empty)
            print('X wins: '+str(xscore))
            print('O wins: '+str(oscore))
            print('Shots: '+str(weight))
            print(empty)
def winners(self,empty):
"""Compute winners of a board"""
oscore = 0
xscore = 0
for x in range(self.x):
if empty[x,1]==empty[x,0] and empty[x,2]==empty[x,1]:
if empty[x,0]=='o':
oscore+=1
elif empty[x,0]=='x':
xscore +=1
for y in range(self.y):
if empty[1,y]==empty[0,y] and empty[2,y]==empty[0,y]:
if empty[0,y]=='o':
oscore+=1
elif empty[0,y]=='x':
xscore +=1
if empty[0,0]==empty[1,1] and empty[1,1]==empty[2,2]:
if empty[0,0]=='o':
oscore+=1
elif empty[0,0]=='x':
xscore += 1
if empty[2,0]==empty[1,1] and empty[1,1]==empty[0,2]:
if empty[2,0]=='o':
oscore+=1
elif empty[2,0]=='x':
xscore += 1
return [xscore,oscore]
    def _populate_board(self):
        """Automatically populate as below, for testing purposes.

        Plays a fixed scripted mix of quantum (two-cell) and classical
        (one-cell) moves so compute_winner can be exercised without
        interactive input.
        """
        self.add_move([[2,2],[0,0]],1)
        self.add_move([[1,1],[1,2]],0)
        self.add_move([[1,2],[2,1]],1)
        self.add_move([[2,1]],0)
        self.add_move([[0,1]],1)
        self.add_move([[1,0]],0)
        self.add_move([[2,0]],1)
        self.add_move([[2,2]],0)
        self.add_move([[0,0]],1)
        self.add_move([[0,2]],0)
        self.add_move([[1,1]],1)
        self.add_move([[1,2]],0)
if __name__ == "__main__":
    # Play an interactive game on a 3x3 board.
    game = Board(3, 3)
    game.run()
|
nilq/baby-python
|
python
|
import os
import foundations
from foundations_contrib.global_state import current_foundations_context, message_router
from foundations_events.producers.jobs import RunJob

# Acceptance-test helper: announce the job run under the 'default'
# project, attach descriptive tags, and emit a line of output.
foundations.set_project_name('default')

acceptance_job_id = os.environ['ACCEPTANCE_TEST_JOB_ID']
context = current_foundations_context().pipeline_context()
context.file_name = acceptance_job_id
RunJob(message_router, context).push_message()

# Tags the acceptance test later asserts on (keys kept verbatim,
# including the trailing comma in the last one).
for tag_key, tag_value in (('model type', 'simple mlp'),
                           ('data set', 'out of time'),
                           ('what I was doing,', 'drinking tea')):
    foundations.set_tag(tag_key, tag_value)

print('Hello World!')
|
nilq/baby-python
|
python
|
class MySql(object):
    """Placeholder for a MySQL backend; carries no behaviour yet."""
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Author: Jeremy Compostella <jeremy.compostella@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
'''This module implements a car charger task based on the Wallbox EV charger.
'''
import os
import sys
from datetime import datetime, timedelta
from select import select
from time import sleep
import Pyro5
import requests
from cachetools import TTLCache
from wallbox import Wallbox
from car_sensor import CarSensorProxy
from power_sensor import RecordScale
from scheduler import Priority, SchedulerProxy, Task
from sensor import SensorReader
from tools import NameServer, Settings, debug, init, log_exception
from watchdog import WatchdogProxy
DEFAULT_SETTINGS = {'power_sensor_key': 'EV',
'min_available_current': 6,
'cycle_length': 15}
MODULE_NAME = 'car_charger'
class CarCharger(Task):
    '''Wallbox car charger Task.
    This task handles a Wallbox car charger and automatically adjusts the
    charge rate based on produced power availability.
    '''
    # Status string the Wallbox API reports when the car no longer draws
    # power (battery full).
    FULLY_CHARGED = 'Connected: waiting for car demand'
    # Status strings indicating a car is physically plugged in.
    PLUGGED_IN = ['Charging', FULLY_CHARGED,
                  'Connected: waiting for next schedule',
                  'Paused by user']
    def __init__(self, wallbox: Wallbox, charger_id: int, settings: Settings):
        '''Create the task for charger CHARGER_ID on the WALLBOX account.

        Registers with the scheduler under the configured power sensor key
        and starts at LOW priority until the state of charge is known.
        '''
        Task.__init__(self, Priority.LOW, keys=[settings.power_sensor_key],
                      auto_adjust=True)
        self.wallbox = wallbox
        self.charger_id = charger_id
        self.settings = settings
        # Short-lived (3 second) single-entry cache of the charger status,
        # to avoid hammering the Wallbox web API.
        self.cache = TTLCache(1, timedelta(seconds=3), datetime.now)
        # Last known car battery state of charge in percent, or None.
        self.state_of_charge = None
    def __call(self, name, *args):
        '''Invoke Wallbox API method NAME with ARGS, retrying up to 3 times.

        On HTTP errors the session is re-authenticated before retrying;
        on read timeouts the call is retried after a short pause.
        Raises RuntimeError when all attempts fail.
        '''
        for _ in range(3):
            try:
                method = getattr(self.wallbox, name)
                return method(self.charger_id, *args)
            except requests.exceptions.HTTPError:
                log_exception('%s%s failed' % (name, args), *sys.exc_info())
                # The session token may have expired; renew it and retry.
                self.wallbox.authenticate()
            except requests.exceptions.ReadTimeout:
                log_exception('%s%s failed' % (name, args), *sys.exc_info())
                sleep(0.5)
        raise RuntimeError('%s%s failed too many times' % (name, args))
    @property
    def status(self):
        '''JSON representation of the charger status.'''
        # EAFP cache lookup: on expiry/miss, refresh from the API.
        try:
            return self.cache['status']
        except KeyError:
            self.cache['status'] = self.__call('getChargerStatus')
            return self.cache['status']
    @Pyro5.api.expose
    @Pyro5.api.oneway
    def start(self):
        '''Resume the charging session (scheduler entry point).'''
        debug('Starting')
        self.__call('resumeChargingSession')
        # Invalidate the cached status so callers see the new state.
        self.cache.clear()
    @Pyro5.api.expose
    @Pyro5.api.oneway
    def stop(self):
        '''Pause charging and drop the rate back to the minimum current.'''
        debug('Stopping')
        self.__call('pauseChargingSession')
        self.__call('setMaxChargingCurrent', self.min_available_current)
        self.cache.clear()
    @property
    def status_description(self):
        '''String describing the charger status.'''
        return self.status['status_description']
    @property
    def min_available_current(self):
        '''Minimum current supported by the charger in Ampere.'''
        return self.settings.min_available_current
    @property
    def max_available_current(self):
        '''Maximal current supported by the charger in Ampere.'''
        return self.status['config_data']['max_available_current']
    @Pyro5.api.expose
    def is_running(self) -> bool:
        '''True while the charger is actively delivering power.'''
        return self.status_description == 'Charging'
    @Pyro5.api.expose
    def is_stoppable(self):
        '''Charging can always be paused.'''
        return True
    @Pyro5.api.expose
    def is_runnable(self):
        '''True if calling the 'start' function would initiate charging.'''
        return self.status_description in self.PLUGGED_IN \
            and self.status_description != self.FULLY_CHARGED
    @Pyro5.api.expose
    def meet_running_criteria(self, ratio, power=0) -> bool:
        '''True when the produced-power RATIO justifies (keeping) charging.

        Hysteresis: an already-running session tolerates a ratio down to
        0.8, while starting a new one requires a full ratio of 1.
        '''
        debug('meet_running_criteria(%.3f, %.3f)' % (ratio, power))
        if not self.is_runnable():
            return False
        if self.is_running():
            return ratio >= 0.8
        return ratio >= 1
    @property
    @Pyro5.api.expose
    def desc(self):
        '''Human-readable description: class, priority and charge level.'''
        description = '%s(%s' % (self.__class__.__name__, self.priority.name)
        if self.state_of_charge is not None:
            description += ', %.1f%%' % self.state_of_charge
        return description + ')'
    @property
    @Pyro5.api.expose
    def power(self):
        '''Minimal power draw of a session.

        NOTE(review): .24 presumably converts Ampere to kW on a 240 V
        supply -- confirm against the installation.
        '''
        return self.min_available_current * .24
    def adjust_priority(self, state_of_charge):
        '''Update the priority according to the current state of charge'''
        self.state_of_charge = state_of_charge
        # Lower state of charge maps to a more urgent priority bucket.
        thresholds = {Priority.URGENT: 50, Priority.HIGH: 65,
                      Priority.MEDIUM: 80, Priority.LOW: 101}
        # Walk priorities from lowest to highest, keeping the first bucket
        # whose threshold the charge level falls under.
        for priority in reversed(Priority):
            if state_of_charge < thresholds[priority]:
                self.priority = priority
                break
    def current_rate_for(self, power):
        '''Return the appropriate current in Ampere for POWER in KWh.'''
        # Clamp the requested rate into the charger's supported range.
        rate = max(int(power / .24), self.min_available_current)
        return min(rate, self.max_available_current)
    def adjust_charge_rate(self, record):
        '''Adjust the charging rate according to the instant POWER record.'''
        # Available power is production surplus plus our own current usage
        # (record['net'] is presumably negative when exporting -- confirm).
        available = -(record['net'] - self.usage(record))
        current = self.current_rate_for(available)
        # Only issue an API call when the rate actually changes.
        if self.status['config_data']['max_charging_current'] != current:
            debug('Adjusting to %dA (%.2f KWh)' % (current, available))
            self.__call('setMaxChargingCurrent', current)
def main():
    '''Register and run the car charger task.

    Sets up the Wallbox session, exposes the task over Pyro, then loops
    forever: kick the watchdog, (re-)register with the name server and
    scheduler, service Pyro requests until the next cycle, refresh the
    task priority from the car state of charge and, while charging,
    adjust the charge rate from the latest power record.
    '''
    # pylint: disable=too-many-locals
    base = os.path.splitext(__file__)[0]
    config = init(base + '.log')['Wallbox']
    settings = Settings(base + '.ini', DEFAULT_SETTINGS)
    wallbox = Wallbox(config['login'], config['password'],
                      requestGetTimeout=5)
    wallbox.authenticate()
    device_id = int(config['device_id'])
    if device_id not in wallbox.getChargersList():
        raise RuntimeError('%d charger ID does not exist' % device_id)
    task = CarCharger(wallbox, device_id, settings)
    Pyro5.config.COMMTIMEOUT = 5
    daemon = Pyro5.api.Daemon()
    nameserver = NameServer()
    uri = daemon.register(task)
    nameserver.register_task(MODULE_NAME, uri)
    sensor = CarSensorProxy()
    power_sensor = SensorReader('power')
    power_simulator = SensorReader('power_simulator')
    scheduler = SchedulerProxy()
    watchdog = WatchdogProxy()
    debug("... is now ready to run")
    while True:
        settings.load()
        watchdog.register(os.getpid(), MODULE_NAME)
        watchdog.kick(os.getpid())
        try:
            nameserver.register_task(MODULE_NAME, uri)
        except RuntimeError:
            log_exception('Failed to register the sensor',
                          *sys.exc_info())
        # Self-testing: on basic operation failure unregister from the
        # scheduler.
        try:
            task.status_description  # pylint: disable=pointless-statement
            scheduler.register_task(uri)
        except RuntimeError:
            debug('Self-test failed, unregister from the scheduler')
            scheduler.unregister_task(uri)
        next_cycle = datetime.now() + timedelta(
            # pylint: disable=maybe-no-member
            seconds=settings.cycle_length)
        # Service Pyro requests until the next cycle is due.
        while True:
            # BUG FIX: the original computed the select() timeout as
            # 'timeout.seconds + timeout.microseconds / 1000000'.  For a
            # negative timedelta (next_cycle already elapsed, e.g. after
            # slow daemon.events() processing) that normalizes to almost
            # a day and stalls the loop; it also ignored '.days'.  Use
            # total_seconds() and break as soon as no time remains.
            remaining = (next_cycle - datetime.now()).total_seconds()
            if remaining <= 0:
                break
            sockets, _, _ = select(daemon.sockets, [], [], remaining)
            if sockets:
                daemon.events(sockets)
        try:
            task.adjust_priority(sensor.read()['state of charge'])
        except RuntimeError:
            debug('Could not read current state of charge')
        if not task.is_running():
            continue
        # Prefer the real power sensor, falling back to the simulator.
        record = power_sensor.read(scale=RecordScale.SECOND)
        if not record:
            debug('No new power record, use the simulator')
            record = power_simulator.read(scale=RecordScale.SECOND)
            if not record:
                debug('Failed to get a record from the simulator')
        if record:
            try:
                task.adjust_charge_rate(record)
            except RuntimeError:
                log_exception('adjust_charge_rate() failed', *sys.exc_info())
if __name__ == "__main__":
    # Run the charger daemon only when executed as a script, not on import.
    main()
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.