id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
5141357 | import healpy as hp
import numpy as np
def iqu2teb(IQU, nside, lmax=None):
    """Convert (I, Q, U) Stokes maps into (T, E, B) scalar maps.

    Decomposes the polarized input into spherical-harmonic coefficients
    and then re-synthesizes each coefficient set as an independent
    (non-polarized) map.
    """
    harmonic_coeffs = hp.map2alm(IQU, lmax=lmax, pol=True)
    return hp.alm2map(harmonic_coeffs, nside=nside, lmax=lmax, pol=False)
def teb2iqu(TEB, nside, lmax=None):
    """Convert (T, E, B) scalar maps back into (I, Q, U) Stokes maps.

    Inverse companion of ``iqu2teb``: analyses each input map as a
    scalar field, then synthesizes the coefficients as polarized maps.
    """
    harmonic_coeffs = hp.map2alm(TEB, lmax=lmax, pol=False)
    return hp.alm2map(harmonic_coeffs, nside=nside, lmax=lmax, pol=True)
def messenger_1(data_vec, T_pixel, n_iter, s_cov_diag_grade, nside, noise_bar_diag, noise_diag):
    """Messenger-field Wiener filtering of a complex (Q + iU) map.

    Alternates between the pixel domain (where the noise covariance is
    diagonal) and the harmonic domain (where the signal covariance is
    diagonal), passing information between the two via the messenger
    field ``t``.

    :param data_vec: complex data vector (real part and imaginary part
        are filtered independently as scalar maps)
    :param T_pixel: messenger-field covariance in the pixel domain
    :param n_iter: number of messenger iterations
    :param s_cov_diag_grade: diagonal signal covariance in harmonic space
    :param nside: HEALPix resolution of the output maps
    :param noise_bar_diag: noise covariance with its uniform part removed
    :param noise_diag: diagonal pixel-space noise covariance
    :return: filtered complex signal map
    """
    # Band limit used for every transform in this routine.
    lmax = 3 * nside - 1
    s = np.zeros(data_vec.shape, dtype='complex')
    # Harmonic-space messenger covariance: uniform, scaled from the pixel
    # value by 1/nside^2.  BUG FIX: ``np.float`` was deprecated in NumPy
    # 1.20 and removed in 1.24; the builtin ``float`` is the drop-in
    # replacement.
    T_harmonic_grade = np.ones(hp.map2alm(hp.ud_grade(data_vec.real, nside),
                                          lmax=lmax).shape) * T_pixel[0] / float(nside * nside)
    harmonic_operator = s_cov_diag_grade / (s_cov_diag_grade + T_harmonic_grade)
    pixel_operator_signal = noise_bar_diag / (noise_bar_diag + T_pixel)
    pixel_operator_data = T_pixel / (T_pixel + noise_diag)
    for _ in range(n_iter):
        # Pixel-domain update of the messenger field.
        t = pixel_operator_data * data_vec + pixel_operator_signal * s
        # Harmonic-domain update, done separately for the real and
        # imaginary components (two scalar transforms).
        t_alm1 = hp.map2alm(t.real, lmax=lmax)
        t_alm2 = hp.map2alm(t.imag, lmax=lmax)
        s1 = hp.alm2map(harmonic_operator * t_alm1, nside=nside, lmax=lmax, verbose=False)
        s2 = hp.alm2map(harmonic_operator * t_alm2, nside=nside, lmax=lmax, verbose=False)
        s = s1 + 1j * s2
        print(np.var(s))  # crude convergence diagnostic
    return s
def messenger_2(data_vec, s_cov_diag, T_ell, noise_diag, T_pixel, noise_bar_diag, nside, n_iter):
    """Messenger-field filtering using the polarized (spin-2) transforms.

    Works on a stacked real vector ``[Q, U]`` and round-trips through
    ``map2alm``/``alm2map`` with ``pol=True`` so that E/B mode mixing is
    handled by healpy.  The I component is zero-padded throughout.

    NOTE(review): this routine depends heavily on the exact ordering of
    the slice/stack operations below; left byte-identical on purpose.
    """
    # Stack real and imaginary parts into one real vector [Q, U].
    data_vec_QU = np.concatenate([data_vec.real, data_vec.imag])
    s = np.zeros(data_vec_QU.shape, dtype='complex')
    convergence_test = [0.]
    harmonic_operator = s_cov_diag / (s_cov_diag + T_ell)
    pixel_operator_signal = (noise_bar_diag / (noise_bar_diag + T_pixel))
    pixel_operator_data = (T_pixel / (T_pixel + noise_diag))
    for i in range(n_iter):
        t = pixel_operator_data * data_vec_QU + pixel_operator_signal * s  # here t = concat[t_Q, t_U]
        t = np.real(t)
        # Re-shape into the [I, Q, U] triple healpy expects; the I map is
        # a zeroed copy of the second half (t_I = 0).
        t = [t[int(t.shape[0] / 2):] * 0., t[:int(t.shape[0] / 2)], t[int(t.shape[0] / 2):]]  # here t = {t_I = 0, t_Q, t_U}
        t = hp.ud_grade(t, nside)  # now upgrade
        t_alm = hp.map2alm(t, lmax=3 * (nside) - 1, pol=True)
        # Keep only the E/B coefficients (indices 1 and 2) and apply the
        # harmonic-space Wiener operator.
        s = harmonic_operator * np.concatenate([t_alm[1], t_alm[2]])
        s = [s[int(s.shape[0] / 2):] * 0., s[:int(s.shape[0] / 2)], s[int(s.shape[0] / 2):]]
        print(np.var(s[0]), np.var(s[1]), np.var(s[2]))
        # Track the variance of the E-mode coefficients as a convergence
        # proxy; the list is currently never returned or inspected.
        convergence_test.append(np.var(s[1]))
        s = hp.alm2map(s, nside=nside, lmax=nside * 3 - 1, verbose=False, pol=True)
        # s_qu = np.copy(s)
        # Back to the stacked [Q, U] pixel vector for the next iteration.
        s = np.concatenate([s[1], s[2]])
    return s
1819105 | <gh_stars>0
from inspera.reader import InsperaReader | StarcoderdataPython |
3227994 | <reponame>lunabox/leetcodepy<gh_stars>0
#coding:utf8
'''
Created on 2017年3月27日
@author: wanlipeng
'''
from problems.listnode import ListNode
class Solution(object):
    """Singly-linked-list helpers: in-place reversal and palindrome test."""

    def reverseList(self, head):
        """Reverse the list starting at *head* in place; return the new head.

        Returns None for an empty list.
        """
        if head is None:
            return None
        prev = None
        node = head
        while node is not None:
            nxt = node.next
            node.next = prev
            prev = node
            node = nxt
        return prev

    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        if head is None or head.next is None:
            return True
        # Locate the middle with slow/fast pointers.
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        if fast:
            # Odd number of nodes: skip the middle element.
            slow.next = self.reverseList(slow.next)
            second = slow.next
        else:
            # Even number of nodes: reverse from the midpoint.
            second = self.reverseList(slow)
        # Compare the first half against the reversed second half.
        while second:
            if head.val != second.val:
                return False
            second = second.next
            head = head.next
        return True
| StarcoderdataPython |
1970143 | # -*- coding: utf-8 -*-
"""
eve-demo-client
~~~~~~~~~~~~~~~
Simple and quickly hacked together, this script is used to reset the
eve-demo API to its initial state. It will use standard API calls to:
1) delete all items in the 'people' and 'works' collections
2) post multiple items in both collection
I guess it can also serve as a basic example of how to programmatically
manage a remote API using the phenomenal Requests library by <NAME>
(a very basic 'get' function is included even if not used).
:copyright: (c) 2015 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import sys
import json
import random
import requests
ENTRY_POINT = '127.0.0.1:80'
def post_people():
    """POST the demo 'people' documents and return the ids of accepted items.

    Returns an empty list when the POST fails or the API reports a
    non-OK status; raises (via ``raise_for_status``) on HTTP errors.
    """
    people = [
        {
            'firstname': 'John',
            'lastname': 'Doe',
            'role': ['author'],
            'location': {'address': '422 South Gay Street', 'city': 'Auburn'},
            'born': 'Thu, 27 Aug 1970 14:37:13 GMT'
        },
        {
            'firstname': 'Serena',
            'lastname': 'Love',
            'role': ['author'],
            'location': {'address': '363 Brannan St', 'city': 'San Francisco'},
            'born': 'Wed, 25 Feb 1987 17:00:00 GMT'
        },
        {
            'firstname': 'Mark',
            'lastname': 'Green',
            'role': ['copy', 'author'],
            'location': {'address': '4925 Lacross Road', 'city': 'New York'},
            'born': 'Sat, 23 Feb 1985 12:00:00 GMT'
        },
        {
            'firstname': 'Julia',
            'lastname': 'Red',
            'role': ['copy'],
            'location': {'address': '98 Yatch Road', 'city': 'San Francisco'},
            'born': 'Sun, 20 Jul 1980 11:00:00 GMT'
        },
        {
            'firstname': 'Anne',
            'lastname': 'White',
            'role': ['contributor', 'copy'],
            'location': {'address': '32 Joseph Street', 'city': 'Ashfield'},
            'born': 'Fri, 25 Sep 1970 10:00:00 GMT'
        },
        {
            'firstname': 'Anton',
            'lastname': 'Friberg',
            'role': ['author'],
            'location': {'address': '22644 Magistratsvägen 55R', 'city': 'Lund'},
            'born': 'Wed, 7 Jan 1993 23:55:03 GMT'
        },
    ]
    r = perform_post('people', json.dumps(people))
    print("'people' posted", r.status_code)
    if r.status_code != 201:
        # Raises for 4xx/5xx; other non-201 codes fall through empty.
        r.raise_for_status()
        return []
    response = r.json()
    if response['_status'] != 'OK':
        return []
    # Collect ids of the individually-accepted documents only.
    return [person['_id'] for person in response['_items']
            if person['_status'] == "OK"]
def post_works(ids):
    """POST 28 demo 'works' documents, each owned by a random id from *ids*."""
    works = [
        {
            'title': 'Book Title #%d' % i,
            'description': 'Description #%d' % i,
            'owner': random.choice(ids),
        }
        for i in range(28)
    ]
    r = perform_post('works', json.dumps(works))
    print("'works' posted", r.status_code)
    if r.status_code != 201:
        r.raise_for_status()
def perform_post(resource, data):
    """POST *data* (a JSON string) to the endpoint for *resource*."""
    return requests.post(
        endpoint(resource),
        data=data,
        headers={'Content-Type': 'application/json'},
    )
def delete():
    # Wipe both demo collections so the API can be re-seeded from scratch.
    r = perform_delete('people')
    print("'people' deleted", r.status_code)
    r = perform_delete('works')
    print("'works' deleted", r.status_code)
def perform_delete(resource):
    # Collection-level DELETE: removes every item in the resource.
    return requests.delete(endpoint(resource))
def endpoint(resource):
    """Build the resource URL, honouring an optional host:port CLI override."""
    host = sys.argv[1] if sys.argv[1:] else ENTRY_POINT
    return 'http://%s/%s/' % (host, resource)
if __name__ == '__main__':
    # Reset the demo API: clear both collections, then repopulate them.
    delete()
    people = post_people()
    post_works(people)
| StarcoderdataPython |
3478114 | """Test Cases for HexitecAdapter, Hexitec in hexitec.HexitecDAQ, hexitec.Hexitec.
<NAME>, STFC Detector Systems Software Group
"""
from hexitec.adapter import HexitecAdapter, Hexitec, HexitecDetectorDefaults
from odin.adapters.parameter_tree import ParameterTreeError
import unittest
import pytest
import time
import sys
if sys.version_info[0] == 3: # pragma: no cover
from unittest.mock import Mock, MagicMock, patch
else: # pragma: no cover
from mock import Mock, MagicMock, patch
class DetectorAdapterTestFixture(object):
    """Test fixture class.

    Builds a HexitecAdapter with its FEM/DAQ dependencies patched out,
    plus fake 'fp' and 'file_interface' adapters returning canned data.
    """

    def __init__(self):
        """Initialise object."""
        # Single-FEM configuration string as parsed from an ini file.
        self.options = {
            "fem_0":
                """
                id = 0,
                server_ctrl_ip = 127.0.0.1,
                camera_ctrl_ip = 127.0.0.1,
                server_data_ip = 127.0.0.1,
                camera_data_ip = 127.0.0.1
                """
        }
        # TODO: Below "hack" prevents polling randomly failing tests relying on add_callback
        # assertion; Needs reworking once watchdog(s) ready to be unit tested
        with patch('hexitec.adapter.HexitecFem'), patch('hexitec.adapter.HexitecDAQ'):
            with patch('hexitec.adapter.Hexitec._start_polling'):  # TODO: To be amended
                self.adapter = HexitecAdapter(**self.options)
        self.detector = self.adapter.hexitec  # shortcut, makes assert lines shorter
        self.path = "detector/acquisition/number_frames"
        self.put_data = 1024
        # Fake tornado-style request object used by GET/PUT tests.
        self.request = Mock()
        self.request.configure_mock(
            headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
            body=self.put_data
        )
        self.request_again = Mock()
        self.request_again.configure_mock(
            headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
            body=5
        )
        self.fake_fp = MagicMock()
        self.fake_fi = MagicMock()
        # once the odin_data adapter is refactored to use param tree, this structure will need fixing
        self.fp_data = {
            "value": [
                {
                    "status": {
                        "configuration_complete": True,
                    },
                    "connected": True,
                    "plugins": {
                        "names": [
                            "correction",
                            "hdf",
                            "view"
                        ]
                    },
                    "hdf": {
                        "frames_written": 0,
                        "frames_processed": 0,
                        "writing": True
                    },
                    "histogram": {
                        "frames_processed": 2
                    },
                }
            ]
        }
        self.fi_data = {
            "config_dir": "fake/config_dir",
            "fr_config_files": [
                "first.config",
                "not.config",
                "hexitec_fr.config"
            ],
            "fp_config_files": [
                "not.config",
                "hexitec_fp.config"
            ]
        }
        # set up fake adapter
        fp_return = Mock()
        fp_return.configure_mock(data=self.fp_data)
        self.fake_fp.get = Mock(return_value=fp_return)
        fi_return = Mock()
        fi_return.configure_mock(data=self.fi_data)
        # NOTE(review): fi_return is built but never attached to
        # self.fake_fi.get — presumably an oversight; verify whether any
        # test actually reads the file_interface adapter's data.
        self.adapters = {
            "fp": self.fake_fp,
            "file_interface": self.fake_fi
        }
class TestAdapter(unittest.TestCase):
    """Unit tests for the adapter class (GET/PUT/DELETE dispatch)."""

    def setUp(self):
        """Set up test fixture for each unit test."""
        self.test_adapter = DetectorAdapterTestFixture()

    # @pytest.mark.skip("Test Skipped")
    def test_adapter_get(self):
        """Test that a call to the detector adapter's GET method returns the correct response."""
        # Hack: Initialise adapters in adapter.py
        self.test_adapter.adapter.adapters = self.test_adapter.adapters
        expected_response = {
            'number_frames': 10
        }
        response = self.test_adapter.adapter.get(
            self.test_adapter.path,
            self.test_adapter.request)
        assert response.data == expected_response
        assert response.status_code == 200

    def test_adapter_get_fp_adapter(self):
        """Test that a call to the detector adapter's GET method returns the correct response."""
        # Hack: Initialise adapters in adapter.py
        self.test_adapter.adapter.adapters = self.test_adapter.adapters
        expected_response = {
            'frames_written': 0,
            'frames_processed': 0,
            'writing': True
        }
        # "fp/" routes the request through the fake fp adapter's canned data.
        response = self.test_adapter.adapter.get(
            "fp/",
            self.test_adapter.request)
        assert response.status_code == 200
        assert expected_response == response.data['value'][0]['hdf']

    def test_adapter_get_error(self):
        """Test adapter handles invalid GET."""
        false_path = "not/a/path"
        expected_response = {
            'error': "Invalid path: {}".format(false_path)
        }
        response = self.test_adapter.adapter.get(
            false_path,
            self.test_adapter.request)
        assert response.data == expected_response
        assert response.status_code == 400

    def test_adapter_get_raises_parameter_tree_exception(self):
        """Test adapter handles parameter tree exception."""
        path = "fp/"
        expected_response = {
            'error': "Invalid path: {}".format(path)
        }
        # Mock logging to provoke ParameterTree exception
        with patch('logging.debug') as mock_log:
            mock_log.side_effect = ParameterTreeError()
            response = self.test_adapter.adapter.get(
                path,
                self.test_adapter.request)
            assert response.data == expected_response
            assert response.status_code == 400

    def test_adapter_put(self):
        """Test that a normal PUT works ok."""
        expected_response = {
            'number_frames': self.test_adapter.put_data
        }
        self.test_adapter.adapter.adapters = self.test_adapter.adapters
        response = self.test_adapter.adapter.put(
            self.test_adapter.path,
            self.test_adapter.request)
        assert response.data == expected_response
        assert response.status_code == 200
        assert self.test_adapter.detector.number_frames == self.test_adapter.put_data

    def test_adapter_put_error(self):
        """Test adapter handles invalid PUT."""
        false_path = "not/a/path"
        expected_response = {
            'error': "Invalid path: {}".format(false_path)
        }
        response = self.test_adapter.adapter.put(
            false_path,
            self.test_adapter.request)
        assert response.data == expected_response
        assert response.status_code == 400
        # Detector state must be untouched by the failed PUT.
        assert self.test_adapter.detector.number_frames == 10

    def test_adapter_delete(self):
        """Test that adapter's DELETE function works."""
        expected_response = '{}: DELETE on path {}'.format("HexitecAdapter", self.test_adapter.path)
        response = self.test_adapter.adapter.delete(self.test_adapter.path,
                                                    self.test_adapter.request)
        assert response.data == expected_response
        assert response.status_code == 200
class TestDetector(unittest.TestCase):
    """Unit tests for detector class.

    Exercises Hexitec's polling, watchdogs, bias clock, and acquisition
    state machine with HexitecFem/HexitecDAQ mocked out by the fixture.
    """

    def setUp(self):
        """Set up test fixture for each unit test."""
        self.test_adapter = DetectorAdapterTestFixture()

    def test_detector_init(self):
        """Test function initialises detector OK."""
        defaults = HexitecDetectorDefaults()
        assert self.test_adapter.detector.file_dir == defaults.save_dir
        assert len(self.test_adapter.detector.fems) == 1
        assert isinstance(self.test_adapter.detector.fems[0], MagicMock)
        fem = self.test_adapter.detector.fems[0]
        fem.connect()
        fem.connect.assert_called_with()

    def test_detector_init_default_fem(self):
        """Test that the detector correctly sets up the default fem if none provided."""
        defaults = HexitecDetectorDefaults()
        with patch('hexitec.adapter.HexitecFem') as mock_fem, patch('hexitec.adapter.HexitecDAQ'):
            detector = Hexitec({})
            mock_fem.assert_called_with(
                fem_id=defaults.fem["id"],
                parent=detector,
                server_ctrl_ip_addr=defaults.fem["server_ctrl_ip"],
                camera_ctrl_ip_addr=defaults.fem["camera_ctrl_ip"],
                server_data_ip_addr=defaults.fem["server_data_ip"],
                camera_data_ip_addr=defaults.fem["camera_data_ip"]
            )

    def test_detector_init_multiple_fem(self):
        """Test function initialises multiple fems."""
        options = self.test_adapter.options
        options["fem_1"] = """
            id = 0,
            server_ctrl_ip = 127.0.0.2,
            camera_ctrl_ip = 127.0.0.2,
            server_data_ip = 127.0.0.2,
            camera_data_ip = 127.0.0.2
            """
        with patch('hexitec.adapter.HexitecFem'), patch('hexitec.adapter.HexitecDAQ'):
            detector = Hexitec(options)
            assert len(detector.fems) == 2

    def test_poll_fems(self):
        """Test poll fem works."""
        self.test_adapter.detector.adapters = self.test_adapter.adapters
        self.test_adapter.detector.fems[0].acquisition_completed = True
        self.test_adapter.detector.fems[0].health = True
        self.test_adapter.detector.poll_fems()
        # Ensure shutdown_processing() was called [it changes the following bool]
        assert self.test_adapter.detector.acquisition_in_progress is False

    def test_check_fem_watchdog(self):
        """Test fem watchdog works."""
        self.test_adapter.detector.acquisition_in_progress = True
        self.test_adapter.detector.fems[0].hardware_busy = True
        self.test_adapter.detector.fems[0].acquire_timestamp = time.time()
        # Zero timeout forces the watchdog to trip immediately.
        self.test_adapter.detector.fem_tx_timeout = 0
        self.test_adapter.detector.check_fem_watchdog()
        # Ensure shutdown_processing() was called [it changes the following two bools]
        assert self.test_adapter.detector.daq.shutdown_processing is True
        assert self.test_adapter.detector.acquisition_in_progress is False

    def test_check_daq_watchdog(self):
        """Test daq watchdog works."""
        self.test_adapter.detector.daq.in_progress = True
        # Ensure time difference is three seconds while timeout artificially at 0 seconds
        self.test_adapter.detector.daq.processed_timestamp = time.time() - 3
        self.test_adapter.detector.daq_rx_timeout = 0
        self.test_adapter.detector.check_daq_watchdog()
        # Ensure shutdown_processing() was called [it changes the following two bools]
        assert self.test_adapter.detector.daq.shutdown_processing is True
        assert self.test_adapter.detector.acquisition_in_progress is False

    def test_detector_shutdown_processing_correct(self):
        """Test function shuts down processing."""
        self.test_adapter.detector.daq.shutdown_processing = False
        self.test_adapter.detector.acquisition_in_progress = True
        self.test_adapter.detector.shutdown_processing()
        assert self.test_adapter.detector.daq.shutdown_processing is True
        assert self.test_adapter.detector.acquisition_in_progress is False

    def test_detector_get_od_status_fp(self):
        """Test detector handles valid fp adapter request."""
        with patch("hexitec.adapter.ApiAdapterRequest") as mock_request:
            # Initialise adapters in adapter
            self.test_adapter.detector.adapters = self.test_adapter.adapters
            rc_value = self.test_adapter.detector._get_od_status("fp")
            config = None
            # Doublechecking _get_od_status() fp adapter's get() - redundant?
            mock_request.assert_called_with(config, content_type="application/json")
            assert self.test_adapter.fp_data["value"] == [rc_value]

    def test_detector_get_od_status_misnamed_adapter(self):
        """Test detector throws exception on misnamed adapter."""
        with patch("hexitec.adapter.ApiAdapterRequest"):
            # Initialise adapters in adapter
            self.test_adapter.detector.adapters = self.test_adapter.adapters
            adapter = "wRong"
            rc_value = self.test_adapter.detector._get_od_status(adapter)
            response = {"Error": "Adapter {} not found".format(adapter)}
            assert response == rc_value

    def test_connect_hardware(self):
        """Test function works OK."""
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.bias_clock_running = False
            self.test_adapter.detector.connect_hardware("")
            mock_loop.instance().add_callback.assert_called_with(self.test_adapter.detector.start_bias_clock)

    def test_start_bias_clock(self):
        """Test function starch bias clock if not already running."""
        self.test_adapter.detector.fems[0].bias_refresh_interval = 60.0
        self.test_adapter.detector.start_bias_clock()
        bias_clock_running = self.test_adapter.detector.bias_clock_running
        init_time = self.test_adapter.detector.bias_init_time
        assert bias_clock_running is True
        assert pytest.approx(init_time) == time.time()

    def test_poll_bias_clock_allow_collection_outside_outside_of_bias_window(self):
        """Test function allows data collection outside bias (blackout) window."""
        bri = 60.0
        bvst = 3.0
        trvh = 2.0
        self.test_adapter.detector.fems[0].bias_refresh_interval = bri
        self.test_adapter.detector.fems[0].bias_voltage_settle_time = bvst
        self.test_adapter.detector.fems[0].time_refresh_voltage_held = trvh
        self.test_adapter.detector.bias_init_time = time.time()
        self.test_adapter.detector.collect_and_bias_time = bri + bvst + trvh
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.poll_bias_clock()
            mock_loop.instance().call_later.assert_called_with(0.1,
                                                               self.test_adapter.detector.poll_bias_clock)

    def test_poll_bias_clock_blocks_collection_during_bias_window(self):
        """Test function blocks data collection during bias window."""
        bri = 60.0
        bvst = 3.0
        trvh = 2.0
        self.test_adapter.detector.fems[0].bias_refresh_interval = bri
        self.test_adapter.detector.fems[0].bias_voltage_settle_time = bvst
        self.test_adapter.detector.fems[0].time_refresh_voltage_held = trvh
        # Trick bias clock to go beyond collection window
        self.test_adapter.detector.bias_init_time = time.time() - bri
        self.test_adapter.detector.collect_and_bias_time = bri + bvst + trvh
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.poll_bias_clock()
            assert self.test_adapter.detector.bias_blocking_acquisition is True
            mock_loop.instance().call_later.assert_called_with(0.1,
                                                               self.test_adapter.detector.poll_bias_clock)

    def test_poll_bias_clock_reset_bias_clock_beyond_blackout_period(self):
        """Test function resets bias clock (collection allowed) when bias blackout exceeded."""
        bri = 60.0
        bvst = 3.0
        trvh = 2.0
        self.test_adapter.detector.fems[0].bias_refresh_interval = bri
        self.test_adapter.detector.fems[0].bias_voltage_settle_time = bvst
        self.test_adapter.detector.fems[0].time_refresh_voltage_held = trvh
        self.test_adapter.detector.collect_and_bias_time = bri + bvst + trvh
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.poll_bias_clock()
            mock_loop.instance().call_later.assert_called_with(0.1,
                                                               self.test_adapter.detector.poll_bias_clock)

    def test_initialise_hardware(self):
        """Test function initialises hardware.

        First initialisation means 2 frames should be collected.
        """
        frames = 10
        self.test_adapter.detector.number_frames = frames
        self.test_adapter.detector.first_initialisation = True
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.initialise_hardware("")
            assert self.test_adapter.detector.acquisition_in_progress is True
            assert self.test_adapter.detector.backed_up_number_frames == frames
            assert self.test_adapter.detector.number_frames == 2
            mock_loop.instance().call_later.assert_called_with(0.5,
                                                               self.test_adapter.detector.check_fem_finished_sending_data)

    def test_disconnect_hardware(self):
        """Test function disconnects hardware and stops bias clock."""
        self.test_adapter.detector.bias_clock_running = True
        self.test_adapter.detector.disconnect_hardware("")
        assert self.test_adapter.detector.bias_clock_running is False

    def test_set_duration_enable_true(self):
        """Test function can update duration enable to True."""
        self.test_adapter.detector.set_duration_enable(True)
        assert self.test_adapter.detector.duration_enable is True

    def test_set_duration_enable_False(self):
        """Test function can update duration enable to False."""
        self.test_adapter.detector.set_duration_enable(False)
        assert self.test_adapter.detector.duration_enable is False

    def test_set_number_frames(self):
        """Test function sets number of frames."""
        frames = 13
        self.test_adapter.detector.set_number_frames(frames)
        assert self.test_adapter.detector.number_frames == frames

    def test_set_duration(self):
        """Test function sets collection duration."""
        duration = 2
        self.test_adapter.detector.set_duration(duration)
        assert self.test_adapter.detector.duration == duration

    def test_detector_initialize(self):
        """Test function can initialise adapters."""
        adapters = {
            "proxy": Mock(),
            "file_interface": Mock(),
            "fp": Mock(),
        }
        self.test_adapter.adapter.initialize(adapters)
        assert adapters == self.test_adapter.detector.adapters
        self.test_adapter.detector.daq.initialize.assert_called_with(adapters)

    def test_detector_set_acq(self):
        """Test function can set number of frames."""
        self.test_adapter.detector.set_number_frames(5)
        assert self.test_adapter.detector.number_frames == 5

    def test_detector_acquisition_handles_extended_acquisition(self):
        """Test function handles acquisition spanning >1 collection windows."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=False
        )
        self.test_adapter.detector.fems[0].bias_voltage_refresh = True
        self.test_adapter.detector.bias_blocking_acquisition = False
        self.test_adapter.detector.fems[0].bias_refresh_interval = 2
        self.test_adapter.detector.bias_init_time = time.time()
        self.test_adapter.detector.adapters = self.test_adapter.adapters
        with patch("hexitec.adapter.IOLoop"):
            self.test_adapter.detector.acquisition("data")
            assert self.test_adapter.detector.extended_acquisition is True

    def test_detector_acquisition_correct(self):
        """Test acquisition function works."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=False
        )
        self.test_adapter.detector.fems[0].bias_voltage_refresh = False
        self.test_adapter.detector.first_initialisation = True
        self.test_adapter.detector.adapters = self.test_adapter.adapters
        self.test_adapter.detector.acquisition("data")
        self.test_adapter.detector.daq.start_acquisition.assert_called_with(10)

    def test_detector_acquisition_respects_bias_blocking(self):
        """Test function won't acquire data while bias blocking."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=False
        )
        self.test_adapter.detector.fems[0].bias_voltage_refresh = True
        self.test_adapter.detector.bias_blocking_acquisition = True
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.acquisition("data")
            mock_loop.instance().call_later.assert_called_with(0.1,
                                                               self.test_adapter.detector.acquisition)

    def test_detector_acquisition_respects_collection_window(self):
        """Test function won't acquire data time window to small (i.e. < 0s)."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=False
        )
        self.test_adapter.detector.fems[0].bias_voltage_refresh = True
        self.test_adapter.detector.bias_blocking_acquisition = False
        self.test_adapter.detector.fems[0].bias_refresh_interval = 2
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.acquisition("data")
            mock_loop.instance().call_later.assert_called_with(0.09,
                                                               self.test_adapter.detector.acquisition)

    def test_await_daq_ready_waits_for_daq(self):
        """Test adapter's await_daq_ready waits for DAQ to be ready."""
        self.test_adapter.detector.daq.configure_mock(
            file_writing=False
        )
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.await_daq_ready()
            mock_loop.instance().call_later.assert_called_with(0.05,
                                                               self.test_adapter.detector.await_daq_ready)

    def test_await_daq_ready_triggers_fem(self):
        """Test adapter's await_daq_ready triggers FEM(s) when ready."""
        self.test_adapter.detector.daq.configure_mock(
            file_writing=True
        )
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.await_daq_ready()
            mock_loop.instance().call_later.assert_called_with(0.08,
                                                               self.test_adapter.detector.trigger_fem_acquisition)

    def test_trigger_fem_acquisition(self):
        """Test trigger data acquisition in FEM(s)."""
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.trigger_fem_acquisition()
            mock_loop.instance().call_later.assert_called_with(0.0,
                                                               self.test_adapter.detector.check_fem_finished_sending_data)
            init_time = self.test_adapter.detector.fem_start_timestamp
            assert pytest.approx(init_time) == time.time()

    def test_detector_acquisition_prevents_new_acquisition_whilst_one_in_progress(self):
        """Test adapter won't start acquisition whilst one already in progress."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=True
        )
        self.test_adapter.detector.acquisition("data")
        self.test_adapter.detector.daq.start_acquisition.assert_not_called()

    def test_check_fem_finished_sending_data_loop_if_hardware_busy(self):
        """Test function calls itself while fem busy sending data."""
        self.test_adapter.detector.fems[0].hardware_busy = True
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.check_fem_finished_sending_data()
            mock_loop.instance().call_later.assert_called_with(0.5,
                                                               self.test_adapter.detector.check_fem_finished_sending_data)

    def test_check_fem_finished_sending_data_acquire_outstanding_frames(self):
        """Test function calls acquire to collect all required frames.

        Where collection spans single bias window, need to revisit acquisition()
        multiple time(s).
        """
        self.test_adapter.detector.fems[0].hardware_busy = False
        self.test_adapter.detector.extended_acquisition = True
        self.test_adapter.detector.frames_already_acquired = 10
        self.test_adapter.detector.number_frames = 20
        with patch("hexitec.adapter.IOLoop") as mock_loop:
            self.test_adapter.detector.check_fem_finished_sending_data()
            mock_loop.instance().add_callback.assert_called_with(self.test_adapter.detector.acquisition)

    def test_check_fem_finished_sending_data_resets_variables_on_completion(self):
        """Test function resets variables when all data has been sent.

        Testing scenario: user requested 10 frames on cold start, therefore
        system is acquiring two frames as part of initialisation.
        """
        frames = 10
        self.test_adapter.detector.number_frames = 2
        self.test_adapter.detector.number_frames = frames
        self.test_adapter.detector.fems[0].hardware_busy = False
        self.test_adapter.detector.extended_acquisition = False
        self.test_adapter.detector.first_initialisation = True
        with patch("hexitec.adapter.IOLoop"):
            self.test_adapter.detector.check_fem_finished_sending_data()
            assert self.test_adapter.detector.number_frames == frames
            assert self.test_adapter.detector.initial_acquisition is True
            assert self.test_adapter.detector.extended_acquisition is False
            assert self.test_adapter.detector.acquisition_in_progress is False

    def test_cancel_acquisition(self):
        """Test function can cancel (in software) ongoing acquisition."""
        self.test_adapter.detector.daq.configure_mock(
            in_progress=False
        )
        self.test_adapter.detector.fems[0].bias_voltage_refresh = False
        self.test_adapter.detector.first_initialisation = True
        self.test_adapter.detector.adapters = self.test_adapter.adapters
        # NOTE(review): debug print leftover — consider removing.
        print(self.test_adapter.adapters)
        self.test_adapter.detector.fems[0].stop_acquisition = False
        self.test_adapter.detector.cancel_acquisition()
        assert self.test_adapter.detector.fems[0].stop_acquisition is True

    def test_collect_offsets(self):
        """Test function initiates collect offsets."""
        self.test_adapter.detector.fems[0].hardware_busy = False
        self.test_adapter.detector._collect_offsets("")
        self.test_adapter.detector.fems[0].collect_offsets.assert_called()
        # assert self.test_adapter.detector.fems[0].hardware_busy is True

    def test_commit_configuration(self):
        """Test function calls daq's commit_configuration."""
        self.test_adapter.detector.commit_configuration("")
        self.test_adapter.detector.daq.commit_configuration.assert_called()
| StarcoderdataPython |
6468995 | <filename>package/tests/test_ami_management/test_operations/test_delete_operation.py
from unittest import TestCase
from botocore.exceptions import ClientError
from mock import Mock, MagicMock, call
from cloudshell.cp.aws.domain.ami_management.operations.delete_operation import DeleteAMIOperation
class TestDeleteOperation(TestCase):
    """Unit tests for DeleteAMIOperation with all AWS collaborators mocked."""

    def setUp(self):
        self.ec2_session = Mock()
        self.tag_service = Mock()
        self.security_group_service = Mock()
        self.elastic_ip_service = Mock()
        self.delete_operation = DeleteAMIOperation(Mock(), Mock(), self.security_group_service, self.tag_service,
                                                   self.elastic_ip_service)
        self.instance = Mock()
        # Default: instance has no associated VPC elastic addresses.
        self.instance.vpc_addresses.all = Mock(return_value=list())
        self.logger = Mock()
        self.delete_operation.instance_service.get_instance_by_id = Mock(return_value=self.instance)

    def test_delete_operation(self):
        self.instance.security_groups = MagicMock()
        test_address_1 = self.instance.VpcAddress()
        test_address_2 = self.instance.VpcAddress()
        self.instance.vpc_addresses.all = Mock(return_value=[test_address_1, test_address_2])
        self.delete_operation.elastic_ip_service.release_elastic_address = Mock()
        self.delete_operation.delete_instance(self.logger, self.ec2_session, 'id')
        # NOTE(review): `.called_with(...)` is NOT an assertion — it silently
        # creates a child mock. Presumably `assert_called_with` was intended;
        # verify the actual call signature before changing it.
        self.delete_operation.instance_service.get_instance_by_id.called_with(self.ec2_session, 'id')
        self.delete_operation.instance_service.terminate_instance.assert_called_once_with(self.instance)
        self.delete_operation.elastic_ip_service.release_elastic_address.assert_called()
        self.delete_operation.elastic_ip_service.release_elastic_address.assert_has_calls([
            call(test_address_1), call(test_address_2)
        ])

    def test_delete_operation_with_exclusive_security_group(self):
        # arrange
        sg_desc = {'GroupId': 'sg_id'}
        self.instance.security_groups = [sg_desc]
        sg = Mock()
        self.ec2_session.SecurityGroup = Mock(return_value=sg)
        self.tag_service.find_isolation_tag_value = Mock(return_value='Exclusive')
        # act
        self.delete_operation.delete_instance(self.logger, self.ec2_session, 'id')
        # assert
        self.assertTrue(self.tag_service.find_isolation_tag_value.called)
        self.security_group_service.delete_security_group.assert_called_with(sg)

    def test_delete_operation_instance_not_exist(self):
        self.instance.security_groups = MagicMock()
        # Simulate AWS reporting the instance id as already gone.
        error_response = {'Error': {
            'Code': 'InvalidInstanceID.NotFound'
        }}
        self.delete_operation.instance_service.get_instance_by_id = Mock(side_effect=ClientError(error_response, 'Test'))
        # act
        self.delete_operation.delete_instance(self.logger, self.ec2_session, 'id')
        # assert
        self.logger.info.assert_called_with("Aws instance id was already terminated")
        assert not self.delete_operation.instance_service.terminate_instance.called
| StarcoderdataPython |
150663 | <reponame>avi-pal/al-go-rithms<gh_stars>1000+
"""
An isogram is a word that has no repeating letters, consecutive or non-consecutive.
For example "something" and "brother" are isograms, where as "nothing" and "sister" are not.
Below method compares the length of the string with the length (or size) of the set of the same string.
The set of the string removes all duplicates, therefore if it is equal to the original string, then its an isogram
"""
# Function to check if string is an isogram
def check_isogram(string_to_be_evaluated):
if len(string_to_be_evaluated) == len(set(string_to_be_evaluated.lower())):
return True
return False
if __name__ == '__main__':
    string_one = input("Enter string to check if it's an isogram:")
    is_isogram = check_isogram(string_one)
    # BUG FIX: the original tested the function object `check_isogram`
    # (always truthy) instead of the boolean result `is_isogram`, so the
    # "is an Isogram" branch was taken unconditionally.
    if is_isogram:
        print("The string has no repeated letters and is therefore an Isogram.")
    else:
        print("The string is not an Isogram.")
| StarcoderdataPython |
3457531 |
def aaa():
    """Placeholder that does nothing and implicitly returns None."""
    return None
def bbb():
    """Placeholder that does nothing and implicitly returns None."""
    return None
<caret> | StarcoderdataPython |
3340327 | import json
import unittest
from fleece import authpolicy
class AuthpolicyTests(unittest.TestCase):
"""Tests for :class: `fleece.authpolicy.AuthPolicy`."""
def setUp(self):
self.aws_account_id = "000000000000"
self.resource_base_path = ("arn:aws:execute-api:*:{}:myapi/" "mystage").format(
self.aws_account_id
)
@staticmethod
def generate_policy(effect, resources, condition=None):
policy_template = {
"principalId": "foo",
"policyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Action": "execute-api:Invoke",
"Effect": effect,
"Resource": resources,
}
],
},
"context": {},
}
if condition:
policy_template["policyDocument"]["Statement"][0]["Condition"] = condition
return policy_template
@staticmethod
def validate_policies(policy1, policy2):
assert json.dumps(policy1, sort_keys=True) == json.dumps(
policy2, sort_keys=True
)
def test_allow_all(self):
expected_policy = self.generate_policy(
"Allow", [self.resource_base_path + "/*/*"]
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.allow_all_methods()
self.validate_policies(expected_policy, policy.build())
def test_deny_all(self):
expected_policy = self.generate_policy(
"Deny", [self.resource_base_path + "/*/*"]
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.deny_all_methods()
self.validate_policies(expected_policy, policy.build())
def test_allow_method(self):
expected_policy = self.generate_policy(
"Allow", [self.resource_base_path + "/GET/test/path"],
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.allow_method("GET", "/test/path")
self.validate_policies(expected_policy, policy.build())
def test_allow_method_with_conditions(self):
condition = {"DateLessThan": {"aws:CurrentTime": "foo"}}
expected_policy = self.generate_policy(
"Allow", [self.resource_base_path + "/GET/test/path"], condition=condition,
)
# NOTE(ryandub): I think there is a bug with conditions in the
# upstream source this is based on that appends an extra statement.
# Need to investigate this more and fix if necessary.
expected_policy["policyDocument"]["Statement"].append(
{"Action": "execute-api:Invoke", "Effect": "Allow", "Resource": []}
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.allow_method_with_conditions("GET", "/test/path", condition)
self.validate_policies(expected_policy, policy.build())
def test_deny_method(self):
expected_policy = self.generate_policy(
"Deny", [self.resource_base_path + "/GET/test/path"]
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.deny_method("GET", "/test/path")
self.validate_policies(expected_policy, policy.build())
def test_deny_method_with_conditions(self):
condition = {"DateLessThan": {"aws:CurrentTime": "foo"}}
expected_policy = self.generate_policy(
"Deny", [self.resource_base_path + "/GET/test/path"], condition=condition,
)
# NOTE(ryandub): I think there is a bug with conditions in the
# upstream source this is based on that appends an extra statement.
# Need to investigate this more and fix if necessary.
expected_policy["policyDocument"]["Statement"].append(
{"Action": "execute-api:Invoke", "Effect": "Deny", "Resource": []}
)
policy = authpolicy.AuthPolicy(
self.aws_account_id, principal="foo", rest_api_id="myapi", stage="mystage"
)
policy.deny_method_with_conditions("GET", "/test/path", condition)
self.validate_policies(expected_policy, policy.build())
| StarcoderdataPython |
168900 | import os
import json
# Resolve the pyWrkspLoc repo location: prefer the PYWRKSP environment
# variable; otherwise prompt the user for the path relative to HOME.
try:
    pyWrkspLoc = os.environ["PYWRKSP"]
except KeyError:
    pyWrkspLoc = os.environ["HOME"] + input('Since you do not have the PYWRSKP env var '
                                            '\nPlease enter the pwd for the pyWrskp repo not including the '
                                            '"home" section')
class AnimalChoser:
    """Interactive animal-guessing game backed by two JSON data files.

    One file stores the known animals with their attribute lists, the
    other stores the full set of attribute questions to ask.
    """

    def __init__(self, pywrskp):
        # pywrskp: root of the pyWrskp repository; data files live under docs/.
        self.name_1 = pywrskp + '/docs/txt-files/animal_chooser_options.txt'
        self.name_2 = pywrskp + '/docs/txt-files/animal_chooser_atturbites.txt'
        self.options = []
        self.atturbites = []
        self.load()
        self.correct_atturbites = []
        do = input('would you like to add or play?')
        if do == 'play':
            print('The current options for this game are:')
            for item in self.options:
                print(item['name'])
            self.guess()
        else:
            self.add()

    def guess(self):
        """Ask every attribute question, then match the answers against known animals."""
        for item in self.atturbites:
            yes_or_no = input('is this animal/does it have {} (y/n)?'.format(item['name']))
            item['y/n'] = yes_or_no
        for item in self.atturbites:
            if item['y/n'] == 'y':
                self.correct_atturbites.append(item['name'])
        choosen = False
        # Sort both sides so list equality ignores ordering.
        for item in self.options:
            item['info'] = sorted(item['info'])
        self.correct_atturbites = sorted(self.correct_atturbites)
        for item in self.options:
            if item['info'] == self.correct_atturbites:
                print('your animal is {}'.format(item['name']))
                choosen = True
                break
        if not choosen:
            print("This program can figure out what you choose, make sure it is on this list:")
            for item in self.options:
                print(item['name'])

    def load(self):
        """Load both JSON data files; exit with code 5 if either is missing."""
        try:
            with open(self.name_1) as json_file:
                self.options = json.load(json_file)
        except FileNotFoundError:
            print('This file does not exist (num1)')
            exit(5)
        try:
            with open(self.name_2) as json_file:
                self.atturbites = json.load(json_file)
        except FileNotFoundError:
            print('This file does not exist (num2)')
            exit(5)

    def add(self):
        """Interactively add a new animal plus any new attributes, then persist both files."""
        new_name = input('What is the name of this animal?')
        new = {"name": new_name, "info": []}
        new_attrbs = []
        print('What are the atturbuites?')
        while True:
            attrb = input()
            if attrb == '':
                break
            new_attrbs.append(attrb)
            new["info"].append(attrb)
        # Bug fix: the old dedup loop compared each new attribute *string*
        # against the stored attribute *dicts* (never equal) and used
        # ``del item``, which only unbinds the loop variable -- so every
        # attribute was re-appended as a duplicate question. Compare the
        # stored names instead.
        known_names = {entry['name'] for entry in self.atturbites}
        for item in new_attrbs:
            if item not in known_names:
                self.atturbites.append({'name': item, "y/n": ""})
                known_names.add(item)
        self.options.append(new)
        with open(self.name_1, 'w') as outfile:
            json.dump(self.options, outfile)
        with open(self.name_2, 'w') as outfile:
            json.dump(self.atturbites, outfile)
# Starts the interactive game as soon as the module runs.
# NOTE(review): consider guarding with ``if __name__ == '__main__':`` so
# importing this module does not block on input().
game = AnimalChoser(pyWrkspLoc)
| StarcoderdataPython |
6493960 | import re
import nltk
import underthesea
class DataUtils:
    """Thin wrapper around ``underthesea`` for Vietnamese text processing."""

    def __init__(self, lang='vi'):
        # Only Vietnamese is wired up; ``lang`` is kept for future use.
        self.lang = lang
        self.control = underthesea

    def nomalize_document(self, document, pattern=" +", repl=" ", reverse=False):
        """Strip the document and collapse runs matching *pattern* into *repl*.

        ``reverse`` is unused; it is kept for interface compatibility.
        """
        return re.sub(pattern, repl, document.strip())

    def split_sentences(self, document, sent_split=[".", "\n", "!", "?"]):
        """Split *document* on the given delimiters, dropping empty pieces."""
        sent_pattern = "[" + "".join(sent_split) + "]"
        return list(filter(lambda x: x not in [' ', ''], re.split(sent_pattern, document)))

    def tokenize(self, sent, black_list=[]):
        """Word-tokenize *sent*. ``black_list`` is currently unused here."""
        return self.control.word_tokenize(sent)

    def pos_tag(self, sent, black_list=[]):
        """POS-tag *sent*, dropping tokens whose tag is in *black_list*."""
        return [(t, p) for t, p in self.control.pos_tag(sent) if p not in black_list]

    def chunk_tokenize(self, sent):
        """Return underthesea's chunking of *sent*."""
        return self.control.chunk(sent)
3311844 | #
# Copyright (c) 2016, 2018, Oracle and/or its affiliates.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import mx, mx_benchmark, mx_sulong, mx_buildtools
import os
import mx_subst
from os.path import join, exists
from mx_benchmark import VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite
def _benchmarksDirectory():
    """Return the sulong-benchmarks checkout that sits next to the sulong suite."""
    suite_parent = os.path.abspath(join(mx.suite('sulong').dir, os.pardir))
    return join(suite_parent, 'sulong-benchmarks')
# Extra compiler flags from the caller's CPPFLAGS environment variable;
# appended to every benchmark build command below.
_env_flags = []
if 'CPPFLAGS' in os.environ:
    _env_flags = os.environ['CPPFLAGS'].split(' ')
class SulongBenchmarkSuite(VmBenchmarkSuite):
    """mx benchmark suite running the C benchmarks from sulong-benchmarks."""

    def group(self):
        return 'Graal'

    def subgroup(self):
        return 'sulong'

    def name(self):
        return 'csuite'

    def benchmarkList(self, bmSuiteArgs):
        # A benchmark is any subdirectory of sulong-benchmarks with a Makefile.
        benchDir = _benchmarksDirectory()
        if not exists(benchDir):
            mx.abort('Benchmarks directory {} is missing'.format(benchDir))
        return [f for f in os.listdir(benchDir) if os.path.isdir(join(benchDir, f)) and os.path.isfile(join(join(benchDir, f), 'Makefile'))]

    def benchHigherScoreRegex(self):
        # Matches "benchname: 1.234" lines, optionally prefixed with "### ".
        return r'^(### )?(?P<benchmark>[a-zA-Z0-9\.\-_]+): +(?P<score>[0-9]+(?:\.[0-9]+)?)'

    def failurePatterns(self):
        return [
            re.compile(r'error:'),
            re.compile(r'Exception')
        ]

    def successPatterns(self):
        return [re.compile(r'^(### )?([a-zA-Z0-9\.\-_]+): +([0-9]+(?:\.[0-9]+)?)', re.MULTILINE)]

    def rules(self, out, benchmarks, bmSuiteArgs):
        # Parse each score line into a "time" datapoint; lower is better.
        return [
            mx_benchmark.StdOutRule(self.benchHigherScoreRegex(), {
                "benchmark": ("<benchmark>", str),
                "metric.name": "time",
                "metric.type": "numeric",
                "metric.value": ("<score>", float),
                "metric.score-function": "id",
                "metric.better": "lower",
                "metric.iteration": 0,
            }),
        ]

    def createCommandLineArgs(self, benchmarks, runArgs):
        if len(benchmarks) != 1:
            mx.abort("Please run a specific benchmark (mx benchmark csuite:<benchmark-name>) or all the benchmarks (mx benchmark csuite:*)")
        return [benchmarks[0]] + runArgs

    def get_vm_registry(self):
        return native_vm_registry
class GccLikeVm(Vm):
def __init__(self, config_name, options):
self._config_name = config_name
self.options = options
def config_name(self):
return self._config_name
def c_compiler(self):
return self.compiler_name()
def cpp_compiler(self):
return self.compiler_name() + "++"
def run(self, cwd, args):
# save current Directory
self.currentDir = os.getcwd()
os.chdir(_benchmarksDirectory())
f = open(os.devnull, 'w')
benchmarkDir = args[0]
# enter benchmark dir
os.chdir(benchmarkDir)
# create directory for executable of this vm
if not os.path.exists(self.name()):
os.makedirs(self.name())
os.chdir(self.name())
if os.path.exists('bench'):
os.remove('bench')
env = os.environ.copy()
env['CFLAGS'] = ' '.join(self.options + _env_flags + ['-lm', '-lgmp'])
env['CC'] = mx_buildtools.ClangCompiler.CLANG
env['VPATH'] = '..'
cmdline = ['make', '-f', '../Makefile']
print env
print os.getcwd()
print cmdline
mx.run(cmdline, out=f, err=f, env=env)
myStdOut = mx.OutputCapture()
retCode = mx.run(['./bench'], out=myStdOut, err=f)
print myStdOut.data
# reset current Directory
os.chdir(self.currentDir)
return [retCode, myStdOut.data]
class GccVm(GccLikeVm):
    """GCC-backed native benchmark VM."""

    def __init__(self, config_name, options):
        super(GccVm, self).__init__(config_name, options)

    def compiler_name(self):
        return "gcc"

    def name(self):
        return "gcc"
class ClangVm(GccLikeVm):
    """Clang-backed native benchmark VM, resolved from the LLVM toolchain."""

    def __init__(self, config_name, options):
        super(ClangVm, self).__init__(config_name, options)

    def compiler_name(self):
        # Make sure the LLVM binaries are available before resolving the path.
        mx_sulong.ensureLLVMBinariesExist()
        return mx_sulong.findLLVMProgram(mx_buildtools.ClangCompiler.CLANG)

    def name(self):
        return "clang"
class SulongVm(GuestVm):
    """Sulong guest VM: compiles a benchmark to LLVM bitcode and runs it on the host JVM."""
    def config_name(self):
        return "default"

    def name(self):
        return "sulong"

    def run(self, cwd, args):
        # Remember the caller's working directory so it can be restored even
        # if compilation or execution fails (the original leaked both the cwd
        # change and the devnull handle on any exception).
        self.currentDir = os.getcwd()
        os.chdir(_benchmarksDirectory())
        f = open(os.devnull, 'w')
        try:
            mx_sulong.ensureLLVMBinariesExist()
            benchmarkDir = args[0]
            # enter benchmark dir
            os.chdir(benchmarkDir)
            # create directory for executable of this vm
            if not os.path.exists(self.name()):
                os.makedirs(self.name())
            os.chdir(self.name())
            if os.path.exists('bench'):
                os.remove('bench')
            # build with wllvm so the LLVM bitcode can be extracted afterwards
            env = os.environ.copy()
            env['CFLAGS'] = ' '.join(_env_flags + ['-lm', '-lgmp'])
            env['LLVM_COMPILER'] = mx_buildtools.ClangCompiler.CLANG
            env['CC'] = 'wllvm'
            env['VPATH'] = '..'
            cmdline = ['make', '-f', '../Makefile']
            mx.run(cmdline, out=f, err=f, env=env)
            mx.run(['extract-bc', 'bench'], out=f, err=f)
            # run a standard -O1-like optimization pipeline over the bitcode
            mx_sulong.opt(['-o', 'bench.bc', 'bench.bc'] + ['-mem2reg', '-globalopt', '-simplifycfg', '-constprop', '-instcombine', '-dse', '-loop-simplify', '-reassociate', '-licm', '-gvn'], out=f, err=f)
            suTruffleOptions = [
                '-Dgraal.TruffleBackgroundCompilation=false',
                '-Dgraal.TruffleTimeThreshold=1000000',
                '-Dgraal.TruffleInliningMaxCallerSize=10000',
                '-Dgraal.TruffleCompilationExceptionsAreFatal=true',
                mx_subst.path_substitutions.substitute('-Dpolyglot.llvm.libraryPath=<path:SULONG_LIBS>'),
                '-Dpolyglot.llvm.libraries=libgmp.so.10']
            sulongCmdLine = suTruffleOptions + mx_sulong.getClasspathOptions() + ['-XX:-UseJVMCIClassLoader', "com.oracle.truffle.llvm.launcher.LLVMLauncher"] + ['bench.bc']
            return self.host_vm().run(cwd, sulongCmdLine + args)
        finally:
            f.close()
            # restore the caller's working directory
            os.chdir(self.currentDir)

    def hosting_registry(self):
        return java_vm_registry
_suite = mx.suite("sulong")

# Registry of native VMs; Java host registries are known so guest VMs
# (Sulong) can be hosted on a JVM.
native_vm_registry = VmRegistry("Native", known_host_registries=[java_vm_registry])
# Register gcc and clang at every optimization level.
native_vm_registry.add_vm(GccVm('O0', ['-O0']), _suite)
native_vm_registry.add_vm(ClangVm('O0', ['-O0']), _suite)
native_vm_registry.add_vm(GccVm('O1', ['-O1']), _suite)
native_vm_registry.add_vm(ClangVm('O1', ['-O1']), _suite)
native_vm_registry.add_vm(GccVm('O2', ['-O2']), _suite)
native_vm_registry.add_vm(ClangVm('O2', ['-O2']), _suite)
native_vm_registry.add_vm(GccVm('O3', ['-O3']), _suite)
native_vm_registry.add_vm(ClangVm('O3', ['-O3']), _suite)
# Sulong itself, registered with priority 10.
native_vm_registry.add_vm(SulongVm(), _suite, 10)
| StarcoderdataPython |
3367731 | <reponame>topteulen/timely-beliefs
"""Function store for computing knowledge horizons given a certain event start.
When passed an event_start = None, these functions return bounds on the knowledge horizon,
i.e. a duration window in which the knowledge horizon must lie (e.g. between 0 and 2 days before the event start)."""
from typing import Optional, Tuple, Union
from datetime import datetime, timedelta
from timely_beliefs.sensors.utils import datetime_x_days_ago_at_y_oclock
def constant_timedelta(
    event_start: Optional[datetime], knowledge_horizon: timedelta
) -> Union[timedelta, Tuple[timedelta, timedelta]]:
    """Knowledge horizon is a constant timedelta; with no event start, both bounds equal it."""
    if event_start is not None:
        return knowledge_horizon
    return knowledge_horizon, knowledge_horizon
def timedelta_x_days_ago_at_y_oclock(
    event_start: Optional[datetime], x: int, y: Union[int, float], z: str
) -> Union[timedelta, Tuple[timedelta, timedelta]]:
    """Knowledge horizon anchored to y o'clock (timezone *z*), x days before the event."""
    if event_start is not None:
        anchor = datetime_x_days_ago_at_y_oclock(event_start, x, y, z)
        return event_start - anchor
    # No concrete event start: return bounds on the horizon. The +/-2 hour
    # slack accounts for possible double daylight saving time offsets
    # relative to standard time.
    lower = timedelta(days=x, hours=-y - 2)
    upper = timedelta(days=x + 1, hours=-y + 2)
    return lower, upper
| StarcoderdataPython |
8028927 | from django.contrib import admin
from voc.models import lesson, vocabulary, history, record, kkdata
class kkdataAdmin(admin.ModelAdmin):
    """Admin list view for kkdata: word and its KK phonetic transcription."""
    list_display = ('word', 'kk')
class lessonAdmin(admin.ModelAdmin):
    """Admin list view for lesson: name and word quantity."""
    list_display = ('name', 'quantity')
class vocabularyAdmin(admin.ModelAdmin):
    """Admin list view for vocabulary entries (number, word, pronunciation, translation...)."""
    list_display = ('number', 'name', 'kk', 'pos', 'tran', 'sen', 'voca')
class historyAdmin(admin.ModelAdmin):
    """Admin list view for history: username, course, and 30 topic/history column pairs."""
    # Same columns as the original hand-written tuple, generated instead.
    list_display = (
        ('username', 'course')
        + tuple('topic%d' % n for n in range(1, 31))
        + tuple('history%d' % n for n in range(1, 31))
    )
class recordAdmin(admin.ModelAdmin):
    """Admin list view for record: username, 30 result/answer column pairs, score and relations."""
    # Same columns as the original hand-written tuple, generated instead.
    list_display = (
        ('username',)
        + tuple('result%d' % n for n in range(1, 31))
        + tuple('answer%d' % n for n in range(1, 31))
        + ('score', 'users', 'lessons')
    )
    # search_fields = ('name')
# Expose all vocabulary-app models in the Django admin with their custom views.
admin.site.register(lesson,lessonAdmin)
admin.site.register(vocabulary,vocabularyAdmin)
admin.site.register(history,historyAdmin)
admin.site.register(record,recordAdmin)
admin.site.register(kkdata,kkdataAdmin)
# Register your models here.
3330162 | <filename>bot/reviewbot/tools/pyflakes.py
"""Review Bot tool to run pyflakes."""
from __future__ import unicode_literals
import re
from reviewbot.config import config
from reviewbot.tools.base import BaseTool
from reviewbot.utils.process import execute
class PyflakesTool(BaseTool):
    """Review Bot tool to run pyflakes."""

    name = 'Pyflakes'
    version = '1.0'
    description = 'Checks Python code for errors using Pyflakes.'
    timeout = 30

    exe_dependencies = ['pyflakes']
    file_patterns = ['*.py']

    # Matches "filename:linenum:column: msg"; linenum and column are optional.
    LINE_RE = re.compile(
        r'^(?P<filename>[^:]+)(:(?P<linenum>\d+)(:(?P<column>\d+))?)?:? '
        r'(?P<msg>.*)'
    )

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        return [config['exe_paths']['pyflakes']]

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run pyflakes.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        output, errors = execute(base_command + [path],
                                 split_lines=True,
                                 ignore_errors=True,
                                 return_errors=True)

        # pyflakes can output one of 3 things:
        #
        # 1. A lint warning about code, which looks like:
        #
        #        filename:linenum:offset msg
        #
        # 2. An unexpected error:
        #
        #        filename: msg
        #
        # 3. A syntax error, which will look like one of the following forms:
        #
        #    1. filename:linenum:offset: msg
        #       code line
        #       marker ("... ^")
        #
        #    2. filename:linenum: msg
        #       code line
        #
        # We need to handle each case. Fortunately, only #1 is sent to
        # stdout, and the rest to stderr. We can easily pattern match based
        # on where the lines came from.
        LINE_RE = self.LINE_RE

        for line in output:
            m = LINE_RE.match(line)

            if m:
                try:
                    linenum = int(m.group('linenum'))
                    # int(None) raises TypeError when the column is absent;
                    # the original only caught ValueError and crashed here.
                    column = int(m.group('column'))
                except (TypeError, ValueError):
                    # This isn't actually an info line. No idea what it is.
                    # Skip it.
                    continue

                # Report on the lint message.
                f.comment(m.group('msg'),
                          first_line=linenum,
                          start_column=column)

        i = 0

        while i < len(errors):
            m = LINE_RE.match(errors[i])

            if m:
                linenum = m.group('linenum')
                msg = m.group('msg')

                if linenum is None:
                    # This is an unexpected error. Leave a general comment.
                    f.review.general_comment(
                        'pyflakes could not process %s: %s'
                        % (f.dest_file, msg))
                else:
                    try:
                        linenum = int(linenum)

                        # Syntax errors of form #2 carry no column; fall back
                        # to the start of the line instead of crashing on
                        # int(None).
                        column = int(m.group('column') or 1)
                    except ValueError:
                        # This isn't actually an info line. Skip it. The
                        # original 'continue' here did not advance i, which
                        # looped forever on such a line.
                        i += 1
                        continue

                    f.comment(msg,
                              first_line=linenum,
                              start_column=column)

                    # Skip to the code line.
                    i += 1

                    if i + 1 < len(errors) and errors[i + 1].strip() == '^':
                        # This is a match offset line. Skip it.
                        i += 1

            # Process the next error line.
            i += 1
| StarcoderdataPython |
3511818 | <gh_stars>0
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
import mindspore as ms
def create_tensor(capcity, shapes, dtypes):
    """Build one zero-filled Tensor of shape (capcity, *shapes[i]) per shape/dtype pair."""
    return [Tensor(np.zeros(((capcity,) + shapes[i])), dtypes[i])
            for i in range(len(shapes))]
class RLBuffer(nn.Cell):
    """Replay buffer cell wrapping MindSpore's buffer append/get/sample ops."""

    def __init__(self, batch_size, capcity, shapes, types):
        super(RLBuffer, self).__init__()
        self.buffer = create_tensor(capcity, shapes, types)
        self._capacity = capcity
        # count: number of stored experiences; head: current write position.
        self.count = Parameter(Tensor(0, ms.int32), name="count")
        self.head = Parameter(Tensor(0, ms.int32), name="head")
        self.buffer_append = P.BufferAppend(self._capacity, shapes, types)
        self.buffer_get = P.BufferGetItem(self._capacity, shapes, types)
        self.buffer_sample = P.BufferSample(
            self._capacity, batch_size, shapes, types)
        # Randperm (pad=-1) provides random indices for sampling.
        self.randperm = P.Randperm(max_length=capcity, pad=-1)
        self.reshape = P.Reshape()

    @ms_function
    def append(self, exps):
        """Append one experience tuple to the buffer (runs in graph mode)."""
        return self.buffer_append(self.buffer, exps, self.count, self.head)

    @ms_function
    def get(self, index):
        """Return the experience stored at *index* (runs in graph mode)."""
        return self.buffer_get(self.buffer, self.count, self.head, index)

    @ms_function
    def sample(self):
        """Sample a random batch of experiences (runs in graph mode)."""
        count = self.reshape(self.count, (1,))
        index = self.randperm(count)
        return self.buffer_sample(self.buffer, index, self.count, self.head)
# Two fixture experiences (state, action, reward, next state) used by the test.
s = Tensor(np.array([2, 2, 2, 2]), ms.float32)
a = Tensor(np.array([0, 1]), ms.int32)
r = Tensor(np.array([1]), ms.float32)
s_ = Tensor(np.array([3, 3, 3, 3]), ms.float32)
exp = [s, a, r, s_]
exp1 = [s_, a, r, s]
@ pytest.mark.level0
@ pytest.mark.platform_x86_gpu_training
@ pytest.mark.env_onecard
def test_Buffer():
    """Smoke-test append/get/sample of RLBuffer on GPU in graph mode."""
    context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
    buffer = RLBuffer(batch_size=32, capcity=100, shapes=[(4,), (2,), (1,), (4,)], types=[
                      ms.float32, ms.int32, ms.float32, ms.float32])
    print("init buffer:\n", buffer.buffer)
    # Append more than the capacity (100) to exercise head wrap-around.
    for _ in range(0, 110):
        buffer.append(exp)
    buffer.append(exp1)
    print("buffer append:\n", buffer.buffer)
    b = buffer.get(-1)
    print("buffer get:\n", b)
    bs = buffer.sample()
    print("buffer sample:\n", bs)
| StarcoderdataPython |
8089053 | <filename>database/gui.py<gh_stars>0
import numpy as np
import pandas as pd
from PyQt5 import QtWidgets, QtCore
def fill_table(df, table, max_rows=50):
    """Repopulate the Qt table widget from *df* (up to max_rows), preserving the row selection."""
    # TODO: threading
    # read indices of currently selected rows
    selected_indexes = table.selectedIndexes()
    selected_rows = []
    for item in selected_indexes:
        selected_rows.append(item.row())
    # initialise the GUI table columns
    table.clear()
    # disable sorting to solve issues with repopulating
    table.setSortingEnabled(False)
    number_of_rows = min(max_rows, len(df.index))
    table.setRowCount(number_of_rows)
    table.setColumnCount(len(df.columns))
    table.setHorizontalHeaderLabels(df.columns)
    # fill the GUI table
    for col in range(len(df.columns)):
        for row in range(number_of_rows):
            data = df.iloc[row, col]
            item = QtWidgets.QTableWidgetItem()
            if isinstance(data, (float, np.float64)):
                # pad the floats so they'll be sorted correctly
                formatted_data = '{:.3f}'.format(data).rjust(15)
                item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
            elif isinstance(data, (int, np.int64)):
                # pad the integers so they'll be sorted correctly
                formatted_data = '{:d}'.format(data).rjust(15)
                item.setTextAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
            else:
                formatted_data = str(data)
            item.setData(QtCore.Qt.EditRole, formatted_data)
            table.setItem(row, col, item)
        table.resizeColumnToContents(col)
    # enable table sorting by columns
    table.setSortingEnabled(True)
    # temporarily set MultiSelection so selectRow() adds to the selection
    table.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
    # reselect the prevously selected rows
    # TODO: reselect by filename instead of table row number
    for row in selected_rows:
        table.selectRow(row)
    # revert MultiSelection to ExtendedSelection
    table.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def populate_combobox(items_list, item_default, combobox_list):
    """Refill every combobox with *items_list* and select *item_default* when present."""
    for box in combobox_list:
        box.clear()
        for entry in items_list:
            box.addItem(entry, 0)
        # fall back to the first entry when the default is not in the list
        default_index = box.findText(item_default) if item_default in items_list else 0
        box.setCurrentIndex(default_index)
def populate_comboboxes(config, df, numeric_comboboxes, units_comboboxes, trace_comboboxes, comboboxes):
    """Fill every group of comboboxes from the config file and the dataframe's columns.

    numeric_comboboxes get the dataframe's numeric/datetime column names,
    units_comboboxes get the unit names from the '<KEY> UNIT FACTORS' config
    sections, trace_comboboxes get only their configured default, and plain
    comboboxes get the options listed under 'GUI OPTIONS'.
    """
    default_value_keys = [x[0] for x in config.items('DEFAULT VALUES')]
    # TODO: also include datetime.time values
    options = df.select_dtypes(include=['float64', 'int64', 'datetime64[ns]']).columns.values
    for key, value in numeric_comboboxes.items():
        if key in default_value_keys:
            default_value = config['DEFAULT VALUES'][key]
        else:
            default_value = options[0]
        populate_combobox(sorted(np.unique(options)),
                          default_value,
                          value)
    for key, value in units_comboboxes.items():
        options = [x[0] for x in config.items('%s UNIT FACTORS' % key.upper())]
        populate_combobox(options,
                          options[0],  # TODO: choose the one with value 1, in case it's not the first
                          [value])
    for key, value in trace_comboboxes.items():
        if key in default_value_keys:
            default_value = config['DEFAULT VALUES'][key]
        else:
            # no configured default: the combobox gets a single None entry
            default_value = None
        populate_combobox([default_value],
                          default_value,
                          value)
    for key, value in comboboxes.items():
        options = list(filter(None, [x.strip() for x in config['GUI OPTIONS'][key].splitlines()]))
        if key in default_value_keys:
            default_value = config['DEFAULT VALUES'][key]
        else:
            default_value = options[0]
        populate_combobox(options,
                          default_value,
                          value)
def populate_dates(column_date_local, df, start_date_edit, end_date_edit):
    """Initialise the date range editors from the dataframe's local-date column, if present."""
    if column_date_local not in df.columns:
        return
    column = df[column_date_local]
    start_date_edit.setDate(column.min())
    end_date_edit.setDate(column.max())
def read_units(units_comboboxes):
    """Collect the currently selected unit text from each unit combobox."""
    return {name: box.currentText() for name, box in units_comboboxes.items()}
def list_selection(widget):
    """Return the text of every currently selected item in *widget*."""
    return [item.text() for item in widget.selectedItems()]
def populate_list(df, column, widget):
    """Populate the list widget with the column's unique values, sorted and pre-selected."""
    widget.clear()
    for row, value in enumerate(np.sort(df[column].unique())):
        widget.addItem(value)
        widget.item(row).setSelected(True)
def read_table(table, rows=None):
    """Read the GUI table (all rows, or only the given row numbers) into a pandas DataFrame."""
    # read GUI table size
    if rows is None:
        rows = []
    if not len(rows):
        rows = range(table.rowCount())
    columns = range(table.columnCount())
    # read column names from the GUI table
    column_names = []
    for column in columns:
        column_names.append(table.horizontalHeaderItem(column).text())
    # initialise dataframe with certain columns
    df = pd.DataFrame(columns=column_names)
    # read data from GUI table
    for row in rows:
        for column_number, column_name in enumerate(column_names):
            df.loc[row, column_name] = table.item(row, column_number).data(0)
    # TODO: make this formatting more automatic
    # format data types
    datetime_column_names = ['start time', 'start time local', 'end time', 'end time local', 'timestamp']
    for column_name in column_names:
        # format dates
        if column_name in datetime_column_names:
            df[column_name] = pd.to_datetime(df[column_name])
        elif 'position' in column_name:
            df[column_name] = pd.to_numeric(df[column_name], errors='coerce')
        # change strings to numbers
        # elif column_name != 'file_name':
        #     df[column_name] = pd.to_numeric(df[column_name], errors='ignore')
    return df
def read_selected_table_rows(table):
    """Read only the currently selected rows of the GUI table into a dataframe."""
    # np.unique both deduplicates and sorts the selected row numbers
    selected_rows = np.unique([index.row() for index in table.selectedIndexes()])
    return read_table(table, selected_rows)
def get_labels_text(labels):
    """Return a mapping from label key to the label's current display text."""
    return {key: label.text() for key, label in labels.items()}
def set_labels_text(labels, text):
    """Write the *text* values into the corresponding GUI labels."""
    for key, label in labels.items():
        label.setText(text[key])
8114584 | import pandas as pd
import numpy as np
import PyPDF2
import textract
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
# Open the PDF file and extract the text of every page with PyPDF2.
filename ='JavaBasics-notes.pdf'
pdfFileObj = open(filename,'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# Reads the number of pages in the PDF
num_pages = pdfReader.numPages
# Reads through all the pages
count = 0
text = ""
while count < num_pages:
    pageObj = pdfReader.getPage(count)
    count +=1
    text += pageObj.extractText()
# Fall back to OCR (textract/tesseract) when PyPDF2 extracted no text.
# NOTE(review): ``text = text`` is a no-op kept from the original; only the
# else branch does real work here.
if text != "":
    text = text
else:
    text = textract.process('http://bit.ly/epo_keyword_extraction_document', method='tesseract', language='eng')
# Separates out the keywords (words starting with a letter) from the text
keywords = re.findall(r'[a-zA-Z]\w+',text)
# Create a dataframe of the keywords for easier processing using pandas package and prevents duplicates
df = pd.DataFrame(list(set(keywords)),columns=['keywords'])
#Calculate the number of occurences of a word ie the weight of word in the keywords extracted
def weight_calc(word, text, number_of_test_cases=1):
    """Return (occurrences, tf, idf, tf-idf) for *word* within *text*.

    tf is occurrences normalised by the text length in characters; idf is
    log(number_of_test_cases / occurrences).
    """
    number_of_occurences = len(re.findall(word, text))
    if number_of_occurences == 0:
        # Guard: the original divided by zero for words absent from the text.
        return 0, 0.0, 0.0, 0.0
    tf = number_of_occurences / float(len(text))
    idf = np.log(number_of_test_cases / float(number_of_occurences))
    tf_idf = tf * idf
    return number_of_occurences, tf, idf, tf_idf
# Compute occurrence count and tf/idf weights for every keyword.
# NOTE(review): each apply() below re-runs weight_calc, so every keyword is
# scanned four times; computing once per keyword and unpacking would be faster.
df['number_of_occurences'] = df['keywords'].apply(lambda x: weight_calc(x,text)[0])
df['tf'] = df['keywords'].apply(lambda x: weight_calc(x,text)[1])
df['idf'] = df['keywords'].apply(lambda x: weight_calc(x,text)[2])
df['tf_idf'] = df['keywords'].apply(lambda x: weight_calc(x,text)[3])
# Sort the words in the order of weights
df = df.sort_values('tf_idf',ascending=True)
# print the dataframe
print(df.head(len(keywords)))
# Stores the data in excel file
writer = pd.ExcelWriter('keywords_extracted.xlsx', engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1')
writer.save()
6561039 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.metrics.metric import Metric
from pytorch_lightning.metrics.classification import (
Accuracy,
Precision,
Recall,
Fbeta
)
from pytorch_lightning.metrics.regression import (
MeanSquaredError,
MeanAbsoluteError,
MeanSquaredLogError,
ExplainedVariance,
)
| StarcoderdataPython |
3432790 | <filename>awx/main/utils/handlers.py
# Copyright (c) 2017 Ansible by Red Hat
# All Rights Reserved.
# Python
import logging
import logging.handlers  # RSysLogHandler subclasses logging.handlers.SysLogHandler;
# 'import logging' alone does not guarantee the handlers submodule is loaded.
import os.path

# Django
from django.conf import settings
class RSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler variant for a local rsyslogd unix domain socket that
    prefers dropping log records over blocking or raising when rsyslogd is
    unavailable or stalled.
    """
    # Omit the trailing NUL byte SysLogHandler appends to each record.
    append_nul = False
    def _connect_unixsocket(self, address):
        super(RSysLogHandler, self)._connect_unixsocket(address)
        # Non-blocking socket so a stalled rsyslogd cannot hang the caller.
        self.socket.setblocking(False)
    def emit(self, msg):
        # Silently discard records when external log aggregation is switched
        # off, or when the rsyslogd socket path does not exist (yet).
        if not settings.LOG_AGGREGATOR_ENABLED:
            return
        if not os.path.exists(settings.LOGGING['handlers']['external_logger']['address']):
            return
        try:
            return super(RSysLogHandler, self).emit(msg)
        except ConnectionRefusedError:
            # rsyslogd has gone to lunch; this generally means that it's just
            # been restarted (due to a configuration change)
            # unfortunately, we can't log that because...rsyslogd is down (and
            # logging it would just take us back down this same code path)
            pass
        except BlockingIOError:
            # for <some reason>, rsyslogd is no longer reading from the domain socket, and
            # we're unable to write any more to it without blocking (we've seen this behavior
            # from time to time when logging is totally misconfigured;
            # in this scenario, it also makes more sense to just drop the messages,
            # because the alternative is blocking the socket.send() in the
            # Python process, which we definitely don't want to do)
            pass
# Plain StreamHandler by default; upgraded below when colored logs are on.
ColorHandler = logging.StreamHandler
if settings.COLOR_LOGS is True:
    try:
        from logutils.colorize import ColorizingStreamHandler
        # Rebinds the ColorHandler name: colorize each line of the formatted
        # record individually so multi-line messages stay fully colored.
        class ColorHandler(ColorizingStreamHandler):
            def format(self, record):
                message = logging.StreamHandler.format(self, record)
                return '\n'.join([
                    self.colorize(line, record)
                    for line in message.splitlines()
                ])
            # (background, foreground, bold) triples, following logutils'
            # ColorizingStreamHandler.level_map convention.
            level_map = {
                logging.DEBUG: (None, 'green', True),
                logging.INFO: (None, None, True),
                logging.WARNING: (None, 'yellow', True),
                logging.ERROR: (None, 'red', True),
                logging.CRITICAL: (None, 'red', True),
            }
    except ImportError:
        # logutils is only used for colored logs in the dev environment
        pass
| StarcoderdataPython |
8191834 | <gh_stars>1-10
# NOTE(review): Python 2 script (bare print statements) comparing a 'quinoa'
# GP implementation against GPy on a 1-D sin() regression with RBF kernels.
# Both models are fit to the same data, optimized, and their log marginal
# likelihoods and predictions compared; kept byte-identical (py2, non-PyPI
# dependency 'quinoa') apart from these comments.
import numpy as np
import quinoa as qu
import matplotlib.pyplot as plt
import GPy as gpy
from scipy import linalg
#np.random.seed(31051985)
# Training data: 100 noisy samples of sin(x).
X = np.random.normal(scale = 1, size = (100,1))
Y = np.sin(X) + 0.01 * np.random.normal(size = (100,1))
#kern = qu.RBF(1, 1, 1)
ker = gpy.kern.RBF(1, 1, 1)
kern = qu.RBF(1,1,1)
m = gpy.models.GPRegression(X, Y, ker)
gp = qu.GP(X, Y, kern)
# Dense 1-D test grid for prediction/plotting.
x = np.linspace(-4., 4., 501).reshape(501,1)
f, var = gp.predict(x)
#x0 = np.array([np.random.normal( size = (2,))]).reshape((2,1))
#fig = plt.figure(tight_layout = True)
#ax = fig.add_subplot(111)
#ax.plot(x, f, '-')
#ax.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
#ax.plot(X[:,0], Y[:,0], 'x')
#ax.set_xlim([-4, 4])
#plt.show()
m.optimize(messages = True)
print '-' * 30
print m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]
print '-' * 30
m.plot()
#plt.show()
#print gp._kern._iso
gp.optimize()
#
#print gp.argmaxvar()
print gp._log_marginal_likelihood
print m._log_marginal_likelihood
# Re-predict with optimized hyperparameters and overlay 5 posterior samples
# on the +/- 2 sigma predictive band.
f, var = gp.predict(x)
z = gp.sample(x, 5)#[:,0]
fig1 = plt.figure()
ax2 = fig1.add_subplot(111)
ax2.plot(x, f, '-')
ax2.plot(x, z, 'r--', linewidth = 1.)
ax2.fill_between(x[:,0], f - 2*np.sqrt(np.diag(var)), f + 2*np.sqrt(np.diag(var)), alpha = 0.5)
ax2.plot(X[:,0], Y[:,0], 'x')
plt.show()
#print gp.log_marginal_likelihood(np.array([m.kern.lengthscale[0], m.kern.variance[0], m.likelihood.gaussian_variance()[0]]))
| StarcoderdataPython |
9707707 | # test__sqlitedu.py
# Copyright 2019 <NAME>
# Licence: See LICENCE (BSD licence)
"""_sqlitedu _database tests"""
import unittest
import os
try:
import sqlite3
except ImportError: # Not ModuleNotFoundError for Pythons earlier than 3.6
sqlite3 = None
try:
import apsw
except ImportError: # Not ModuleNotFoundError for Pythons earlier than 3.6
apsw = None
from .. import _sqlite
from .. import _sqlitedu
from .. import filespec
from .. import recordset
from ..segmentsize import SegmentSize
from ..bytebit import Bitarray
_segment_sort_scale = SegmentSize._segment_sort_scale
class _SQLitedu(unittest.TestCase):
    """Shared fixture base: builds a concrete _D class mixing the deferred
    update Database with the base sqlite Database, bound to the module-level
    global ``dbe_module`` (sqlite3 or apsw) selected in ``__main__``.
    Saves and restores SegmentSize.db_segment_size_bytes around each test.
    """
    def setUp(self):
        self.__ssb = SegmentSize.db_segment_size_bytes
        class _D(_sqlitedu.Database, _sqlite.Database):
            def open_database(self, **k):
                super().open_database(dbe_module, **k)
        self._D = _D
    def tearDown(self):
        self.database = None
        self._D = None
        SegmentSize.db_segment_size_bytes = self.__ssb
# Same tests as test__sqlite.Database___init__ with relevant additions.
# Alternative is one test method with just the additional tests.
class Database___init__(_SQLitedu):
    """Constructor signature/attribute tests; mirrors test__sqlite's
    Database___init__ plus the deferred-update-only attributes
    (deferred_update_points, first_chunk, high_segment, ...).
    """
    def test_01(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "__init__\(\) takes from 2 to 5 positional arguments ",
                    "but 6 were given",
                )
            ),
            self._D,
            *(None, None, None, None, None),
        )
    def test_02(self):
        # Matches 'type object' before Python 3.9 but class name otherwise.
        t = r"(?:type object|solentware_base\.core\.filespec\.FileSpec\(\))"
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    t,
                    " argument after \*\* must be a mapping, ",
                    "not NoneType",
                )
            ),
            self._D,
            *(None,),
        )
        self.assertIsInstance(self._D({}), self._D)
        self.assertIsInstance(self._D(filespec.FileSpec()), self._D)
    def test_03(self):
        self.assertRaisesRegex(
            _sqlite.DatabaseError,
            "".join(("Database folder name {} is not valid",)),
            self._D,
            *({},),
            **dict(folder={}),
        )
    def test_04(self):
        database = self._D({}, folder="a")
        self.assertIsInstance(database, self._D)
        self.assertEqual(os.path.basename(database.home_directory), "a")
        self.assertEqual(os.path.basename(database.database_file), "a")
        self.assertEqual(
            os.path.basename(os.path.dirname(database.database_file)), "a"
        )
        self.assertEqual(database.specification, {})
        self.assertEqual(database.segment_size_bytes, 4000)
        self.assertEqual(database.dbenv, None)
        self.assertEqual(database.table, {})
        self.assertEqual(database.index, {})
        self.assertEqual(database.segment_table, {})
        self.assertEqual(database.ebm_control, {})
        self.assertEqual(database.ebm_segment_count, {})
        self.assertEqual(SegmentSize.db_segment_size_bytes, 4096)
        # These tests are only difference to test__sqlite.Database___init__
        self.assertEqual(database.deferred_update_points, None)
        database.set_segment_size()
        self.assertEqual(SegmentSize.db_segment_size_bytes, 4000)
        self.assertEqual(database.deferred_update_points, frozenset({31999}))
        self.assertEqual(database.first_chunk, {})
        self.assertEqual(database.high_segment, {})
        self.assertEqual(database.initial_high_segment, {})
        self.assertEqual(database.existence_bit_maps, {})
        self.assertEqual(database.value_segments, {})
    def test_05(self):
        database = self._D({})
        self.assertEqual(database.home_directory, None)
        self.assertEqual(database.database_file, None)
    # This combination of folder and segment_size_bytes arguments is used for
    # unittests, except for one to see a non-memory database with a realistic
    # segment size.
    def test_06(self):
        database = self._D({}, segment_size_bytes=None)
        self.assertEqual(database.segment_size_bytes, None)
        database.set_segment_size()
        self.assertEqual(SegmentSize.db_segment_size_bytes, 16)
        self.assertEqual(database.deferred_update_points, frozenset({127}))
# Memory databases are used for these tests.
class Database_open_database(_SQLitedu):
    """open_database()/close_database() lifecycle on an in-memory database."""
    def test_01(self):
        self.database = self._D({})
        # The expected repr prefix of the bound open_database method; depends
        # on this module being run as __main__.
        repr_open_database = "".join(
            (
                "<bound method _SQLitedu.setUp.<locals>._D.open_database of ",
                "<__main__._SQLitedu.setUp.<locals>._D object at ",
            )
        )
        self.assertEqual(
            repr(self.database.open_database).startswith(repr_open_database),
            True,
        )
    def test_02(self):
        self.database = self._D({}, segment_size_bytes=None)
        self.database.open_database()
        # Both sqlite3 and apsw expose the connection class as 'Connection'.
        self.assertEqual(self.database.dbenv.__class__.__name__, "Connection")
        self.database.close_database()
        self.assertEqual(self.database.dbenv, None)
# Memory databases are used for these tests.
class _SQLiteOpen(_SQLitedu):
    """Fixture opening an in-memory database with one file ('file1') holding
    one secondary field ('field1'); closed again in tearDown."""
    def setUp(self):
        super().setUp()
        self.database = self._D(
            filespec.FileSpec(**{"file1": {"field1"}}), segment_size_bytes=None
        )
        self.database.open_database()
    def tearDown(self):
        self.database.close_database()
        super().tearDown()
class Database_methods(_SQLiteOpen):
    """Argument validation and basic behaviour of the deferred-update API
    methods (database_cursor, unset/set_defer_update, write_existence_bit_map,
    new_deferred_root, get_ebm_segment)."""
    def test_01(self):
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "database_cursor\(\) takes from 3 to 4 ",
                    "positional arguments but 5 were given",
                )
            ),
            self.database.database_cursor,
            *(None, None, None, None),
        )
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "unset_defer_update\(\) takes 1 ",
                    "positional argument but 2 were given",
                )
            ),
            self.database.unset_defer_update,
            *(None,),
        )
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "write_existence_bit_map\(\) missing 2 required ",
                    "positional arguments: 'file' and 'segment'",
                )
            ),
            self.database.write_existence_bit_map,
        )
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "new_deferred_root\(\) missing 2 required ",
                    "positional arguments: 'file' and 'field'",
                )
            ),
            self.database.new_deferred_root,
        )
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "set_defer_update\(\) takes 1 ",
                    "positional argument but 2 were given",
                )
            ),
            self.database.set_defer_update,
            *(None,),
        )
    def test_02_database_cursor(self):
        # Cursors are deliberately unsupported while in deferred-update mode.
        self.assertRaisesRegex(
            _sqlitedu.DatabaseError,
            "database_cursor not implemented",
            self.database.database_cursor,
            *(None, None),
        )
    def test_03_unset_defer_update(self):
        self.database.start_transaction()
        self.database.unset_defer_update()
    def test_04_write_existence_bit_map(self):
        segment = 0
        b = b"\x7f\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        bs = recordset.RecordsetSegmentBitarray(segment, None, b)
        self.database.existence_bit_maps["file1"] = {}
        self.database.existence_bit_maps["file1"][segment] = bs
        self.database.write_existence_bit_map("file1", segment)
    def test_05_new_deferred_root(self):
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        self.assertEqual(
            self.database.index["file1_field1"], ["ixfile1_field1"]
        )
        # A deferred root adds a 't_<n>_' table and matching 'ixt_<n>_' index.
        self.database.new_deferred_root("file1", "field1")
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1"],
        )
        self.assertEqual(
            self.database.index["file1_field1"],
            ["ixfile1_field1", "ixt_0_file1_field1"],
        )
    def test_06_set_defer_update_01(self):
        # Empty table: segment bookkeeping stays None.
        self.database.set_defer_update()
        self.assertEqual(self.database.initial_high_segment["file1"], None)
        self.assertEqual(self.database.high_segment["file1"], None)
        self.assertEqual(self.database.first_chunk["file1"], None)
    def test_07_set_defer_update_02(self):
        cursor = self.database.dbenv.cursor()
        try:
            cursor.execute(
                "insert into file1 ( Value ) values ( ? )", ("Any value",)
            )
        finally:
            cursor.close()
        # In apsw, at Python3.6 when creating these tests, the insert does not
        # start a transaction but in sqlite3, at Python3.7 when creating these
        # tests, it does.
        # The insert is there only to drive set_defer_update() through the
        # intended path. In normal use the table will already be occupied or
        # not and taken as found.
        try:
            self.database.commit()
        except Exception as exc:
            if exc.__class__.__name__ != "SQLError":
                raise
        self.database.set_defer_update()
        self.assertEqual(self.database.initial_high_segment["file1"], 0)
        self.assertEqual(self.database.high_segment["file1"], 0)
        self.assertEqual(self.database.first_chunk["file1"], True)
    def test_08_set_defer_update_03(self):
        # Simulate normal use: the insert is not part of the deferred update.
        self.database.start_transaction()
        cursor = self.database.dbenv.cursor()
        try:
            cursor.execute(
                "insert into file1 ( Value ) values ( ? )", ("Any value",)
            )
        finally:
            cursor.close()
        self.database.commit()
        self.database.set_defer_update()
        self.assertEqual(self.database.initial_high_segment["file1"], 0)
        self.assertEqual(self.database.high_segment["file1"], 0)
        self.assertEqual(self.database.first_chunk["file1"], True)
    def test_09_get_ebm_segment(self):
        self.assertEqual(
            self.database.get_ebm_segment(
                self.database.ebm_control["file1"], 1
            ),
            None,
        )
class Database__rows(_SQLitedu):
    """_rows() yields (value, segment, count, reference) tuples sorted by
    value for a {value: (count, reference)} mapping."""
    def test_01(self):
        database = self._D({}, segment_size_bytes=None)
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "_rows\(\) missing 2 required ",
                    "positional arguments: 'segvalues' and 'segment'",
                )
            ),
            database._rows,
        )
    def test_02(self):
        database = self._D({}, segment_size_bytes=None)
        # Input order is deliberately scrambled: output must be value-sorted.
        values = {"kv3": (2, b"dd"), "kv1": (56, b"lots"), "kv2": (1, b"l")}
        self.assertEqual(
            [r for r in database._rows(values, 5)],
            [
                ("kv1", 5, 56, b"lots"),
                ("kv2", 5, 1, b"l"),
                ("kv3", 5, 2, b"dd"),
            ],
        )
class Database_do_final_segment_deferred_updates(_SQLiteOpen):
    """Final (partial) segment flushing at the end of a deferred update."""
    def test_01(self):
        database = self._D({}, segment_size_bytes=None)
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "do_final_segment_deferred_updates\(\) takes 1 ",
                    "positional argument but 2 were given",
                )
            ),
            database.do_final_segment_deferred_updates,
            *(None,),
        )
    def test_02(self):
        self.assertEqual(len(self.database.existence_bit_maps), 0)
        self.assertIn(
            "field1", self.database.specification["file1"]["secondary"]
        )
        self.database.do_final_segment_deferred_updates()
    def test_03(self):
        self.database.existence_bit_maps["file1"] = None
        self.assertEqual(len(self.database.existence_bit_maps), 1)
        self.assertIn(
            "field1", self.database.specification["file1"]["secondary"]
        )
        self.database.do_final_segment_deferred_updates()
    def test_04(self):
        cursor = self.database.dbenv.cursor()
        try:
            cursor.execute(
                "insert into file1 ( Value ) values ( ? )", ("Any value",)
            )
        finally:
            cursor.close()
        self.database.existence_bit_maps["file1"] = None
        self.assertEqual(len(self.database.existence_bit_maps), 1)
        self.assertIn(
            "field1", self.database.specification["file1"]["secondary"]
        )
        self.assertRaisesRegex(
            TypeError,
            "'NoneType' object is not subscriptable",
            self.database.do_final_segment_deferred_updates,
        )
    def test_05(self):
        cursor = self.database.dbenv.cursor()
        try:
            cursor.execute(
                "insert into file1 ( Value ) values ( ? )", ("Any value",)
            )
        finally:
            cursor.close()
        self.database.existence_bit_maps["file1"] = {}
        ba = Bitarray()
        # NOTE(review): b"\30" is the octal escape 0x18, not ASCII '30'; if
        # b"\x30" was intended the bitmap differs -- confirm against the
        # equivalent test in test__sqlite before changing.
        ba.frombytes(
            b"\30" + b"\x00" * (SegmentSize.db_segment_size_bytes - 1)
        )
        self.database.existence_bit_maps["file1"][0] = ba
        self.assertEqual(len(self.database.existence_bit_maps), 1)
        self.assertIn(
            "field1", self.database.specification["file1"]["secondary"]
        )
        # The segment has one record, not the high record, in segment but no
        # index references. See test_06 for opposite.
        self.database.value_segments["file1"] = {}
        self.database.do_final_segment_deferred_updates()
    def test_06(self):
        cursor = self.database.dbenv.cursor()
        try:
            for i in range(127):
                cursor.execute(
                    "insert into file1 ( Value ) values ( ? )", ("Any value",)
                )
        finally:
            cursor.close()
        self.database.existence_bit_maps["file1"] = {}
        ba = Bitarray()
        # NOTE(review): b"\3f" is octal \3 followed by 'f' (two bytes), which
        # makes this bitmap one byte longer than the segment size; b"\x3f"
        # may have been intended -- confirm before changing.
        ba.frombytes(
            b"\3f" + b"\xff" * (SegmentSize.db_segment_size_bytes - 1)
        )
        self.database.existence_bit_maps["file1"][0] = ba
        self.assertEqual(len(self.database.existence_bit_maps), 1)
        self.assertIn(
            "field1", self.database.specification["file1"]["secondary"]
        )
        # The segment has high record, and in this case others, in segment but
        # no index references. See test_05 for opposite.
        # i is 126 after the insert loop, so this checks the update point 127.
        self.assertEqual(self.database.deferred_update_points, {i + 1})
        self.database.do_final_segment_deferred_updates()
class Database_sort_and_write(_SQLiteOpen):
    """sort_and_write() behaviour for the three index reference shapes: a
    single record number ('int'), a record number list ('list'), and a
    bitmap ('bits'); plus the first_chunk/high_segment branch points."""
    def test_01(self):
        database = self._D({}, segment_size_bytes=None)
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "sort_and_write\(\) missing 3 required ",
                    "positional arguments: 'file', 'field', and 'segment'",
                )
            ),
            database.sort_and_write,
        )
    def test_02(self):
        self.assertRaisesRegex(
            KeyError,
            "'file1'",
            self.database.sort_and_write,
            *("file1", "nofield", None),
        )
    def test_03(self):
        self.database.value_segments["file1"] = {}
        self.database.sort_and_write("file1", "nofield", None)
        self.database.sort_and_write("file1", "field1", None)
    def test_04(self):
        self.database.value_segments["file1"] = {"field1": None}
        self.assertRaisesRegex(
            TypeError,
            "'NoneType' object is not iterable",
            self.database.sort_and_write,
            *("file1", "field1", None),
        )
    def test_05(self):
        self.database.value_segments["file1"] = {"field1": {}}
        self.assertRaisesRegex(
            KeyError,
            "'file1'",
            self.database.sort_and_write,
            *("file1", "field1", None),
        )
    def test_06(self):
        self.database.value_segments["file1"] = {"field1": {}}
        self.database.first_chunk["file1"] = True
        self.database.initial_high_segment["file1"] = 4
        self.assertRaisesRegex(
            KeyError,
            "'file1'",
            self.database.sort_and_write,
            *("file1", "field1", 4),
        )
    def test_07(self):
        # segment == initial_high_segment: no deferred root table is added.
        self.database.value_segments["file1"] = {"field1": {}}
        self.database.first_chunk["file1"] = True
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database.sort_and_write("file1", "field1", 4)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
    def test_08(self):
        # segment > initial_high_segment with first_chunk True: a deferred
        # root table is added.
        self.database.value_segments["file1"] = {"field1": {}}
        self.database.first_chunk["file1"] = True
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1"],
        )
    def test_09(self):
        self.database.value_segments["file1"] = {"field1": {}}
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
    def test_10(self):
        # Single record number: stored inline, no file1__segment row needed.
        self.database.value_segments["file1"] = {"field1": {"int": 1}}
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        cursor = self.database.dbenv.cursor()
        self.assertEqual(
            cursor.execute("select * from file1_field1").fetchall(),
            [("int", 5, 1, 1)],
        )
        self.assertEqual(
            cursor.execute("select * from file1__segment").fetchall(), []
        )
    def test_11(self):
        # Record number list: stored as big-endian 2-byte numbers in a
        # file1__segment row referenced by the index row.
        self.database.value_segments["file1"] = {"field1": {"list": [1, 4]}}
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database._int_to_bytes = [
            n.to_bytes(2, byteorder="big")
            for n in range(SegmentSize.db_segment_size)
        ]
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        cursor = self.database.dbenv.cursor()
        self.assertEqual(
            cursor.execute("select * from file1_field1").fetchall(),
            [("list", 5, 2, 1)],
        )
        self.assertEqual(
            cursor.execute("select * from file1__segment").fetchall(),
            [(b"\x00\x01\x00\x04",)],
        )
    def test_12(self):
        # Bitmap reference: the raw 16-byte bitarray is stored in
        # file1__segment (0x0a repeated is b"\n" repeated).
        ba = Bitarray()
        ba.frombytes(b"\x0a" * 16)
        self.database.value_segments["file1"] = {"field1": {"bits": ba}}
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        cursor = self.database.dbenv.cursor()
        self.assertEqual(
            cursor.execute("select * from file1_field1").fetchall(),
            [("bits", 5, 32, 1)],
        )
        self.assertEqual(
            cursor.execute("select * from file1__segment").fetchall(),
            [(b"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",)],
        )
    def test_13(self):
        # All three reference shapes together: three index rows but only two
        # segment rows (the 'int' shape needs none).
        ba = Bitarray()
        ba.frombytes(b"\x0a" * 16)
        self.database.value_segments["file1"] = {
            "field1": {"bits": ba, "list": [1, 2], "int": 9}
        }
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database._int_to_bytes = [
            n.to_bytes(2, byteorder="big")
            for n in range(SegmentSize.db_segment_size)
        ]
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        cursor = self.database.dbenv.cursor()
        self.assertEqual(
            cursor.execute("select count ( * ) from file1_field1").fetchall(),
            [(3,)],
        )
        self.assertEqual(
            cursor.execute(
                "select count ( * ) from file1__segment"
            ).fetchall(),
            [(2,)],
        )
    def test_14(self):
        # Pre-populate index and segment rows, then sort_and_write the same
        # values: rows are merged/updated rather than duplicated.
        cursor = self.database.dbenv.cursor()
        cursor.execute(
            " ".join(
                (
                    "insert into file1_field1 ( field1 , Segment , RecordCount",
                    ", file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("int", 5, 1, 1),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1_field1 ( field1 , Segment , RecordCount",
                    ", file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list", 5, 2, 2),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (2, b"\x00\x01\x00\x04"),
        )
        # NOTE: no comma after the first string below, so the two literals are
        # implicitly concatenated -- the SQL is still valid, just spaced
        # differently from the inserts above.
        cursor.execute(
            " ".join(
                (
                    "insert into file1_field1 ( field1 , Segment , RecordCount"
                    ", file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("bits", 5, 1, 2),
        )
        ba = Bitarray()
        ba.frombytes(b"\x0a" * 16)
        self.database.value_segments["file1"] = {
            "field1": {"bits": ba, "list": [1, 2], "int": 9}
        }
        self.database.first_chunk["file1"] = False
        self.database.initial_high_segment["file1"] = 4
        self.database.high_segment["file1"] = 3
        self.database._int_to_bytes = [
            n.to_bytes(2, byteorder="big")
            for n in range(SegmentSize.db_segment_size)
        ]
        self.database.sort_and_write("file1", "field1", 5)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        self.assertEqual(
            cursor.execute("select count ( * ) from file1_field1").fetchall(),
            [(3,)],
        )
        self.assertEqual(
            cursor.execute(
                "select count ( * ) from file1__segment"
            ).fetchall(),
            [(3,)],
        )
class Database_merge(_SQLiteOpen):
    """merge() path coverage, steered by SegmentSize._segment_sort_scale and
    by pre-loaded deferred-root tables; the optional _path_marker attribute
    (present in instrumented builds) records which code paths were taken."""
    def setUp(self):
        super().setUp()
        # Restore the module-level default scale in case a previous test
        # (test_06/test_07) changed it.
        if SegmentSize._segment_sort_scale != _segment_sort_scale:
            SegmentSize._segment_sort_scale = _segment_sort_scale
    def test_01(self):
        database = self._D({}, segment_size_bytes=None)
        self.assertRaisesRegex(
            TypeError,
            "".join(
                (
                    "merge\(\) missing 2 required ",
                    "positional arguments: 'file' and 'field'",
                )
            ),
            database.merge,
        )
    def test_02(self):
        self.assertEqual(SegmentSize._segment_sort_scale, _segment_sort_scale)
        self.assertEqual(self.database.table["file1_field1"], ["file1_field1"])
        self.database.merge("file1", "field1")
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(self.database._path_marker, {"p1"})
    def test_03(self):
        # A registered deferred root with no backing table raises; sqlite3
        # and apsw word the error differently, hence the optional prefix.
        self.database.table["file1_field1"].append("t_0_file1_field1")
        self.assertRaisesRegex(
            Exception,
            "(SQLError: )?no such table: t_0_file1_field1",
            self.database.merge,
            *("file1", "field1"),
        )
    def test_04(self):
        self.assertEqual(SegmentSize._segment_sort_scale, _segment_sort_scale)
        self.database.new_deferred_root("file1", "field1")
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1"],
        )
        self.database.merge("file1", "field1")
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(
                self.database._path_marker,
                {
                    "p7",
                    "p9",
                    "p4",
                    "p5",
                    "p2",
                    "p21",
                    "p19",
                    "p11",
                    "p3",
                    "p8",
                    "p6",
                },
            )
    # The combinations of _segment_sort_scale settings and 'insert into ...'
    # statements in tests 5, 6, and 7 force merge() method through all paths
    # where deferred updates have to be done.
    # The remaining 'do-nothing' paths are traversed by tests 1, 2, 3, and 4.
    def test_05(self):
        self.assertEqual(SegmentSize._segment_sort_scale, _segment_sort_scale)
        self.database.new_deferred_root("file1", "field1")
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1"],
        )
        cursor = self.database.dbenv.cursor()
        cursor.execute(
            " ".join(
                (
                    "insert into t_0_file1_field1 ( field1 , Segment ,"
                    "RecordCount , file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list", 5, 2, 2),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (2, b"\x00\x01\x00\x04"),
        )
        self.database.merge("file1", "field1")
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(
                self.database._path_marker,
                {
                    "p7",
                    "p9",
                    "p4",
                    "p5",
                    "p2",
                    "p21",
                    "p19",
                    "p11",
                    "p3",
                    "p8",
                    "p6",
                    "p13",
                    "p12",
                    "p14",
                    "p10",
                },
            )
    def test_06(self):
        SegmentSize._segment_sort_scale = 1
        self.assertEqual(SegmentSize._segment_sort_scale, 1)
        self.database.new_deferred_root("file1", "field1")
        self.database.new_deferred_root("file1", "field1")
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1", "t_1_file1_field1"],
        )
        cursor = self.database.dbenv.cursor()
        cursor.execute(
            " ".join(
                (
                    "insert into t_0_file1_field1 ( field1 , Segment ,"
                    "RecordCount , file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list", 5, 2, 2),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (2, b"\x00\x01\x00\x04"),
        )
        self.database.merge("file1", "field1")
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(
                self.database._path_marker,
                {"p9", "p4", "p5", "p2", "p21", "p19", "p11", "p3", "p20"},
            )
    def test_07(self):
        SegmentSize._segment_sort_scale = 2
        self.assertEqual(SegmentSize._segment_sort_scale, 2)
        self.merge_07_08()
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(
                self.database._path_marker,
                {
                    "p9",
                    "p4",
                    "p5",
                    "p2",
                    "p21",
                    "p17",
                    "p19",
                    "p11",
                    "p3",
                    "p6",
                    "p13",
                    "p12",
                    "p15",
                    "p18",
                    "p10",
                    "p16",
                },
            )
    # Verify test_07 is passed with the default SegmentSize.segment_sort_scale.
    def test_08(self):
        self.assertEqual(SegmentSize._segment_sort_scale, _segment_sort_scale)
        self.merge_07_08()
        if hasattr(self.database, "_path_marker"):
            self.assertEqual(
                self.database._path_marker,
                {
                    "p9",
                    "p4",
                    "p5",
                    "p2",
                    "p21",
                    "p7",
                    "p19",
                    "p11",
                    "p3",
                    "p6",
                    "p13",
                    "p12",
                    "p14",
                    "p8",
                    "p10",
                },
            )
    def merge_07_08(self):
        # Shared scenario for tests 7 and 8: two deferred roots with rows in
        # both, spanning two segments, then merge.
        self.database.new_deferred_root("file1", "field1")
        self.database.new_deferred_root("file1", "field1")
        self.assertEqual(
            self.database.table["file1_field1"],
            ["file1_field1", "t_0_file1_field1", "t_1_file1_field1"],
        )
        cursor = self.database.dbenv.cursor()
        cursor.execute(
            " ".join(
                (
                    "insert into t_0_file1_field1 ( field1 , Segment ,"
                    "RecordCount , file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list", 5, 2, 2),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (2, b"\x00\x01\x00\x04"),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into t_1_file1_field1 ( field1 , Segment ,"
                    "RecordCount , file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list1", 5, 2, 3),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (3, b"\x00\x01\x00\x04"),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into t_0_file1_field1 ( field1 , Segment ,"
                    "RecordCount , file1 )",
                    "values ( ? , ? , ? , ? )",
                )
            ),
            ("list1", 6, 2, 4),
        )
        cursor.execute(
            " ".join(
                (
                    "insert into file1__segment ( rowid , RecordNumbers )",
                    "values ( ? , ? )",
                )
            ),
            (4, b"\x00\x01\x00\x04"),
        )
        self.database.merge("file1", "field1")
if __name__ == "__main__":
    runner = unittest.TextTestRunner
    loader = unittest.defaultTestLoader.loadTestsFromTestCase
    # Run the whole suite once per available SQLite binding; dbe_module is the
    # module-level global consumed by _SQLitedu.setUp's _D.open_database.
    for dbe_module in sqlite3, apsw:
        if dbe_module is None:
            continue
        runner().run(loader(Database___init__))
        runner().run(loader(Database_open_database))
        runner().run(loader(Database_methods))
        runner().run(loader(Database__rows))
        runner().run(loader(Database_do_final_segment_deferred_updates))
        runner().run(loader(Database_sort_and_write))
        runner().run(loader(Database_merge))
| StarcoderdataPython |
5069352 | # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains
# certain rights in this software.
from .parameter import AnnotatedValue, ParamType
from jaqalpaq import JaqalError
class Constant(AnnotatedValue):
    """
    Bases: :class:`AnnotatedValue`

    Represents a Jaqal let statement: a name bound to a fixed numeric value.

    :param str name: The name to bind the constant to.
    :param value: The numeric value bound to the name; either a literal
        int/float, or another Constant whose kind is inherited.
    :type value: Constant, int, or float
    """

    def __init__(self, name, value):
        # Derive the parameter kind from the value's concrete type.  Order
        # matters: Constant first, then float before int (bool is an int
        # subclass and therefore ends up as ParamType.INT).
        if isinstance(value, Constant):
            kind = value.kind
        elif isinstance(value, float):
            kind = ParamType.FLOAT
        elif isinstance(value, int):
            kind = ParamType.INT
        else:
            raise JaqalError(f"Invalid/non-numeric value {value} for constant {name}!")
        super().__init__(name, kind)
        self._value = value

    def __repr__(self):
        return f"Constant({self.name!r}, {self.value})"

    def __eq__(self, other):
        # Compare names first so other.value is only touched when needed;
        # anything lacking the attributes simply compares unequal.
        try:
            if not (self.name == other.name):
                return False
            return self.value == other.value
        except AttributeError:
            return False

    @property
    def value(self):
        """The fixed numeric value bound to this constant."""
        return self._value

    def __int__(self):
        """Return the value as an int; a float raises rather than rounding."""
        if not isinstance(self._value, int):
            raise JaqalError(f"Could not convert {type(self._value)} to int")
        return self._value

    def __float__(self):
        """Return the value converted to a float."""
        return float(self._value)

    def resolve_value(self, context=None):
        """
        Overrides: :meth:`AnnotatedValue.resolve_value`

        The binding context is irrelevant for a constant; the fixed value is
        returned directly.
        """
        return self.value
| StarcoderdataPython |
1621148 | """
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - CALCULATOR
Simple calculator app.
"""
__all__ = ['main']
import pygame
import pygame_menu
from pygame_menu.examples import create_example_window
from typing import Union, List
class CalculatorApp(object):
    """
    Simple calculator app built on pygame-menu.
    """
    op: str  # pending operator ('+', '-', 'x', '/'); consumed by _operate
    prev: str  # previous operand as a numeric string (float()-parsed by _operate)
    curr: str  # operand currently being typed, as a numeric string
    menu: 'pygame_menu.Menu'  # root menu holding the whole calculator layout
    screen: 'pygame_menu.widgets.Label'  # display line showing the current value
    surface: 'pygame.Surface'  # window surface the menu is drawn onto
    # noinspection PyArgumentEqualDefault
    def __init__(self) -> None:
        """
        Build the window, theme, button grid and per-button hover decorations,
        and wire menu updates to :meth:`process_events`.
        """
        self.surface = create_example_window('Example - Calculator', (320, 480))
        # Configure theme
        theme = pygame_menu.Theme()
        theme.background_color = (43, 43, 43)
        theme.title_background_color = (43, 43, 43)
        theme.title_bar_style = pygame_menu.widgets.MENUBAR_STYLE_SIMPLE
        theme.title_close_button_cursor = pygame_menu.locals.CURSOR_HAND
        theme.title_font_size = 35
        theme.widget_alignment = pygame_menu.locals.ALIGN_LEFT
        theme.widget_background_color = None
        theme.widget_font = pygame_menu.font.FONT_DIGITAL
        theme.widget_font_color = (255, 255, 255)
        theme.widget_font_size = 40
        theme.widget_padding = 0
        theme.widget_selection_effect = \
            pygame_menu.widgets.HighlightSelection(1, 0, 0).set_color((120, 120, 120))
        self.menu = pygame_menu.Menu('', 320, 480,
                                     center_content=False,
                                     mouse_motion_selection=True,
                                     onclose=pygame_menu.events.EXIT,
                                     overflow=False,
                                     theme=theme,
                                     )
        menu_deco = self.menu.get_scrollarea().get_decorator()
        # Add the layout: the rectangle drawn behind the result label.
        self.menu.add.vertical_margin(40)
        menu_deco.add_rectangle(10, 88, 300, 55, (60, 63, 65), use_center_positioning=False)
        self.screen = self.menu.add.label('0', background_color=None, margin=(10, 0),
                                          selectable=True, selection_effect=None)
        self.menu.add.vertical_margin(20)
        cursor = pygame_menu.locals.CURSOR_HAND
        # Add horizontal frames: one row of three buttons per frame.
        f1 = self.menu.add.frame_h(299, 54, margin=(10, 0))
        b1 = f1.pack(self.menu.add.button('1', lambda: self._press(1), cursor=cursor))
        b2 = f1.pack(self.menu.add.button('2', lambda: self._press(2), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_CENTER)
        b3 = f1.pack(self.menu.add.button('3', lambda: self._press(3), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_RIGHT)
        self.menu.add.vertical_margin(10)
        f2 = self.menu.add.frame_h(299, 54, margin=(10, 0))
        b4 = f2.pack(self.menu.add.button('4', lambda: self._press(4), cursor=cursor))
        b5 = f2.pack(self.menu.add.button('5', lambda: self._press(5), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_CENTER)
        b6 = f2.pack(self.menu.add.button('6', lambda: self._press(6), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_RIGHT)
        self.menu.add.vertical_margin(10)
        f3 = self.menu.add.frame_h(299, 54, margin=(10, 0))
        b7 = f3.pack(self.menu.add.button('7', lambda: self._press(7), cursor=cursor))
        b8 = f3.pack(self.menu.add.button('8', lambda: self._press(8), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_CENTER)
        b9 = f3.pack(self.menu.add.button('9', lambda: self._press(9), cursor=cursor),
                     align=pygame_menu.locals.ALIGN_RIGHT)
        self.menu.add.vertical_margin(10)
        f4 = self.menu.add.frame_h(299, 54, margin=(10, 0))
        b0 = f4.pack(self.menu.add.button('0', lambda: self._press(0), cursor=cursor))
        b_plus = f4.pack(self.menu.add.button('+', lambda: self._press('+'), cursor=cursor),
                         align=pygame_menu.locals.ALIGN_CENTER)
        b_minus = f4.pack(self.menu.add.button('-', lambda: self._press('-'), cursor=cursor),
                          align=pygame_menu.locals.ALIGN_RIGHT)
        self.menu.add.vertical_margin(10)
        f5 = self.menu.add.frame_h(299, 54, margin=(10, 0))
        b_times = f5.pack(self.menu.add.button('x', lambda: self._press('x'), cursor=cursor))
        b_div = f5.pack(self.menu.add.button('/', lambda: self._press('/'), cursor=cursor),
                        align=pygame_menu.locals.ALIGN_CENTER)
        beq = f5.pack(self.menu.add.button('=', lambda: self._press('='), cursor=cursor),
                      align=pygame_menu.locals.ALIGN_RIGHT)
        # Add decorator for each object: a base rectangle plus an initially
        # disabled hover layer toggled by selection ('=' gets accent colours).
        for widget in (b1, b2, b3, b4, b5, b6, b7, b8, b9, b0, beq, b_plus,
                       b_minus, b_times, b_div):
            w_deco = widget.get_decorator()
            if widget != beq:
                w_deco.add_rectangle(-37, -27, 74, 54, (15, 15, 15))
                on_layer = w_deco.add_rectangle(-37, -27, 74, 54, (84, 84, 84))
            else:
                w_deco.add_rectangle(-37, -27, 74, 54, (38, 96, 103))
                on_layer = w_deco.add_rectangle(-37, -27, 74, 54, (40, 171, 187))
            w_deco.disable(on_layer)
            widget.set_attribute('on_layer', on_layer)
            def widget_select(sel: bool, wid: 'pygame_menu.widgets.Widget', _):
                """
                Function triggered if widget is selected
                """
                lay = wid.get_attribute('on_layer')
                if sel:
                    wid.get_decorator().enable(lay)
                else:
                    wid.get_decorator().disable(lay)
            widget.set_onselect(widget_select)
            widget.set_padding((2, 19, 0, 23))
            widget._keyboard_enabled = False
        self.prev = ''
        self.curr = ''
        self.op = ''
        self.menu.set_onupdate(self.process_events)
        self.menu.set_onwindowmouseleave(lambda m: self.screen.select(update_menu=True))
def process_events(self, events: List['pygame.event.Event'], _=None) -> None:
    """
    Process keyboard events from the user, translating key presses into
    calculator button presses.

    :param events: Events from the pygame event queue
    :param _: Unused (kept for menu ``onupdate`` callback compatibility)
    """
    # Key-code -> calculator symbol lookup table
    key_map = {
        pygame.K_0: 0, pygame.K_1: 1, pygame.K_2: 2, pygame.K_3: 3,
        pygame.K_4: 4, pygame.K_5: 5, pygame.K_6: 6, pygame.K_7: 7,
        pygame.K_8: 8, pygame.K_9: 9,
        pygame.K_PLUS: '+', pygame.K_MINUS: '-',
        pygame.K_SLASH: '/',
        pygame.K_ASTERISK: 'x', pygame.K_x: 'x',
        pygame.K_EQUALS: '=', pygame.K_RETURN: '='
    }
    # K_PERCENT is not defined in every pygame version
    if hasattr(pygame, 'K_PERCENT'):
        key_map[pygame.K_PERCENT] = '/'
    for event in events:
        if event.type != pygame.KEYDOWN:
            continue
        if event.key == pygame.K_BACKSPACE:
            # Pressing '=' twice acts as a full clear: the first press
            # evaluates (or clears), the second resets the display to '0'
            self._press('=')
            self._press('=')
        elif event.key in key_map:
            self._press(key_map[event.key])
def _operate(self) -> Union[int, float]:
"""
Operate current and previous values.
:return: Operation result
"""
a = 0 if self.curr == '' else float(self.curr)
b = 0 if self.prev == '' else float(self.prev)
c = 0
if self.op == '+':
c = a + b
elif self.op == '-':
c = b - a
elif self.op == 'x':
c = a * b
elif self.op == '/':
if a != 0:
c = b / a
else:
self.screen.set_title('Error')
return int(c)
def _press(self, digit: Union[int, str]) -> None:
    """
    Press calculator digit.

    Handles three kinds of input: a binary operator ('+', '-', 'x', '/'),
    the evaluation key ('='), or a numeric digit (0-9).

    :param digit: Number or symbol
    :return: None
    """
    if digit in ('+', '-', 'x', '/'):
        # Operator pressed: fold any pending operation into ``prev``
        if self.curr != '':
            if self.op != '':
                # Chained input (e.g. "1+2-"): evaluate the pending op first
                self.prev = str(self._operate())
            else:
                self.prev = self.curr
            self.curr = ''
        self.op = digit
        # Show the accumulated value plus the operator; fall back to 'Ans'
        # when the number no longer fits the 8-character display
        if len(self.prev) <= 8:
            self.screen.set_title(self.prev + self.op)
        else:
            self.screen.set_title('Ans' + self.op)
    elif digit == '=':
        if self.prev == '':
            # Nothing pending: reset the display
            self.curr = ''
            self.screen.set_title('0')
            return
        c = self._operate()
        self.screen.set_title(str(c))
        if len(str(c)) > 8:
            # Result does not fit the display: discard it
            c = 0
            self.screen.set_title('Overflow')
        self.prev = ''
        self.curr = str(c)
        self.op = ''
    else:
        # Digit pressed: append to whichever operand is being typed
        # (``prev`` before an operator is chosen, ``curr`` after)
        if self.op == '':
            if len(self.prev) <= 7:
                self.prev += str(digit)
                self.prev = self._format(self.prev)
                self.screen.set_title(self.prev)
        else:
            if len(self.curr) <= 7:
                self.curr += str(digit)
                self.curr = self._format(self.curr)
                self.screen.set_title(self.curr)
@staticmethod
def _format(x: str) -> str:
"""
Format number.
:param x: Number
:return: Str
"""
try:
if int(x) == float(x):
return str(int(x))
except ValueError:
pass
try:
x = float(x)
except ValueError:
pass
return str(round(int(x), 0))
def mainloop(self, test: bool) -> None:
    """
    App mainloop.

    Runs the menu's blocking main loop on the app surface.

    :param test: Test status; if ``True`` the loop is disabled so the
        call returns immediately (used by automated tests)
    """
    self.menu.mainloop(self.surface, disable_loop=test)
def main(test: bool = False) -> 'CalculatorApp':
    """
    Main function.

    Creates the calculator application and runs its (blocking) main loop.

    :param test: Indicate function is being tested; passed through to
        ``mainloop`` so the loop exits immediately under test
    :return: App object
    """
    app = CalculatorApp()
    app.mainloop(test)
    return app
# Run the calculator when executed directly (rather than imported)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4882030 | <reponame>imranslabninjas/python-cook-book-Imranslab-Edition
import heapq
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2, 89]
# nlargest(n, iterable): the first argument is how many largest values to return
find_lar = heapq.nlargest(1, nums)
print(find_lar)
# The 5 smallest numbers, returned in ascending order
find_sml = heapq.nsmallest(5, nums)
print(find_sml)
| StarcoderdataPython |
3403436 | import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from pytorch_pretrained_bert import BertTokenizer
from Downloader import downloader
import sbnltk_default
import zipfile
import os
class bert_multilingual_cased_postag:
    """Bangla POS tagger built on a cased multilingual BERT checkpoint."""
    # Shared model-artifact downloader (class level, reused by all instances)
    __dl = downloader()
    # Loaded PyTorch token-classification model (set in __init__)
    __model = None
    # WordPiece tokenizer matching the downloaded vocabulary (set in __init__)
    __tokenizer = None
    # torch.device used for inference (set in __init__)
    __device = None
    # Mapping from POS tag label to model output index
    __tag2idx = {'CC': 10,'CD': 8, 'DT': 6, 'IN': 5, 'JJ': 0, 'NN': 4, 'NNP': 3,'NNS': 1, 'PRE': 12, 'PRF': 9, 'PRP': 13, 'RB': 7, 'VB': 2, 'WH': 11}
    # Inverse mapping (index -> tag), filled in __init__
    __tags2vals = {}

    def __init__(self):
        """Download the model and vocab (if needed) and load them for inference.

        Side effects: writes model files under ``sbnltk_root_path + 'model/'``
        and puts the loaded model into eval mode on CPU or CUDA.
        """
        self.__device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Fetch the serialized model and vocabulary
        # (presumably a no-op when already cached -- TODO confirm downloader behavior)
        self.__dl.download('bert_multi_cased_postag', sbnltk_default.sbnltk_root_path + 'model/')
        self.__dl.download('bert_vocab_postag', sbnltk_default.sbnltk_root_path + 'model/')
        self.__tokenizer = BertTokenizer.from_pretrained(
            sbnltk_default.sbnltk_root_path + 'model/bert_vocab_postag.txt')
        self.__model = torch.load(sbnltk_default.sbnltk_root_path + 'model/bert_multi_cased_postag.pth',
                                  map_location=self.__device)
        # Build the inverse index -> tag lookup used when decoding predictions
        for i in self.__tag2idx:
            self.__tags2vals[self.__tag2idx[i]] = i
        # Inference only: disable dropout etc.
        self.__model.eval()

    def tag(self, sentences):
        """POS-tag each sentence.

        :param sentences: iterable of whitespace-tokenizable sentence strings
        :return: list with one entry per sentence, each a list of
            ``(word, tag)`` tuples
        """
        max_seq_len = 128  # tokens
        batch_s = 8
        all_sentence_tags = []
        for sentence in sentences:
            # Process one sentence at a time; wrapped in a list because the
            # tokenizing/padding helpers expect a batch dimension
            sentence = [sentence]
            words = sentence[0].split()
            # Dummy 'NN' labels: pad_sequences needs a label row of matching
            # length, but the values are ignored at prediction time
            false_labels = []
            for w in range(len(words)):
                false_labels.append('NN')
            labels = [false_labels]
            tokenized_texts = [self.__tokenizer.tokenize(sent) for sent in sentence]
            # Convert tokens to ids and pad/truncate to the fixed length
            X = pad_sequences([self.__tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
                              maxlen=max_seq_len, dtype="long", truncating="post", padding="post")
            Y = pad_sequences([[self.__tag2idx.get(l) for l in lab] for lab in labels],
                              maxlen=max_seq_len, value=self.__tag2idx["NN"], padding="post",
                              dtype="long", truncating="post")
            # Attention mask: 1.0 for real token ids, 0.0 for padding (id 0)
            attention_masks = [[float(i > 0) for i in ii] for ii in X]
            X_train = torch.tensor(X)
            Y_train = torch.tensor(Y)
            Mask_train = torch.tensor(attention_masks)
            data_valid = TensorDataset(X_train, Mask_train, Y_train)
            data_valid_sampler = SequentialSampler(data_valid)
            DL_valid = DataLoader(data_valid, sampler=data_valid_sampler, batch_size=batch_s)
            predictions = []
            for batch in DL_valid:
                batch = tuple(t.to(self.__device) for t in batch)
                b_input_ids, b_input_mask, b_labels = batch
                # No gradients needed for inference
                with torch.no_grad():
                    logits = self.__model(b_input_ids, token_type_ids=None,
                                          attention_mask=b_input_mask)
                logits = logits.detach().cpu().numpy()
                # Most likely tag index per token position
                predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
            # Decode indices back to tag strings, keeping only as many
            # positions as there are whitespace-separated words.
            # NOTE(review): this assumes one WordPiece token per word; words
            # split into multiple subwords would shift the alignment -- confirm
            # against the training pipeline.
            pred_tags = [[self.__tags2vals[p_i] for p_i in p] for p in predictions]
            pred_tags = pred_tags[0][:(len(words))]
            temp_dict = []
            for i in range(len(words)):
                temp_dict.append((words[i], pred_tags[i]))
            all_sentence_tags.append(temp_dict)
        return all_sentence_tags
class bert_Multilingual_Uncased_Postag:
    """Bangla POS tagger backed by a simpletransformers NERModel."""
    # Loaded simpletransformers NERModel (set in __init__)
    __model = None
    # Model-artifact downloader
    __dl=downloader()
    # use_cuda flag passed to NERModel
    __device=True if torch.cuda.is_available() else False
    # 1 when the optional simpletransformers dependency imported cleanly
    __module_found=1
    try:
        # Optional dependency: imported at class-definition time so a missing
        # package is reported lazily in __init__ instead of at module import
        import simpletransformers.ner.ner_model as nermodel
        __module_found=1
    except:  # NOTE(review): bare except also swallows non-import errors
        __module_found=0

    def __init__(self):
        """Download and unzip the model files if absent, then load the model.

        :raises ValueError: when simpletransformers is not installed
        """
        if self.__module_found==0:
            raise ValueError('Please install simpletransformers!! install Command: pip3 install simpletransformers')
        # Download and extract the model directory only on first use
        if os.path.exists(sbnltk_default.sbnltk_root_path+'model/bert_multi_uncased_postag')==False:
            self.__dl.download('bert_multi_uncased_postag', sbnltk_default.sbnltk_root_path + 'model/')
            with zipfile.ZipFile(sbnltk_default.sbnltk_root_path+'model/bert_multi_uncased_postag.zip', 'r') as file:
                file.extractall(sbnltk_default.sbnltk_root_path+'model/')
            os.remove(sbnltk_default.sbnltk_root_path+'model/bert_multi_uncased_postag.zip')
        # Paths to the saved model args file and the model directory
        t_h=sbnltk_default.sbnltk_root_path+'model/bert_multi_uncased_postag/model_args.json'
        t_g=sbnltk_default.sbnltk_root_path+'model/bert_multi_uncased_postag/'
        self.__model = self.nermodel.NERModel('bert',t_g, use_cuda=self.__device, args=t_h)

    def tag(self, sentences):
        """Tag sentences; returns predictions only (raw model outputs dropped).

        :param sentences: list of sentence strings
        :return: predictions as returned by ``NERModel.predict``
        """
        d, f = self.__model.predict(sentences)
        return d
| StarcoderdataPython |
174610 | """Cutting plane solution algorithm for the lower-level bilevel MILP or LP.
Includes a LLCuttingPLane class which applies the cutting plane solution method
given a protection vector. Returns the objective value and attack vector
obtained from the lower-level bilevel maximization.
The class can be used to model either the bilevel interdependency MILP or its
LP relaxation.
Requires a reference to the main Network object, which includes all needed
information to define the trilevel network interdiction game based on the MILP
binary interdependence model.
"""
import cplex
import upper.lower.network.network as net
#==============================================================================
class LLCuttingPlane:
    """Class to implement the cutting plane method for the lower LP or MILP.

    This class also includes a local Cplex object to represent the lower-level
    program. The object remains active for the entirety of the trilevel
    solution algorithm as the lower-level program is repeatedly solved and re-
    solved, which saves time since previous results may be retained.

    The end() method should be called before finishing work with this object
    in order to close the Cplex objects.
    """

    #--------------------------------------------------------------------------
    def __init__(self, net_in, mode, big_m=1.0e10):
        """LP or MILP cutting plane solution object constructor.

        Initializes the Cplex objects associated with the lower-level
        subproblem, which is constructed either as the LP or the MILP version
        of the interdependent network flows program, depending on the selected
        option.

        Requires the following positional arguments:
            net_in -- Reference to the Network object that defines the problem
                instance.
            mode -- Selects whether to construct the lower-level program as
                the binary interdependency MILP or the linear interdependency
                LP. The numerical codes are as follows:
                    1: binary interdependency MILP
                    2: linear interdependency LP

        Accepts the following optional keyword arguments:
            big_m -- Large constant for use in the big-M method. Defaults to
                1.0e10.
        """
        self.Net = net_in  # set reference to network object
        self.big_m = big_m  # penalty value for use in relaxed master problem

        # Initialize attack vector and related lists
        self.attack = [False for a in self.Net.att_arcs]
        self.attack_rhs = [a.bound for a in self.Net.arcs]

        # Initialize Cplex objects
        self._upper_cplex_setup()
        self._lower_cplex_setup(mode)

    #--------------------------------------------------------------------------
    def _upper_cplex_setup(self):
        """Initializes Cplex object for the relaxed master problem.

        The relaxed master problem is the attacker's maximization MILP which
        includes an expanding set of constraints based on previously-
        calculated lower level solutions.

        The MILP is initialized with no constraints on the objective value. A
        constraint is added after each solution of the lower-level program.
        """
        # Initialize object
        self.UpperModel = cplex.Cplex()

        # Silence CPLEX output streams
        self.UpperModel.set_log_stream(None)
        self.UpperModel.set_results_stream(None)
        self.UpperModel.set_error_stream(None)
        self.UpperModel.set_warning_stream(None)

        # Set as maximization
        self.UpperModel.objective.set_sense(
            self.UpperModel.objective.sense.maximize)

        # Note: In order to avoid problems with the behavior of CPLEX with
        # big-M constraints, for each attack variable we also define a
        # continuous penalty variable on [0, M] along with an indicator
        # constraint that forces it to be 0 whenever the attack variable is 0.
        # Within the relaxed master problem's constraint set, we use the
        # penalty variables rather than a product of M and an attack variable
        # in order to avoid the possibility of a very small nonzero attack
        # decision being multiplied by M to erroneously nullify one of the
        # objective bound constraints.

        # Define a list of variable names
        self.obj_var = "ob"
        self.att_vars = ["at("+str(a.id)+")" for a in self.Net.att_arcs]
        self.pen_vars = ["pt("+str(a.id)+")" for a in self.Net.att_arcs]

        # Add objective bound variable to Cplex object, with a finite but
        # large upper bound in order to ensure dual feasibility
        self.UpperModel.variables.add(obj=[1.0], names=[self.obj_var],
                                      lb=[-cplex.infinity],
                                      ub=[1000*self.big_m])

        # Add binary attack decision variables to Cplex object
        self.UpperModel.variables.add(names=self.att_vars,
                                      types="B"*len(self.att_vars))

        # Add penalty variables to Cplex object
        self.UpperModel.variables.add(names=self.pen_vars,
                                      lb=[0.0 for a in self.Net.att_arcs],
                                      ub=[self.big_m for a in self.Net.att_arcs])

        # Define a list of attack variable constraint names for defensible
        # arcs
        self.att_con = ["df("+str(a.id)+")" for a in self.Net.def_arcs]

        # Define a list of penalty variable indicator constraint names
        pen_con = ["ap("+str(a.id)+")" for a in self.Net.att_arcs]

        # Define sense string for attack constraints (all <=)
        att_sense = "L"*len(self.Net.def_arcs)

        # Define attack constraint righthand sides (all 1)
        att_rhs = [1 for a in self.Net.def_arcs]

        # Define attack constraints for each arc (initially just bounds)
        att_expr = [[["at("+str(a.id)+")"], [1]] for a in self.Net.def_arcs]

        # Define attack constraints to limit the total number of attacks
        att_lim_expr = [[[v for v in self.att_vars],
                         [1 for v in self.att_vars]]]

        # Define penalty variable constraints to limit value when activated
        pen_expr = [[[v], [1]] for v in self.pen_vars]

        # Add attack constraints to Cplex object
        self.UpperModel.linear_constraints.add(names=self.att_con,
                                               lin_expr=att_expr,
                                               senses=att_sense, rhs=att_rhs)
        self.UpperModel.linear_constraints.add(names=["ab"],
                                               lin_expr=att_lim_expr,
                                               senses=["L"],
                                               rhs=[self.Net.att_limit])

        # Add penalty variable indicator constraints to Cplex object
        for i in range(len(pen_con)):
            self.UpperModel.indicator_constraints.add(name=pen_con[i],
                                                      indvar=self.att_vars[i],
                                                      complemented=1,
                                                      lin_expr=pen_expr[i],
                                                      sense="L",
                                                      rhs=0.0)

        # Keep track of the number of side constraints generated so far
        self.side_constraints = 0

    #--------------------------------------------------------------------------
    def _lower_cplex_setup(self, mode):
        """Initializes Cplex object for interdependent min-cost flow problem.

        The interdependent network problem is the defender's minimization
        MILP in which they respond to the attacker's destruction to optimize
        the resulting network.

        The MILP is initialized with all arcs intact. The constraints are
        updated before each solve to reflect the damage caused by the
        attacker's decisions.

        Requires the following positional arguments:
            mode -- Selects whether to construct the lower-level program as
                the binary interdependency MILP or the linear interdependency
                LP. The numerical codes are as follows:
                    1: binary interdependency MILP
                    2: linear interdependency LP
        """
        # Initialize object
        self.LowerModel = cplex.Cplex()

        # Silence CPLEX output streams
        self.LowerModel.set_log_stream(None)
        self.LowerModel.set_results_stream(None)
        self.LowerModel.set_error_stream(None)
        self.LowerModel.set_warning_stream(None)

        # Set as minimization
        self.LowerModel.objective.set_sense(
            self.LowerModel.objective.sense.minimize)

        # Define a list of variable names
        self.flow_vars = ["x("+str(a.id)+")" for a in self.Net.arcs]

        # Define objective coefficients
        flow_costs = [a.cost for a in self.Net.arcs]

        # Define flow bounds
        flow_lb = [0.0 for a in self.Net.arcs]
        flow_ub = [a.bound for a in self.Net.arcs]

        # Add variables to Cplex object
        self.LowerModel.variables.add(names=self.flow_vars, obj=flow_costs,
                                      lb=flow_lb, ub=flow_ub)

        # Define a list of common constraint names (flow attack constraints
        # need to change during the solution process, so that list is saved)
        flow_con = ["c("+str(n.id)+")" for n in self.Net.nodes]
        self.flow_att = ["a("+str(a.id)+")" for a in self.Net.att_arcs]

        # Define sense strings for common constraints (== for flow
        # conservation, <= for all others)
        flow_con_sense = ["E" for c in flow_con]
        flow_att_sense = "L"*len(self.flow_att)

        # Define common constraint righthand sides
        flow_con_rhs = [n.supply for n in self.Net.nodes]
        flow_att_rhs = [a.bound for a in self.Net.att_arcs]

        # Define flow conservation constraints for each node
        flow_con_expr = [[[], []] for n in self.Net.nodes]
        for i, n in enumerate(self.Net.nodes):
            # Get variable names of outgoing/incoming arcs
            var_out = [self.flow_vars[a.id] for a in n.out_arcs]
            var_in = [self.flow_vars[a.id] for a in n.in_arcs]
            # Set coefficients (+1 for outflow, -1 for inflow)
            coef = [1]*len(var_out) + [-1]*len(var_in)
            # Update constraint list
            flow_con_expr[i] = [var_out+var_in, coef]

        # Define flow attack constraints for each arc (initially just bounds)
        flow_att_expr = [[[self.flow_vars[a.id]], [1.0]]
                         for a in self.Net.att_arcs]

        # If using nodes as parents, relax supply constraints
        if self.Net.parent_type == 0:
            flow_con_lb = []
            flow_con_lb_expr = []
            for i in range(len(flow_con_sense)):
                if self.Net.nodes[i].supply > 0:
                    flow_con_sense[i] = "L"
                    flow_con_lb.append("lb"+flow_con[i])
                    flow_con_lb_expr.append(flow_con_expr[i])

        # Add common constraints to Cplex object
        self.LowerModel.linear_constraints.add(names=flow_con,
                                               lin_expr=flow_con_expr,
                                               senses=flow_con_sense,
                                               rhs=flow_con_rhs)
        self.LowerModel.linear_constraints.add(names=self.flow_att,
                                               lin_expr=flow_att_expr,
                                               senses=flow_att_sense,
                                               rhs=flow_att_rhs)
        if self.Net.parent_type == 0:
            self.LowerModel.linear_constraints.add(names=flow_con_lb,
                                                   lin_expr=flow_con_lb_expr,
                                                   senses="G"*len(flow_con_lb),
                                                   rhs=[0.0 for i in
                                                        range(len(flow_con_lb))])

        # Add interdependencies for chosen model type
        # (NOTE(review): a mode outside {1, 2} silently adds no
        # interdependencies -- confirm whether that should raise instead)
        if mode == 1:
            # MILP formulation

            # The binary interdependency formulation requires defining slack
            # variables and binary linking variables along with logical
            # constraints. For the CPLEX model we implement this by using
            # indicator constraints rather than the binary linking variables
            # in the original model.
            # We now use binary slack variables in a constraint of the form:
            #     u_ij s_ij^kl + x_ij >= u_ij
            # If x_ij = u_ij, then s_ij^kl is free.
            # If x_ij < u_ij, then s_ij^kl = 1.
            # We will include an indicator constraint that forces x_kl <= 0
            # when s_ij^kl = 1.

            parents = [a[0] for a in self.Net.int]  # parent arc objects
            children = [a[1] for a in self.Net.int]  # child arc objects

            # Add binary slack indicator variables to Cplex object
            slack_vars = ["sl("+str(a.id)+")" for a in parents]

            # Add variables to Cplex object
            self.LowerModel.variables.add(names=slack_vars,
                                          types="B"*len(parents))

            # Define slack constraint names, senses, and righthand sides
            slack_con = ["sc("+str(a.id)+")" for a in parents]
            slack_con_sense = "G"*len(parents)
            slack_con_rhs = [a.bound for a in parents]

            # Define slack constraint linear expressions
            slack_con_expr = [[[slack_vars[i], self.flow_vars[parents[i].id]],
                               [parents[i].bound, 1]]
                              for i in range(len(parents))]

            # Add slack constraints to Cplex object
            self.LowerModel.linear_constraints.add(names=slack_con,
                                                   lin_expr=slack_con_expr,
                                                   senses=slack_con_sense,
                                                   rhs=slack_con_rhs)

            # Define indicator constraint names
            child_con = ["i("+str(a.id)+")" for a in children]

            # Define interdependency constraints
            child_expr = [[[self.flow_vars[a.id]], [1]] for a in children]

            # Add interdependency indicator constraints to Cplex object
            for i in range(len(child_con)):
                self.LowerModel.indicator_constraints.add(name=child_con[i],
                                                          indvar=slack_vars[i],
                                                          complemented=0,
                                                          lin_expr=child_expr[i],
                                                          sense="L",
                                                          rhs=0.0)
        elif mode == 2:
            # LP formulation

            # The linear interdependency formulation does not require any
            # variables beyond the existing flow variables. We simply need to
            # define flow bounds that link pairs of interdependent arcs.

            # Define constraint names, senses, and righthand sides
            flow_int = ["i("+str(i)+")" for i in range(len(self.Net.int))]
            flow_int_sense = "L"*len(flow_int)
            flow_int_rhs = [0.0 for i in self.Net.int]

            # Define interdependency constraints (with variables on LHS)
            flow_int_expr = [[[], []] for i in self.Net.int]
            for i, intd in enumerate(self.Net.int):
                # Skip parents with nonpositive capacity to avoid dividing by
                # zero; such rows are left as empty constraints
                if intd[0].bound <= 0:
                    continue
                # Get parent/child arc names
                var_pair = [self.flow_vars[intd[0].id],
                            self.flow_vars[intd[1].id]]
                # Set coefficients
                coef = [-(1.0*intd[1].bound/intd[0].bound), 1.0]
                # Update constraint list
                flow_int_expr[i] = [var_pair, coef]

            # Add interdependencies to model
            self.LowerModel.linear_constraints.add(names=flow_int,
                                                   lin_expr=flow_int_expr,
                                                   senses=flow_int_sense,
                                                   rhs=flow_int_rhs)

    #--------------------------------------------------------------------------
    def solve(self, defend, cutoff=100, gap=0.01, cplex_epsilon=0.001):
        """Bilevel subproblem solution method.

        The main cutting plane loop consists of alternating between solving
        the upper-level relaxed master problem and solving the lower-level
        response problem. The lower-level problem is adjusted to reflect the
        upper-level attack decisions, and its objective value represents the
        objective value associated with that attack vector, which then becomes
        a constraint in the relaxed master problem.

        The main loop of the cutting plane algorithm proceeds until either
        reaching an iteration cutoff or achieving a sufficiently small
        optimality gap, both of which can be adjusted. We can also terminate
        the loop early if the lower-level response is infeasible, in which
        case the upper-level objective is infinite.

        Requires the following positional arguments:
            defend -- Vector of defended arcs, as a boolean list.

        Accepts the following optional keyword arguments:
            cutoff -- Iteration cutoff for the overall cutting plane main
                loop. Defaults to 100.
            gap -- Optimality gap tolerance for the overall cutting plane
                main loop. Defaults to 0.01.
            cplex_epsilon -- Epsilon value for CPLEX solver's cleanup method.
                Values generated by the solver falling below this absolute
                value are deleted between solves. Defaults to 0.001.

        Returns a tuple containing the following elements:
            objective -- Objective value of the lower-level bilevel program.
            destroy -- Vector of destroyed arcs, as a boolean list.
            status -- Numerical code to describe the results of the solution
                process, including the following:
                    0: Successful exit with finite objective value.
                    1: Successful exit with infinite objective value.
                    2: Exit due to error.
                    3: Exit due to iteration cutoff
            iterations -- Number of iterations of main cutting plane loop.
        """
        # Set local variables
        obj_ub = -cplex.infinity  # objective upper bound (upper-level problem)
        obj_lb = cplex.infinity  # objective lower bound (lower-level problem)
        iteration = 1  # current iteration number
        status = 0  # exit code

        ###
        print("P2-3>\t\tIteration "+str(iteration-1))

        # Solve the upper-level problem once for the given defense vector
        (obj_ub, destroy) = self._upper_solve(defend=defend,
                                              cplex_epsilon=cplex_epsilon)
        ###
        print(" P2>\t\t\tobj = "+str(obj_ub))

        # Find the lower-level response for the given attack vector
        (obj_lb, nonzero, feasible) = self.lower_solve(destroy=destroy,
                                                       cplex_epsilon=cplex_epsilon)
        ###
        print(" P3>\t\t\t\tobj = "+str(obj_lb))

        obj_gap = abs(obj_ub - obj_lb)  # current optimality gap
        ###
        print("P2-3>\t\t\tgap = "+str(obj_gap))

        if not feasible:
            # NOTE(review): this pre-loop infeasible case reports big_m while
            # the in-loop case below reports cplex.infinity; behavior is kept
            # as-is, but the asymmetry looks unintentional -- confirm.
            obj_ub = self.big_m
            obj_lb = self.big_m
            status = 1

        #----------------------------------------------------------------------
        # Main cutting plane loop begin

        while (iteration < cutoff) and (obj_gap > gap) and feasible:
            iteration += 1
            ###
            print("P2-3>\t\tIteration "+str(iteration-1))

            # Add a constraint based on the nonzero flow vector
            self._upper_add_constraint(obj_lb, nonzero)

            # Re-solve the relaxed master problem
            (obj_ub, destroy) = self._upper_solve(cplex_epsilon=cplex_epsilon)
            ###
            print(" P2>\t\t\tobj = "+str(obj_ub))

            # Re-solve the lower-level response
            (obj_lb, nonzero, feasible) = self.lower_solve(destroy=destroy,
                                                           cplex_epsilon=cplex_epsilon)

            # Break if lower level is infeasible
            if not feasible:
                obj_ub = cplex.infinity
                obj_lb = cplex.infinity
                status = 1
                break

            ###
            print(" P3>\t\t\t\tobj = "+str(obj_lb))

            # Recalculate the optimality gap
            obj_gap = abs(obj_ub - obj_lb)
            ###
            print("P2-3>\t\t\tgap = "+str(obj_gap))

        if (iteration >= cutoff) and (obj_gap > gap):
            # If ending due to iteration cutoff without reaching optimality
            # gap, use average of bounds as the best guess
            status = 3
            obj_lb = (obj_ub+obj_lb)/2

        # Main cutting plane loop end
        #----------------------------------------------------------------------

        return (obj_lb, destroy, status, iteration)

    #--------------------------------------------------------------------------
    def _upper_solve(self, defend=[], cplex_epsilon=0.001):
        """Solves the upper-level relaxed master MILP.

        Uses the upper-level Cplex object to solve the MILP defined by the
        current defense vector. This process involves cleaning up the model,
        modifying the constraints, calling the CPLEX solver, and then
        interpreting and returning the results.

        Accepts the following optional keyword arguments:
            defend -- Vector of defended arcs, as a boolean list. Defaults to
                an empty list, in which case no constraints are updated.
                (Never mutated, so the shared default list is safe.)
            cplex_epsilon -- Epsilon value for CPLEX solver's cleanup method.
                Values generated by the solver falling below this absolute
                value are deleted between solves. Defaults to 0.001.

        Returns a tuple containing the following elements:
            objective -- Objective value upper-level program.
            destroy -- Vector of arc destruction decisions, as a boolean list.
        """
        # Clean up the model
        self.UpperModel.cleanup(cplex_epsilon)

        # Update constraints based on arc defense vector (defended arcs get a
        # righthand side of 0, forbidding their attack variables)
        if len(defend) == len(self.Net.def_arcs):
            new_rhs = [1 for a in self.Net.def_arcs]
            for i in range(len(new_rhs)):
                if defend[i]:
                    new_rhs[i] = 0
            self.UpperModel.linear_constraints.set_rhs(
                [(self.att_con[i], new_rhs[i])
                 for i in range(len(self.Net.def_arcs))])

        # Solve the MILP
        self.UpperModel.solve()

        # Get the objective value
        obj = self.UpperModel.solution.get_objective_value()

        # Get the solution vector. CPLEX reports binary variables as floats
        # that may differ slightly from exact integers (e.g. 0.9999999), so
        # round with a 0.5 threshold rather than testing '== 1', which could
        # silently drop attack decisions.
        destroy = [False for a in self.Net.att_arcs]
        for i in range(len(self.Net.att_arcs)):
            if self.UpperModel.solution.get_values(self.att_vars[i]) > 0.5:
                destroy[i] = True

        return (obj, destroy)

    #--------------------------------------------------------------------------
    def lower_solve(self, destroy=[], cplex_epsilon=0.001):
        """Solves the lower-level interdependent network flows LP or MILP.

        Uses the lower-level Cplex object to solve the LP or MILP defined by
        the current attack vector. This process involves cleaning up the
        model, modifying the constraints, calling the CPLEX solver, and then
        interpreting and returning the results.

        Accepts the following optional keyword arguments:
            destroy -- Vector of destroyed arcs, as a boolean list. Defaults
                to an empty list, in which case no constraints are updated.
                (Never mutated, so the shared default list is safe.)
            cplex_epsilon -- Epsilon value for CPLEX solver's cleanup method.
                Values generated by the solver falling below this absolute
                value are deleted between solves. Defaults to 0.001.

        Returns a tuple containing the following elements:
            objective -- Objective value lower-level program.
            nonzero -- Vector indicating nonzero flow values, as a boolean
                list.
            feasible -- Indicator of whether the lower-level program is
                feasible.
        """
        # Clean up the model
        self.LowerModel.cleanup(cplex_epsilon)

        # Update constraints based on arc destruction vector (destroyed arcs
        # get a flow bound of 0)
        if len(destroy) == len(self.Net.att_arcs):
            new_rhs = [a.bound for a in self.Net.att_arcs]
            for i in range(len(new_rhs)):
                if destroy[i]:
                    new_rhs[i] = 0
            self.LowerModel.linear_constraints.set_rhs(
                [(self.flow_att[i], new_rhs[i])
                 for i in range(len(self.flow_att))])

        # Solve the LP or MILP
        self.LowerModel.solve()

        # Set up containers for objective, nonzero flow indicator, and
        # feasibility status
        obj = cplex.infinity
        nonzero = [False for a in self.Net.arcs]
        status = self.LowerModel.solution.is_primal_feasible()

        # Update outputs if the problem is feasible (if infeasible, they will
        # retain their initialized values)
        if status:
            obj = self.LowerModel.solution.get_objective_value()
            # NOTE(review): no numerical tolerance here, so solver noise just
            # above 0.0 counts as nonzero flow -- confirm this is intended.
            for i in range(len(self.Net.arcs)):
                if self.LowerModel.solution.get_values(self.flow_vars[i]) > 0:
                    nonzero[i] = True

        return (obj, nonzero, status)

    #--------------------------------------------------------------------------
    def _upper_add_constraint(self, objective, arcs):
        """Adds a constraint to the relaxed master problem.

        The constraints added to the relaxed master problem during the course
        of the cutting plane algorithm bound the upper level objective
        variable. They are based on the solutions of the lower-level problem,
        and consist of the objective value plus a series of penalty terms for
        each nonzero flow arc.

        Requires the following positional arguments:
            objective -- Objective value from lower-level program.
            arcs -- Vector of arcs to include in the penalty term, as a
                boolean list. This should correspond to the arcs which carried
                nonzero flow for the solution in question, so that attack
                vectors which destroy such arcs ignore the corresponding
                objective bound.
        """
        # Define new constraint variables and coefficients: the objective
        # variable minus a penalty variable for each attacked nonzero arc
        new_con_vars = [self.obj_var]
        new_con_coef = [1]
        for i in range(len(self.Net.att_arcs)):
            if arcs[self.Net.att_arcs[i].id]:
                new_con_vars.append(self.pen_vars[i])
                new_con_coef.append(-1)

        # Add constraints to Cplex object
        self.UpperModel.linear_constraints.add(
            names=["s("+str(self.side_constraints)+")"],
            lin_expr=[[new_con_vars, new_con_coef]],
            senses=["L"], rhs=[objective])
        self.side_constraints += 1

    #--------------------------------------------------------------------------
    def end(self):
        """Closes all internal Cplex models.

        This should be called before the LLCuttingPlane object is discarded.
        """
        self.LowerModel.end()
        self.UpperModel.end()
| StarcoderdataPython |
11274012 | from django.urls import path
from . import views
urlpatterns = [
    # API root: lists the available routes
    path('', views.getRoutes, name="routes"),
    # Collection endpoint and primary-key detail endpoint for needs
    path('needs/', views.getNeeds, name="needs"),
    path('needs/<str:pk>', views.getNeed, name="need"),
]
395857 | <filename>kicker/control_human_automatic_strategy.py
import math
from kicker.CONST_BALL import Coordinate
from kicker.CONST_KICKER import COURT_HEIGHT
from kicker.CONST_GAME_FIGURES import FIGURE_FOOT_HEIGHT
class HumanStrategy:
    """Automatic control strategy for the human team's keeper and defender.

    On each call to :meth:`next_move` the strategy tracks the ball with both
    bars while it moves toward the human goal, and parks the bars in the
    middle of their travel otherwise.
    """

    def __init__(self, kicker):
        # Keep a reference to the kicker game state this strategy reacts to.
        self.kicker = kicker

    def next_move(self):
        """Compute, assign, and apply the next keeper/defender bar positions."""
        keeper = self.kicker.human_keeper
        defender = self.kicker.human_defender
        half_foot = FIGURE_FOOT_HEIGHT / 2

        # The ball moves toward the human goal while its angle is within
        # (-pi/2, pi/2); only then is it worth tracking.
        if -math.pi / 2 < self.kicker.ball.angle < math.pi / 2:
            ball_y = self.kicker.ball.pos[Coordinate.Y]
            if ball_y < COURT_HEIGHT / 2:
                # Ball in the lower half: cover with the foot's upper edge
                keeper_target = ball_y - keeper.POSITION_ON_BAR - half_foot
                defender_target = (ball_y - defender.POSITION_ON_BAR_DEFENDER_LEFT
                                   + half_foot)
            elif ball_y > COURT_HEIGHT / 2:
                # Ball in the upper half: cover with the foot's lower edge
                keeper_target = ball_y - keeper.POSITION_ON_BAR + half_foot
                defender_target = (ball_y - defender.POSITION_ON_BAR_DEFENDER_RIGHT
                                   - half_foot)
            else:
                # Ball exactly at mid-court
                keeper_target = ball_y - keeper.POSITION_ON_BAR
                defender_target = (ball_y - defender.POSITION_ON_BAR_DEFENDER_RIGHT
                                   - half_foot)
            # Clamp both targets to the mechanically reachable bar range.
            keeper_target = min(max(keeper_target, 0), keeper.MAX_POS_KEEPER)
            defender_target = min(max(defender_target, 0),
                                  defender.MAX_POS_DEFENDER)
        else:
            # Ball moves away from our goal: park both bars at mid-travel.
            keeper_target = keeper.MAX_POS_KEEPER / 2
            defender_target = defender.MAX_POS_DEFENDER / 2

        keeper.next_position = keeper_target
        defender.next_position = defender_target
        keeper.move_bar()
        defender.move_bar()
9681968 | <filename>wagtailautocomplete/urls/admin.py
from django.urls import path
try:
    # Wagtail >= 2.5 moved require_admin_access to wagtail.admin.auth
    from wagtail.admin.auth import require_admin_access
except ImportError:
    # Fallback location for older Wagtail releases
    from wagtail.admin.decorators import require_admin_access
from wagtailautocomplete.views import create, objects, search
# Autocomplete endpoints; each view is restricted to admin users
urlpatterns = [
    path('create/', require_admin_access(create)),
    path('objects/', require_admin_access(objects)),
    path('search/', require_admin_access(search)),
]
| StarcoderdataPython |
6621389 | <gh_stars>1-10
#!/usr/bin/env python3
"""
List all projects registered in <CWL_ICA_REPO_PATH>/config/projects.yaml
"""
from classes.command import Command
from utils.logging import get_logger
import pandas as pd
from utils.repo import get_tenant_yaml_path, read_yaml, get_project_yaml_path
import os
import sys
from utils.errors import TenantNotFoundError
logger = get_logger()
class ListProjects(Command):
    # NOTE: this docstring is parsed at runtime by the Command base class to
    # build the CLI argument spec (docopt-style) — do not edit it casually.
    """Usage:
    cwl-ica [options] list-projects help
    cwl-ica [options] list-projects [--tenant-name=<"tenant_name">]
Description:
    List all available projects, if --tenant-name is not set then tenants from all projects are returned.
    If env var CWL_ICA_DEFAULT_TENANT is set and you wish to view projects across all tenants, set --tenant-name to 'all'
Options:
    --tenant-name=<tenant name>          Optional, filter by tenant-name.
Environment Variables:
    CWL_ICA_DEFAULT_TENANT    Can be used as an alternative for --tenant-name.
Example:
    cwl-ica list-projects --tenant-name "all"
    cwl-ica list-projects --tenant-name "tenant name"
    cwl-ica list-projects
    """

    def __init__(self, command_argv):
        """Parse CLI args (via the Command base) and validate them."""
        # Collect args from doc strings
        super().__init__(command_argv)
        # Initialise values
        self.tenant_name = None  # None means "no tenant filter"
        # Check args
        self.check_args()

    def __call__(self):
        """
        Just run through this
        :return:
        """
        # Check project.yaml exists
        project_yaml_path = get_project_yaml_path()
        project_list = read_yaml(project_yaml_path)['projects']
        # Create pandas df of project yaml path
        project_df = pd.DataFrame(project_list)
        # Write project to stdout
        # NOTE(review): self.tenant_name is validated in check_args but is not
        # used to filter the dataframe here — confirm whether filtering is
        # intended to happen elsewhere.
        project_df[["project_name", "project_id", "project_description", "production"]].\
            to_markdown(sys.stdout, index=False)
        # Create a new line character
        print()

    def check_args(self):
        """
        Check if --tenant-name is defined or CWL_ICA_DEFAULT_TENANT is present
        Or if --tenant-name is set to 'all'
        :return:
        """
        tenant_arg = self.args.get("--tenant-name", None)
        # Check if tenant arg is set; fall back to the environment variable
        if tenant_arg is None:
            tenant_arg = os.environ.get("CWL_ICA_DEFAULT_TENANT", None)
        # Check if tenant arg is set to all — 'all' disables the filter
        if tenant_arg is None or tenant_arg == "all":
            self.tenant_name = None
        else:
            self.tenant_name = tenant_arg
        # If tenant_name is set, make sure it's present in tenant.yaml
        if self.tenant_name is not None:
            tenant_yaml_path = get_tenant_yaml_path()
            tenant_list = read_yaml(tenant_yaml_path)['tenants']
            # for/else: raise only if the loop finishes without a match
            for tenant in tenant_list:
                if tenant.get("tenant_name", None) == self.tenant_name:
                    break
            else:
                logger.error(f"Tenant name set to \"{self.tenant_name}\" but "
                             f"could not find this tenant name in \"{tenant_yaml_path}\"\n")
                raise TenantNotFoundError
        # Just make sure the project.yaml path exists
        _ = get_project_yaml_path()
| StarcoderdataPython |
6523215 | import numpy as np
import numpy.linalg as la
import torch
import torch.nn.functional as F
import torchvision
import json
import time
from matplotlib import pyplot as plt
#from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from lietorch import SE3, LieGroupParameter
from scipy.spatial.transform import Rotation as R
import cv2
from nerf import (get_ray_bundle, run_one_iter_of_nerf)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def mahalanobis(u, v, cov):
    """Squared Mahalanobis distance between vectors u and v under covariance cov.

    Computes (u - v)^T @ cov^{-1} @ (u - v); note this is the *squared*
    distance (no square root), used directly as a loss term.
    """
    diff = u - v
    weighted = torch.matmul(torch.inverse(cov), diff)
    return torch.dot(diff, weighted)
# PEP 8 (E731): these were lambdas assigned to names; converted to plain
# `def`s with the same names and identical return values so they pick up
# proper __name__s, tracebacks, and docstrings.
# NOTE(review): the psi/phi naming is inherited from the original — rot_psi
# rotates about x and rot_phi about z; kept as-is since callers use these names.

def rot_x(phi):
    """3x3 rotation about the x-axis (torch). `phi` is a scalar tensor, radians."""
    return torch.tensor([
        [1., 0., 0.],
        [0., torch.cos(phi), -torch.sin(phi)],
        [0., torch.sin(phi), torch.cos(phi)]], dtype=torch.float32)


def rot_x_np(phi):
    """3x3 rotation about the x-axis (numpy). `phi` in radians."""
    return np.array([
        [1., 0., 0.],
        [0., np.cos(phi), -np.sin(phi)],
        [0., np.sin(phi), np.cos(phi)]], dtype=np.float32)


def rot_psi(phi):
    """4x4 homogeneous rotation about the x-axis, angle in radians."""
    return np.array([
        [1, 0, 0, 0],
        [0, np.cos(phi), -np.sin(phi), 0],
        [0, np.sin(phi), np.cos(phi), 0],
        [0, 0, 0, 1]])


def rot_theta(th):
    """4x4 homogeneous rotation about the y-axis, angle in radians."""
    return np.array([
        [np.cos(th), 0, -np.sin(th), 0],
        [0, 1, 0, 0],
        [np.sin(th), 0, np.cos(th), 0],
        [0, 0, 0, 1]])


def rot_phi(psi):
    """4x4 homogeneous rotation about the z-axis, angle in radians."""
    return np.array([
        [np.cos(psi), -np.sin(psi), 0, 0],
        [np.sin(psi), np.cos(psi), 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]])


def trans_t(t):
    """4x4 homogeneous translation by `t` along the z-axis."""
    return np.array([
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, t],
        [0, 0, 0, 1]])
def SE3_to_trans_and_quat(data):
    """Convert an SE(3) matrix (at least 3x4) to a 7-vector [tx, ty, tz, qx, qy, qz, qw].

    The quaternion follows scipy's scalar-last convention.
    """
    translation = data[:3, 3]
    quaternion = R.from_matrix(data[:3, :3]).as_quat()
    return np.concatenate([translation, quaternion])
def find_POI(img_rgb, DEBUG=False):  # img - RGB image in range 0...255
    """Detect ORB keypoints in an RGB image.

    Returns an (N, 2) int array of deduplicated xy pixel coordinates
    (empty 1-D array when nothing is detected — callers check ndim).
    """
    frame = np.copy(img_rgb)
    # ORB only (the SIFT path from earlier revisions stays disabled).
    detector = cv2.ORB_create()
    detected = detector.detect(frame, None)
    pixel_coords = np.array([kp.pt for kp in detected]).astype(int)
    # Remove duplicate points (set order is unspecified, as in the original).
    unique_coords = set(tuple(pt) for pt in pixel_coords)
    return np.array([list(pt) for pt in unique_coords]).astype(int)  # pixel coordinates
def nearestPD(A):
    """Return the nearest symmetric positive-definite matrix to A.

    Python/Numpy port of Higham's algorithm as implemented in the
    `nearestSPD` MATLAB routine; see
    https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    and N. J. Higham, "Computing a nearest symmetric positive semidefinite
    matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6
    """
    # Symmetrize, then take the symmetric polar factor from the SVD.
    sym_part = (A + A.T) / 2
    _, sing_vals, Vt = la.svd(sym_part)
    polar = np.dot(Vt.T, np.dot(np.diag(sing_vals), Vt))
    candidate = (sym_part + polar) / 2
    candidate = (candidate + candidate.T) / 2  # enforce exact symmetry
    if isPD(candidate):
        return candidate

    # Numpy's Cholesky (unlike MATLAB's chol) rejects exactly-zero
    # eigenvalues, so nudge the spectrum up until the factorization
    # succeeds. The increment grows quadratically with the attempt count.
    eps = np.spacing(la.norm(A))
    identity = np.eye(A.shape[0])
    attempt = 1
    while not isPD(candidate):
        min_eig = np.min(np.real(la.eigvals(candidate)))
        candidate += identity * (-min_eig * attempt**2 + eps)
        attempt += 1
    return candidate


def isPD(B):
    """Return True when B is positive-definite, tested via Cholesky."""
    try:
        la.cholesky(B)
        return True
    except la.LinAlgError:
        return False
class Estimator():
    """NeRF-based pose/state estimator with an optional filtering step.

    Each estimate_state() call propagates the previous estimate through the
    agent's dynamics, refines the camera pose (plus velocities when
    filtering) by gradient descent on a rendered photometric loss combined
    with a Mahalanobis dynamics-consistency loss, and updates the state
    covariance from the loss Hessian.
    """

    def __init__(self, filter_cfg, agent, start_state, filter=True) -> None:
        # Parameters
        self.batch_size = filter_cfg['batch_size']      # rays sampled per optimization step
        self.kernel_size = filter_cfg['kernel_size']    # dilation kernel for interest regions
        self.dil_iter = filter_cfg['dil_iter']          # dilation iterations
        self.lrate = filter_cfg['lrate']
        self.sampling_strategy = filter_cfg['sampling_strategy']
        self.reject_thresh = filter_cfg['reject_thresh']  # per-ray outlier-rejection threshold
        self.agent = agent
        self.is_filter = filter
        #State initial estimate at time t=0
        self.xt = start_state #Size 18
        self.sig = 1e-1*torch.eye(start_state.shape[0])
        self.Q = 1e-1*torch.eye(start_state.shape[0])
        #self.sig = filter_cfg['sig0'] #State covariance 18x18
        #self.Q = filter_cfg['Q'] #Process noise covariance
        self.R = filter_cfg['R'] #Measurement covariance
        self.iter = filter_cfg['N_iter']
        #NERF SPECIFIC CONFIGS
        # create meshgrid from the observed image
        self.W, self.H, self.focal = filter_cfg['W'], filter_cfg['H'], filter_cfg['focal']
        #self.coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, self.W - 1, self.W), np.linspace(0, self.H - 1, self.H)), -1),
        #    dtype=int)
        #Storage for plots
        self.pixel_losses = {}
        self.dyn_losses = {}
        self.covariance = []
        self.state_estimates = []
        self.states = {}
        self.predicted_states = []
        self.actions = []
        self.iteration = 0

    def estimate_relative_pose(self, sensor_image, start_state, sig, obs_img_pose=None, obs_img=None, model_coarse=None, model_fine=None,cfg=None,
                               encode_position_fn=None, encode_direction_fn=None):
        """Refine the propagated state by gradient descent against the NeRF.

        Samples rays around ORB keypoints of `sensor_image`, renders them,
        and minimizes photometric MSE (+ Mahalanobis dynamics loss when
        filtering). Returns (state, success_flag); success_flag is False
        when no keypoints were found. Also caches `self.target_s` and
        `self.batch` for the later Hessian evaluation.
        """
        b_print_comparison_metrics = obs_img_pose is not None
        b_generate_overlaid_images = b_print_comparison_metrics and obs_img is not None
        obs_img_noised = sensor_image
        W_obs = sensor_image.shape[0]
        H_obs = sensor_image.shape[1]
        # find points of interest of the observed image
        POI = find_POI(obs_img_noised, False) # xy pixel coordinates of points of interest (N x 2)
        ### IF FEATURE DETECTION CANT FIND POINTS, RETURN INITIAL
        if len(POI.shape) == 1:
            self.pixel_losses[f'{self.iteration}'] = []
            self.dyn_losses[f'{self.iteration}'] = []
            self.states[f'{self.iteration}'] = []
            # NOTE(review): start_state arrives as a numpy array from
            # estimate_state, which has no .clone() — confirm this early
            # return path is exercised / intended.
            return start_state.clone().detach(), False
        obs_img_noised = (np.array(obs_img_noised) / 255.).astype(np.float32)
        obs_img_noised = torch.tensor(obs_img_noised).cuda()
        #sensor_image[POI[:, 1], POI[:, 0]] = [0, 255, 0]
        # create meshgrid from the observed image
        coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, W_obs - 1, W_obs), np.linspace(0, H_obs - 1, H_obs)), -1), dtype=int)
        # create sampling mask for interest region sampling strategy
        interest_regions = np.zeros((H_obs, W_obs, ), dtype=np.uint8)
        interest_regions[POI[:,1], POI[:,0]] = 1
        I = self.dil_iter
        interest_regions = cv2.dilate(interest_regions, np.ones((self.kernel_size, self.kernel_size), np.uint8), iterations=I)
        interest_regions = np.array(interest_regions, dtype=bool)
        interest_regions = coords[interest_regions]
        # not_POI contains all points except of POI
        coords = coords.reshape(H_obs * W_obs, 2)
        #not_POI = set(tuple(point) for point in coords) - set(tuple(point) for point in POI)
        #not_POI = np.array([list(point) for point in not_POI]).astype(int)
        #Break up state into components
        start_trans = start_state[:3].reshape((3, 1))
        ### IMPORTANT: ROTATION MATRIX IS ROTATED BY SOME AMOUNT TO ACCOUNT FOR CAMERA ORIENTATION
        start_rot = rot_x_np(np.pi/2) @ start_state[6:15].reshape((3, 3))
        start_pose = np.concatenate((start_rot, start_trans), axis=1)
        start_vel = torch.tensor(start_state[3:6]).cuda()
        start_omega = torch.tensor(start_state[15:]).cuda()
        # Create pose transformation model (Lie-group parameter so the
        # optimizer updates the pose on the SE(3) manifold)
        start_pose = SE3_to_trans_and_quat(start_pose)
        starting_pose = SE3(torch.from_numpy(start_pose).float().cuda())
        starting_pose = LieGroupParameter(starting_pose).cuda()
        #print('Start pose', start_pose, start_vel, start_omega)
        # Add velocities, omegas, and pose object to optimizer
        if self.is_filter is True:
            optimizer = torch.optim.Adam(params=[starting_pose, start_vel, start_omega], lr=self.lrate, betas=(0.9, 0.999))
        else:
            optimizer = torch.optim.Adam(params=[starting_pose], lr=self.lrate, betas=(0.9, 0.999))
        # calculate angles and translation of the observed image's pose
        if b_print_comparison_metrics:
            phi_ref = np.arctan2(obs_img_pose[1,0], obs_img_pose[0,0])*180/np.pi
            theta_ref = np.arctan2(-obs_img_pose[2, 0], np.sqrt(obs_img_pose[2, 1]**2 + obs_img_pose[2, 2]**2))*180/np.pi
            psi_ref = np.arctan2(obs_img_pose[2, 1], obs_img_pose[2, 2])*180/np.pi
            translation_ref = np.sqrt(obs_img_pose[0,3]**2 + obs_img_pose[1,3]**2 + obs_img_pose[2,3]**2)
        #Store data
        pix_losses = []
        dyn_losses = []
        states = []
        for k in range(self.iter):
            model_coarse.eval()
            if model_fine:
                model_fine.eval()
            rgb_coarse, rgb_fine = None, None
            # TODO: IMPLEMENT INERF WITH USE_CACHED DATSET!!!
            # Sample a random batch of interest-region pixels each step.
            rand_inds = np.random.choice(interest_regions.shape[0], size=self.batch_size, replace=False)
            batch = interest_regions[rand_inds]
            target_s = obs_img_noised[batch[:, 1], batch[:, 0]]
            #target_s = torch.Tensor(target_s).to(device)
            pose = starting_pose.retr().matrix()[:3, :4]
            ray_origins, ray_directions = get_ray_bundle(self.H, self.W, self.focal, pose) # (H, W, 3), (H, W, 3)
            #with torch.no_grad():
            # r_o, r_d = ray_origins, ray_directions
            #print('Ray origins cuda', ray_origins.is_cuda)
            ray_origins = ray_origins[batch[:, 1], batch[:, 0], :]
            ray_directions = ray_directions[batch[:, 1], batch[:, 0], :]
            then = time.time()
            rgb_coarse, _, _, rgb_fine, _, _ = run_one_iter_of_nerf(
                self.H,
                self.W,
                self.focal,
                model_coarse,
                model_fine,
                ray_origins,
                ray_directions,
                cfg,
                mode="validation",
                encode_position_fn=encode_position_fn,
                encode_direction_fn=encode_direction_fn,
            )
            #target_ray_values = target_s
            #print(time.time() - then)
            ### OUTLIER REJECTION
            # Keep only rays whose per-ray mean absolute error is below the
            # threshold; torch.nonzero on the ReLU'd margin yields their indices.
            threshold = self.reject_thresh
            with torch.no_grad():
                coarse_sample_loss = torch.sum(torch.abs(rgb_coarse[..., :3] - target_s[..., :3]), 1)/3
                fine_sample_loss = torch.sum(torch.abs(rgb_fine[..., :3] - target_s[..., :3]), 1)/3
                csl = F.relu(-(coarse_sample_loss-threshold))
                fsl = F.relu(-(fine_sample_loss-threshold))
                coarse_ind = torch.nonzero(csl)
                fine_ind = torch.nonzero(fsl)
            ### ---------------- ###
            coarse_loss = torch.nn.functional.mse_loss(
                rgb_coarse[coarse_ind, :3], target_s[coarse_ind, :3]
            )
            fine_loss = None
            if rgb_fine is not None:
                fine_loss = torch.nn.functional.mse_loss(
                    rgb_fine[fine_ind, :3], target_s[fine_ind, :3]
                )
            loss = coarse_loss + (fine_loss if fine_loss is not None else 0.0)
            pix_losses.append(loss.clone().cpu().detach().numpy().tolist())
            #Add dynamics loss
            # Rebuild the 18-D state from the current pose estimate; the
            # rot_x(-pi/2) undoes the camera-orientation rotation applied above.
            state = torch.cat((pose[:3, 3], start_vel, (rot_x(torch.tensor(-np.pi/2)) @ pose[:3, :3]).reshape(-1), start_omega), dim=0)
            dyn_loss = mahalanobis(state, torch.tensor(start_state), sig)
            states.append(state.clone().cpu().detach().numpy().tolist())
            dyn_losses.append(dyn_loss.clone().cpu().detach().numpy().tolist())
            if self.is_filter is True:
                loss += dyn_loss
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # Exponential learning-rate decay per step.
            new_lrate = self.lrate * (0.8 ** ((k + 1) / 100))
            #new_lrate = extra_arg_dict['lrate'] * np.exp(-(k)/1000)
            for param_group in optimizer.param_groups:
                param_group['lr'] = new_lrate
            # print results periodically
            if b_print_comparison_metrics and ((k + 1) % 20 == 0 or k == 0):
                print('Step: ', k)
                print('Loss: ', loss)
                with torch.no_grad():
                    pose_dummy = starting_pose.retr().matrix().clone().cpu().detach().numpy()
                    # calculate angles and translation of the optimized pose
                    phi = np.arctan2(pose_dummy[1, 0], pose_dummy[0, 0]) * 180 / np.pi
                    theta = np.arctan2(-pose_dummy[2, 0], np.sqrt(pose_dummy[2, 1] ** 2 + pose_dummy[2, 2] ** 2)) * 180 / np.pi
                    psi = np.arctan2(pose_dummy[2, 1], pose_dummy[2, 2]) * 180 / np.pi
                    translation = np.sqrt(pose_dummy[0,3]**2 + pose_dummy[1,3]**2 + pose_dummy[2,3]**2)
                    #translation = pose_dummy[2, 3]
                    # calculate error between optimized and observed pose
                    # (wrap-around handling keeps angle errors in [0, 180])
                    phi_error = abs(phi_ref - phi) if abs(phi_ref - phi)<300 else abs(abs(phi_ref - phi)-360)
                    theta_error = abs(theta_ref - theta) if abs(theta_ref - theta)<300 else abs(abs(theta_ref - theta)-360)
                    psi_error = abs(psi_ref - psi) if abs(psi_ref - psi)<300 else abs(abs(psi_ref - psi)-360)
                    rot_error = phi_error + theta_error + psi_error
                    translation_error = abs(translation_ref - translation)
                    print('Rotation error: ', rot_error)
                    print('Translation error: ', translation_error)
                    print('Number of rays accepted', len(fine_ind))
                    print('-----------------------------------')
            '''
            if (k+1) % 100 == 0:
                _, _, _, rgb_fine, _, _ = run_one_iter_of_nerf(
                    self.H,
                    self.W,
                    self.focal,
                    model_coarse,
                    model_fine,
                    r_o,
                    r_d,
                    cfg,
                    mode="validation",
                    encode_position_fn=encode_position_fn,
                    encode_direction_fn=encode_direction_fn,
                )
                rgb = rgb_fine.cpu().detach().numpy()
                f, axarr = plt.subplots(2)
                axarr[0].imshow(rgb)
                axarr[1].imshow(sensor_image)
                plt.show()
            '''
        print("Done with main relative_pose_estimation loop")
        # Cache the last batch/targets so measurement_function (Hessian
        # evaluation) reuses exactly the same rays.
        self.target_s = target_s.detach()
        self.batch = batch
        self.pixel_losses[f'{self.iteration}'] = pix_losses
        self.dyn_losses[f'{self.iteration}'] = dyn_losses
        self.states[f'{self.iteration}'] = states
        # NOTE(review): `state` is bound inside the loop — if self.iter == 0
        # this raises NameError; confirm N_iter is always >= 1.
        return state.clone().detach(), True

    def measurement_function(self, state, start_state, sig, model_coarse=None, model_fine=None,cfg=None,
                             encode_position_fn=None, encode_direction_fn=None):
        """Total loss (photometric + dynamics) at `state`, on the cached ray batch.

        Used only as the scalar function whose Hessian (w.r.t. state) gives
        the updated state covariance in estimate_state.
        """
        target_s = self.target_s
        batch = self.batch
        # Breaking state into pieces (same camera-orientation rotation as in
        # estimate_relative_pose)
        rot_mat = rot_x(torch.tensor(np.pi/2)) @ state[6:15].reshape((3, 3))
        trans = state[:3].reshape((3, 1))
        pose_mat = torch.cat((rot_mat, trans), dim=1)
        #Process loss.
        loss_dyn = mahalanobis(state, torch.tensor(start_state), sig)
        #TODO: CONVERT STATE INTO POSE
        ray_origins, ray_directions = get_ray_bundle(self.H, self.W, self.focal, pose_mat) # (H, W, 3), (H, W, 3)
        ray_origins = ray_origins[batch[:, 1], batch[:, 0], :]
        ray_directions = ray_directions[batch[:, 1], batch[:, 0], :]
        rgb_coarse, _, _, rgb_fine, _, _ = run_one_iter_of_nerf(
            self.H,
            self.W,
            self.focal,
            model_coarse,
            model_fine,
            ray_origins,
            ray_directions,
            cfg,
            mode="validation",
            encode_position_fn=encode_position_fn,
            encode_direction_fn=encode_direction_fn,
        )
        target_ray_values = target_s
        ### OUTLIER REJECTION (same scheme as in estimate_relative_pose)
        threshold = self.reject_thresh
        with torch.no_grad():
            coarse_sample_loss = torch.sum(torch.abs(rgb_coarse[..., :3] - target_ray_values[..., :3]), 1)/3
            fine_sample_loss = torch.sum(torch.abs(rgb_fine[..., :3] - target_ray_values[..., :3]), 1)/3
            csl = F.relu(-(coarse_sample_loss-threshold))
            fsl = F.relu(-(fine_sample_loss-threshold))
            coarse_ind = torch.nonzero(csl)
            fine_ind = torch.nonzero(fsl)
        ### ---------------- ###
        coarse_loss = torch.nn.functional.mse_loss(
            rgb_coarse[coarse_ind, :3], target_ray_values[coarse_ind, :3]
        )
        fine_loss = None
        if rgb_fine is not None:
            fine_loss = torch.nn.functional.mse_loss(
                rgb_fine[fine_ind, :3], target_ray_values[fine_ind, :3]
            )
        loss_rgb = coarse_loss + (fine_loss if fine_loss is not None else 0.0)
        loss = loss_rgb + loss_dyn
        return loss

    def estimate_state(self, sensor_img, obs_img_pose, action, model_coarse=None, model_fine=None,cfg=None,
                       encode_position_fn=None, encode_direction_fn=None):
        """One full filter step: predict, optimize, and update covariance.

        Returns the new state estimate (detached tensor). Side effects:
        updates self.xt / self.sig and appends to the logging buffers.
        """
        # Computes Jacobian w.r.t dynamics are time t-1. Then update state covariance Sig_{t|t-1}.
        # Perform grad. descent on J = measurement loss + process loss
        # Compute state covariance Sig_{t} by hessian at state at time t.
        #with torch.no_grad():
        #Propagated dynamics. x t|t-1
        start_state = self.agent.drone_dynamics(self.xt, action)
        start_state = start_state.cpu().numpy()
        #State estimate at t-1 is self.xt. Find jacobian wrt dynamics
        t1 = time.time()
        A = torch.autograd.functional.jacobian(lambda x: self.agent.drone_dynamics(x, action), self.xt)
        #with torch.no_grad():
        t2 = time.time()
        #print('Elapsed time for Jacobian', t2-t1)
        #Propagate covariance (EKF-style: A @ Sig @ A^T + Q)
        sig_prop = A @ self.sig @ A.T + self.Q
        #Argmin of total cost. Encapsulate this argmin optimization as a function call
        then = time.time()
        xt, success_flag = self.estimate_relative_pose(sensor_img, start_state, sig_prop, obs_img_pose=obs_img_pose, obs_img=None,
            model_coarse=model_coarse, model_fine=model_fine,cfg=cfg, encode_position_fn=encode_position_fn, encode_direction_fn=encode_direction_fn)
        print('Optimization step for filter', time.time()-then)
        #with torch.no_grad():
        #Update state estimate
        self.xt = xt
        #Hessian to get updated covariance
        t3 = time.time()
        if self.is_filter is True and success_flag is True:
            hess = torch.autograd.functional.hessian(lambda x: self.measurement_function(x, start_state, sig_prop, model_coarse=model_coarse,
                model_fine=model_fine,cfg=cfg, encode_position_fn=encode_position_fn, encode_direction_fn=encode_direction_fn), self.xt)
            #with torch.no_grad():
            #Turn covariance into positive definite
            hess_np = hess.clone().cpu().detach().numpy()
            hess = nearestPD(hess_np)
            t4 = time.time()
            print('Elapsed time for hessian', t4-t3)
            #self.sig_det.append(np.linalg.det(sig.cpu().numpy()))
            #Update state covariance
            self.sig = torch.inverse(torch.tensor(hess))
            #print(self.sig)
        #print('Start state', start_state)
        self.actions.append(action.clone().cpu().detach().numpy().tolist())
        self.predicted_states.append(start_state.tolist())
        self.covariance.append(self.sig.clone().cpu().detach().numpy().tolist())
        self.state_estimates.append(self.xt.clone().cpu().detach().numpy().tolist())
        self.iteration += 1
        return self.xt.clone().detach()

    def save_data(self, filename):
        """Dump all logged estimator data to `filename` as JSON."""
        data = {}
        data['pixel_losses'] = self.pixel_losses
        data['dyn_losses'] = self.dyn_losses
        data['covariance'] = self.covariance
        data['state_estimates'] = self.state_estimates
        data['states'] = self.states
        data['predicted_states'] = self.predicted_states
        data['actions'] = self.actions
        with open(filename,"w+") as f:
            json.dump(data, f)
        return
| StarcoderdataPython |
3437726 | <gh_stars>0
# coding: utf-8
"""
MasterMind Service Manager
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.volume_api import VolumeApi # noqa: E501
from swagger_client.rest import ApiException
class TestVolumeApi(unittest.TestCase):
    """VolumeApi unit test stubs"""
    # Swagger-codegen generated stubs: each test_* method is a placeholder
    # awaiting a real implementation.

    def setUp(self):
        # Fresh API client for every test case.
        self.api = swagger_client.api.volume_api.VolumeApi()  # noqa: E501

    def tearDown(self):
        # Nothing to clean up for the stubbed client.
        pass

    def test_create_volume(self):
        """Test case for create_volume

        Create a volume with the given name  # noqa: E501
        """
        pass

    def test_delete_volume(self):
        """Test case for delete_volume

        Remove volume with given name  # noqa: E501
        """
        pass

    def test_get_volumes(self):
        """Test case for get_volumes

        Obtain a list of defined volumes  # noqa: E501
        """
        pass
# Run the stubbed test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
243883 | from os import path
import re
import xmltodict
HERE = path.dirname(path.abspath(__file__))
class CiqualDatasource():
    """Loads the French Ciqual food-composition XML export and produces a
    filtered list of aliment dicts (id, names, group, nutrition).

    NOTE(review): all methods except generate() are written without `self`
    and are invoked through the class (CiqualDatasource.load_files()), so
    they behave as static helpers; consider @staticmethod — confirm no
    caller relies on the current form.
    """

    def generate(self):
        """Run the full pipeline: load XML, reformat, filter. Returns the
        filtered aliment list; prints progress to stdout."""
        print('- Generating Ciqual data... ', end='', flush=True)
        # Load data
        print('- Loading files... ', end='', flush=True)
        data_raw = CiqualDatasource.load_files()
        print('OK')
        # Reformat data
        print('- Format data... ', end='', flush=True)
        data_formatted = CiqualDatasource.format_data(data_raw)
        print('OK')
        # Filter data
        print('- Filter data... ', end='', flush=True)
        data_filtered = CiqualDatasource.filter_data(data_formatted)
        print('OK')
        return data_filtered
        # Insert data in database
        #print('- Populate database... ', end='', flush=True)
        ##CiqualDatasource.populate_database(data_filtered)
        #print('OK')
        #print('-- Generation finished --')

    def load_files():
        """Parse the four Ciqual XML exports and merge their <TABLE> contents
        into one dict (later files overwrite keys of earlier ones)."""
        filenames = [
            # Contains the list of all aliments with group id
            'alim_2017_11_21.xml',
            # Contains all the groups and sub groups
            'alim_grp_2017_11_21.xml',
            # Contains nutrition information with aliment id and constant id
            'compo_2017_11_21.xml',
            # Containe all the constant used in composition
            'const_2017_11_21.xml'
        ]
        ciqual_out = {}
        for filename in filenames:
            with open(path.join(HERE, filename), 'rb') as f:
                r = f.read()
            ciqual_out.update(xmltodict.parse(r)['TABLE'])
        return ciqual_out

    def format_data(data_raw):
        """Join aliments, groups and composition records into flat dicts.

        NOTE(review): the inner scan of data_raw['COMPO'] for every aliment
        makes this O(|ALIM| * |COMPO|); an index by alim_code would make it
        linear — confirm before changing, output order must be preserved.
        """
        # ALIM []:
        #   alim_code, alim_nom_fr, alim_nom_index_fr, alim_grp_code
        #   alim_ssgrp_code, alim_ssssgrp_code
        #
        # ALIM_GRP []:
        #   alim_grp_code, alim_grp_nom_fr, alim_ssgrp_code,
        #   alim_ssgrp_nom_fr, alim_ssssgrp_code, alim_ssssgrp_nom_fr
        #
        # COMPO []:
        #   alim_code, const_code, teneur, min, max, code_confiance,
        #   source_code
        #
        # CONST []:
        #   const_code, const_nom_fr
        groups = {
            '0000': '-'  # sentinel: code '0000' means "no (sub)group"
        }
        for g in data_raw['ALIM_GRP']:
            groups[g['alim_grp_code']] = g['alim_grp_nom_fr']
            groups[g['alim_ssgrp_code']] = g['alim_ssgrp_nom_fr']
            groups[g['alim_ssssgrp_code']] = g['alim_ssssgrp_nom_fr']
        # Name in xml to column name in database
        # The second element in the tuple is the multiplicator
        # All in microgram
        mapping_nutrition = {
            'Protéines (g/100g)': ('protein', 10000),
            'Glucides (g/100g)': ('glucid', 10000),
            'Lipides (g/100g)': ('lipid', 10000)
        }
        mapping_constant = {}
        for x in data_raw['CONST']:
            cn = x['const_nom_fr']
            if cn in mapping_nutrition:
                mapping_constant[x['const_code']] = mapping_nutrition[cn]
        data = []
        for a in data_raw['ALIM']:
            code_ciqual = a['alim_code']
            # Prefer the most specific non-empty group name.
            if groups[a['alim_ssssgrp_code']] != '-':
                group_name = groups[a['alim_ssssgrp_code']]
            elif groups[a['alim_ssgrp_code']] != '-':
                group_name = groups[a['alim_ssgrp_code']]
            else:
                group_name = groups[a['alim_grp_code']]
            nutrition = {}
            for x in data_raw['COMPO']:
                if x['alim_code'] == code_ciqual:
                    if x['const_code'] not in mapping_constant:
                        continue
                    val = x['teneur']
                    if val == '-':
                        val = ''
                    # remove non decimal char
                    # NOTE(review): this regex also strips the decimal comma,
                    # so the later replace(',', '.') is a no-op — verify
                    # against the raw 'teneur' format.
                    val = re.sub(r'[^\d.]+', '', val)
                    if not val:
                        val = 0
                    else:
                        val = float(val.replace(',', '.'))
                    nutrition_name = mapping_constant[x['const_code']][0]
                    multilicator = mapping_constant[x['const_code']][1]
                    nutrition[nutrition_name] = int(val * multilicator)
            data.append({
                'ciqual_code': code_ciqual,
                'name_fr': a['alim_nom_fr'],
                'name_en': a['alim_nom_eng'],
                'group_name_fr': group_name,
                'nutrition': nutrition
            })
        return data

    def filter_data(data):
        """Keep only aliments from accepted groups; tag each with a boolean
        'fresh' flag based on its group."""
        # groups classified fresh
        fresh_group = [
            'fruits crus',
            'herbes fraîches',
            'légumes cuits',
            'légumes crus',
            'poissons crus',
        ]
        # Keep only selected group
        accepted_group = [
            'fruits crus',
            'herbes fraîches',
            'légumes cuits',
            'légumes crus',
            'poissons crus',
            'épices',
            'fruits à coque et graines oléagineuses',
            'huiles et graisses végétales',
            'chocolats et produits à base de chocolat',
            'beurres',
            'légumes séchés ou déshydratés',
            'fruits séchés',
            'œufs cuits',
            'herbes séchées',
            'laits autres que de vache',
            'pommes de terre et autres tubercules',
            'sels',
            'algues',
            'légumineuses fraîches',
            'pâtes, riz et céréales crus',
            'condiments',
            'sucres, miels et assimilés',
            'boissons végétales',
            'légumineuses cuites',
            'poissons cuits',
            'farines',
            'autres matières grasses',
            'poulet',
            'légumineuses sèches',
            'pâtes, riz et céréales cuits',
            'mollusques et crustacés crus',
            # 'agneau et mouton',
            # 'cocktails',
            # 'café, thé, cacao etc. prêts à consommer',
            # 'fromages à pâte persillée',
            # 'mollusques et crustacés cuits',
            # 'pizzas, tartes et crêpes salées',
            # 'laits et boissons infantiles',
            # 'viennoiseries',
            # 'gibier',
            # 'laits de vache concentrés ou en poudre',
            # 'plats de poisson et féculents',
            # 'margarines',
            # 'desserts glacés',
            # 'compotes et assimilés',
            # 'biscottes et pains grillés',
            # 'sandwichs',
            # 'salades composées et crudités',
            # 'desserts lactés',
            # 'plats de poisson sans garniture',
            # 'pains',
            # 'boissons rafraîchissantes sans alcool',
            # 'plats de viande sans garniture',
            # 'pâtes à tarte',
            # 'saucisses et assimilés',
            # 'huiles de poissons',
            # 'abats',
            # 'boissons à reconstituer',
            # 'autres fromages et spécialités',
            # 'autres desserts',
            # 'laits de vaches liquides (non concentrés)',
            # 'petits pots salés et plats infantiles',
            # 'céréales de petit-déjeuner',
            # 'plats de viande et légumes/légumineuses',
            # 'eaux',
            # 'sorbets',
            # 'autres viandes',
            # 'gâteaux et pâtisseries',
            # 'confiseries non chocolatées',
            # 'biscuits sucrés',
            # 'jambons cuits',
            # 'porc',
            # 'quenelles',
            # 'saucisson secs',
            # 'céréales et biscuits infantiles',
            # 'jus',
            # 'nectars',
            # 'sauces sucrées',
            # 'crèmes et spécialités à base de crème',
            # 'soupes',
            # 'desserts infantiles',
            # 'autres spécialités charcutières',
            # 'confitures et assimilés',
            # 'denrées destinées à une alimentation particulière',
            # 'plats de fromage',
            # 'dinde',
            # 'charcuteries',
            # 'glaces',
            # 'fromages',
            # 'boissons rafraîchissantes lactées',
            # 'jambons secs et crus',
            # 'biscuits apéritifs',
            # 'aides culinaires',
            # 'rillettes',
            # 'vins',
            # 'produits à base de poissons et produits de la mer',
            # 'viandes cuites',
            # 'sauces chaudes',
            # 'plats de viande et féculents',
            # 'plats de céréales/pâtes',
            # 'fromages à pâte molle',
            # 'bières et cidres',
            # 'autres produits à base de viande',
            # 'fromages à pâte pressée',
            # 'œufs crus',
            # 'fruits appertisés',
            # 'ingrédients divers',
            # 'fromage fondus',
            # 'feuilletées et autres entrées',
            # 'pâtés et terrines',
            # 'fromages blancs',
            # 'omelettes et autres ovoproduits',
            # 'sauces condimentaires',
            # 'bœuf et veau',
            # 'barres céréalières',
            # 'glaces et sorbets'
        ]
        filtered_data = []
        for d in data:
            if d['group_name_fr'] in accepted_group:
                d['fresh'] = d['group_name_fr'] in fresh_group
                filtered_data.append(d)
        return filtered_data

    def populate_database(data):
        """Insert groups and aliments into the ORM database.

        NOTE(review): this method appears to be dead code (its only call
        site in generate() is commented out) and is out of sync with
        filter_data's output: it reads x['group_name'] / x['group_fresh'] /
        x['name'] where filter_data produces 'group_name_fr' / 'fresh' /
        'name_fr'. It also references AlimentCategory, Aliment,
        AlimentNutrition and unidecode, none of which are imported in this
        file — confirm before re-enabling.
        """
        # Insert group
        group_mapping = {}
        for x in data:
            gname = x['group_name']
            gfresh = x['group_fresh']
            if gname in group_mapping:
                continue
            try:
                cat = AlimentCategory.objects.get(name=gname)
            except AlimentCategory.DoesNotExist:
                cat = AlimentCategory(name=gname, fresh=gfresh)
                cat.save()
            group_mapping[gname] = cat
        # Insert aliment
        for x in data:
            cat = group_mapping[x['group_name']]
            n = x['name']
            s = unidecode.unidecode(n)  # accent-free name for searching
            try:
                ingredient = Aliment.objects.get(name=n)
                if ingredient.name_search != s:
                    ingredient.name_search = s
                    ingredient.save()
                continue
            except Aliment.DoesNotExist:
                ingredient = Aliment(name=n, name_search=s, category=cat)
                ingredient.save()
            n = x['nutrition']
            nutrition = AlimentNutrition(
                aliment=ingredient, protein=n['protein'],
                glucid=n['glucid'], lipid=n['lipid'])
            nutrition.save()
| StarcoderdataPython |
8186353 | # TODO: Write a function that loads a checkpoint and rebuilds the model
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import argparse
import json
from PIL import Image
# --- CLI arguments: where to load the checkpoint, the class-name mapping,
# the image to classify, top-K, and the compute device. ---
parser = argparse.ArgumentParser()
parser.add_argument('--load_dir', action = 'store', type = str, default = 'checkpoint.pth', help = 'Load file directory')
parser.add_argument('--json_dir', action = 'store', type = str, default = 'ImageClassifier/cat_to_name.json', help = 'JSON file to map class values to category names' )
parser.add_argument('--image_dir', action = 'store', type = str, default = 'ImageClassifier/flowers/test/88/image_00540.jpg', help = 'Image test file for model prediction' )
parser.add_argument('--topk_classes', action = 'store', type = int, default = 5, help = 'Top K classes')
parser.add_argument('--gpu', action = 'store', default = 'cuda', help = 'Type of device to be used')
args = parser.parse_args()

# Category id -> human-readable flower name.
with open(args.json_dir, 'r') as f:
    cat_to_name = json.load(f)

# Rebuild the classifier network from the checkpoint's recorded architecture.
checkpoint = torch.load(args.load_dir)
if checkpoint['arch'] == 'vgg16':
    model = models.vgg16(pretrained=True)
if checkpoint['arch'] == 'vgg19':
    model = models.vgg19(pretrained=True)
if checkpoint['arch'] == 'alexnet':
    model = models.alexnet(pretrained=True)
# Freeze the pretrained feature extractor; only the classifier was trained.
for param in model.parameters():
    param.requires_grad = False
# NOTE(review): the 25088 input size matches VGG's flattened features;
# alexnet's feature size differs — confirm alexnet checkpoints actually load.
model.classifier = nn.Sequential(nn.Linear(25088, checkpoint['hidden_units_1']),
                                 nn.ReLU(),
                                 nn.Dropout(0.2),
                                 nn.Linear(checkpoint['hidden_units_1'], checkpoint['hidden_units_2']),
                                 nn.ReLU(),
                                 nn.Dropout(0.2),
                                 nn.Linear(checkpoint['hidden_units_2'], checkpoint['hidden_units_3']),
                                 nn.ReLU(),
                                 nn.Dropout(0.2),
                                 nn.Linear(checkpoint['hidden_units_3'], 102),
                                 nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), checkpoint['learning_rate'])
device = torch.device(args.gpu)
model.to(device);
model.load_state_dict(checkpoint['model_state_dict'])
# Bidirectional class <-> output-index maps used by predict().
model_class_to_idx = checkpoint['model_class_to_index']
model_index_to_class = dict([[x,y] for y,x in model_class_to_idx.items()])
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Numpy array
    '''
    pil_img = Image.open(image)
    pil_img.load()
    # Shrink so the image fits in a 256x256 box, preserving aspect ratio.
    pil_img.thumbnail((256, 256))
    # Center-crop a 224x224 window.
    half = 224 / 2
    left = pil_img.width / 2 - half
    right = pil_img.width / 2 + half
    top = pil_img.height / 2 - half
    bottom = pil_img.height / 2 + half
    pil_img = pil_img.crop((left, top, right, bottom))
    # Scale pixel values to [0, 1], then normalize with ImageNet statistics.
    arr = np.array(pil_img)
    arr = arr / 255
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    arr = (arr - mean) / std
    # Channels-first layout for PyTorch, returned as a float tensor.
    channels_first = arr.transpose(2, 0, 1)
    return torch.from_numpy(channels_first).float()
def imshow(image, ax=None, title=None):
    """Display a normalized image tensor on a matplotlib axis.

    Undoes the ImageNet normalization applied by process_image, converts the
    tensor to HWC layout, clips to [0, 1], and draws it on `ax`.
    Returns the axis. `title` is accepted for interface compatibility but
    is not used (unchanged from the original behavior).
    """
    if ax is None:
        # Bug fix: `plt` was referenced but matplotlib is never imported at
        # module level, so this branch raised NameError. Import lazily here
        # so the module stays importable on headless systems.
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()

    # PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes is the third dimension
    image = image.numpy().transpose((1, 2, 0))

    # Undo preprocessing (ImageNet mean/std from process_image)
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean

    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed
    image = np.clip(image, 0, 1)

    ax.imshow(image)
    return ax
def predict(image_path, model, topk):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    # Relies on module globals: args (device string), model_index_to_class,
    # cat_to_name — this function is coupled to the script's setup above.
    device = torch.device(args.gpu)
    model.to(device)
    model.eval()
    image = process_image(image_path)
    image.unsqueeze_(0)  # add the batch dimension expected by the model
    image = image.to(device)
    logps = model.forward(image)
    ps = torch.exp(logps)  # model outputs log-probabilities (LogSoftmax)
    probabilities, indices = ps.topk(topk, dim=1)
    probabilities = probabilities[0].tolist()
    indices = indices[0].tolist()
    # Map output indices -> dataset class labels -> human-readable names.
    classes = []
    for index in indices:
        classes.append(model_index_to_class[index])
    flower_names = []
    for element in classes:
        flower_names.append(cat_to_name[element])
    return probabilities, flower_names
# Run a single prediction on the CLI-selected image and report the top-K result.
probs, flower_names = predict(args.image_dir, model, args.topk_classes)
print("The most likely image class for the image is: {}\n".format(flower_names[0]),\
      "The associated probability is: {}\n".format(probs[0]),\
      "The Top K Classes along with their probabilites are:\n{}\n{}".format(flower_names, probs))
5188632 | <filename>test_commit.py
#!/usr/bin/env python
# Trivial smoke-test script used to exercise commits: prints two fixed lines.
print ("Hello World!")
print ("change to this file")
| StarcoderdataPython |
3346208 | <reponame>peterezzo/splunk-pwn-app
import splunk.entity as entity
import splunk.auth, splunk.search
import getpass
def huntPasswords(sessionKey):
    """Return every stored credential entry from Splunk's admin/passwords
    endpoint, across all apps (namespace="-") for the "nobody" owner."""
    entities = entity.getEntities(
        ['admin','passwords'],owner="nobody", namespace="-",sessionKey=sessionKey)
    return entities
def getSessionKeyFromCreds():
    """Prompt for a username/password on stdin and exchange them for a
    Splunk session key.  NOTE: Python 2 only (uses ``raw_input``)."""
    user = raw_input("Username:")
    password = getpass.getpass()
    sessionKey = splunk.auth.getSessionKey(user,password)
    return sessionKey
if __name__ == "__main__":
    # Authenticate interactively, then dump all stored credential entities.
    # (Python 2 print statement.)
    sessionKey = getSessionKeyFromCreds()
    print huntPasswords(sessionKey)
| StarcoderdataPython |
3218047 | <filename>chemception/models/compound.py
from rdkit import Chem
import cv2
import os
import numpy as np
import os.path
from rdkit import Chem
from rdkit.Chem import AllChem
class Compound:
    """A dataset compound: identifier, SMILES string and mutagenicity label,
    plus helpers to load its rendered image and to neutralise charges on the
    RDKit molecule built from the SMILES."""
    # Image file extension used by fileExist()/image().
    extension = 'png'
    # Class-level defaults; every instance overwrites these in __init__,
    # so they only serve as documentation of the expected attributes.
    #compound identifier in the dataset
    id=""
    #compound SMILE
    _SMILE=""
    #mutagen
    mutagen=False
    #rdk model
    rdkMolecule = None
    def __init__(self,id,SMILE,mut):
        self.id=id
        self._SMILE=SMILE
        self.description = self.id + ": "+ self._SMILE
        self.mutagen = mut
        # Parse the SMILES into an RDKit molecule (None if the SMILES is invalid).
        self.rdkMolecule = Chem.MolFromSmiles(self._SMILE)
        #print(SMILE)
    def __repr__(self):
        return self.description
    def __str__(self):
        return self.description
    def fileExist(self,path):
        """True when the compound's image file exists under *path*."""
        img = path+self.id+'.'+ Compound.extension
        return os.path.isfile(img)
    def image(self,path):
        """Load the compound's image from *path* via OpenCV (None if missing)."""
        img = path+self.id+'.'+Compound.extension
        return cv2.imread(str(img))
    def input(self, path='',t='image'):
        """Return a (features, label) pair: the image (t='image') or the
        SMILES string, with label 1 for mutagens and 0 otherwise.
        NOTE: the method name shadows the ``input`` builtin inside callers
        that do ``from ... import *``-style access; kept for compatibility."""
        if t == 'image':
            return self.image(path),1 if self.mutagen else 0
        else:
            return self._SMILE,1 if self.mutagen else 0
    def InitialiseNeutralisationReactions(self):
        """Build the (reactant SMARTS, product) pairs used to neutralise
        charged functional groups; adapted from the RDKit cookbook."""
        patts= (
            # Imidazoles
            ('[n+;H]','n'),
            # Amines
            ('[N+;!H0]','N'),
            # Carboxylic acids and alcohols
            ('[$([O-]);!$([O-][#7])]','O'),
            # Thiols
            ('[S-;X1]','S'),
            # Sulfonamides
            ('[$([N-;X2]S(=O)=O)]','N'),
            # Enamines
            ('[$([N-;X2][C,N]=C)]','N'),
            # Tetrazoles
            ('[n-]','[nH]'),
            # Sulfoxides
            ('[$([S-]=O)]','S'),
            # Amides
            ('[$([N-]C=O)]','N'),
            )
        return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
    # Cached neutralisation reactions (built lazily on first use).
    reac=None
    def NeutraliseCharges(self, reactions=None):
        """Neutralise charged groups in the compound's SMILES.

        Returns (SMILES, True) with the canonical neutralised SMILES when a
        substitution was made, else (original SMILES, False).
        """
        if reactions is None:
            if self.reac is None:
                # NOTE: assigning here creates an instance attribute that
                # shadows the class-level cache, so the reactions are built
                # once per instance.
                self.reac=self.InitialiseNeutralisationReactions()
            reactions=self.reac
        mol = Chem.MolFromSmiles(self._SMILE)
        replaced = False
        for i,(reactant, product) in enumerate(reactions):
            # Replace repeatedly until no match remains for this pattern.
            while mol.HasSubstructMatch(reactant):
                replaced = True
                rms = AllChem.ReplaceSubstructs(mol, reactant, product)
                mol = rms[0]
        if replaced:
            return (Chem.MolToSmiles(mol,True), True)
        else:
            return (self._SMILE, False)
| StarcoderdataPython |
1671692 |
from PIL import ImageGrab
import numpy as np
class GetDisplay:
    """Grab screenshots of the current display via ``PIL.ImageGrab``.

    Fix: the original ``img()`` method was unreachable -- ``__init__``
    assigns ``self.img``, so the instance attribute permanently shadowed
    the method and calling it raised ``TypeError``.  The ``img`` attribute
    remains the public way to read the last grabbed image, so existing
    attribute access (``obj.img``) is unaffected.  Stray ``pass``
    statements were removed.
    """

    def __init__(self):
        # Placeholder 1x1 RGB image until the first grab.
        self.img = np.zeros([1, 1, 3])

    def grabDisplay(self):
        """Capture the whole screen; ``self.img`` becomes a PIL Image."""
        self.img = ImageGrab.grab()

    def getSize(self):
        """Grab the screen and return its (width, height) in pixels."""
        self.grabDisplay()
        return self.img.width, self.img.height
| StarcoderdataPython |
1775407 | <reponame>themattrix/discord-caravan
"""Discord Caravan Bot
Usage:
caravan_bot --gyms=JSON [--server-filter=REGEX] [--channel-filter=REGEX]
caravan_bot (-h | --help)
caravan_bot --version
Options:
-h --help Show this screen.
--version Show version.
--gyms=JSON JSON file containing gyms names, coordinates, and
optional aliases.
--server-filter=REGEX Restrict bot to servers matching this pattern
[default: .*].
--channel-filter=REGEX Restrict bot to channels matching this pattern
[default: .*caravan.*].
"""
import os
import logging
import pathlib
import re
import sys
from .log import log
from . import client
from . import places
import docopt
def main():
    """Parse CLI options, build the gym index and run the Discord bot.

    Exit codes: 0 on success, 1 when a filter regex is invalid, 2 when
    the DISCORD_BOT_TOKEN environment variable is missing.
    """
    args = docopt.docopt(__doc__, version='1.0.0')
    try:
        # Both filters are case-insensitive patterns from the CLI.
        server_re = re.compile(args['--server-filter'], re.IGNORECASE)
        channel_re = re.compile(args['--channel-filter'], re.IGNORECASE)
    except re.error as e:
        log.critical(
            f'The provided regular expression is invalid: {e}')
        return 1
    try:
        client.CaravanClient(
            gyms=places.Places.from_json(pathlib.Path(args['--gyms'])),
            server_re=server_re,
            channel_re=channel_re,
        ).run(
            # The bot token is read from the environment, never from argv.
            os.environ['DISCORD_BOT_TOKEN']
        )
    except KeyError as e:
        log.critical(
            f'The following environment variable must be set: {e.args[0]}')
        return 2
    else:
        return 0
if __name__ == '__main__':
    # Configure INFO-level logging and exit with main()'s status code.
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
| StarcoderdataPython |
9653517 | # -*- coding: utf-8 -*-
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import time
import shutil
from os.path import expanduser
from viper.common.abstracts import Command
from viper.common.colors import bold
from viper.core.database import Database
from viper.core.session import __sessions__
from viper.core.project import __project__
from viper.core.config import __config__
class Projects(Command):
    """
    This command retrieves a list of all projects.
    You can also switch to a different project.
    """
    cmd = "projects"
    description = "List or switch existing projects"

    def __init__(self):
        super(Projects, self).__init__()
        # The four actions are mutually exclusive: exactly one per invocation.
        group = self.parser.add_mutually_exclusive_group()
        group.add_argument('-l', '--list', action='store_true', help="List all existing projects")
        group.add_argument('-s', '--switch', metavar='PROJECT NAME', help="Switch to the specified project")
        group.add_argument('-c', '--close', action='store_true', help="Close the currently opened project")
        group.add_argument('-d', '--delete', metavar='PROJECT NAME', help="Delete the specified project")

    def run(self, *args):
        """Dispatch to the list/switch/close/delete action selected by *args*."""
        try:
            args = self.parser.parse_args(args)
        except SystemExit:
            # argparse exits on --help or bad arguments; swallow it so the
            # interactive console keeps running.
            return

        # Projects live under <storage_path>/projects (default ~/.viper).
        if __config__.get('paths').storage_path:
            base_path = __config__.get('paths').storage_path
        else:
            base_path = os.path.join(expanduser("~"), '.viper')
        projects_path = os.path.join(base_path, 'projects')

        if args.list:
            if not os.path.exists(projects_path):
                self.log('info', "The projects directory does not exist yet")
                return
            self.log('info', "Projects Available:")
            rows = []
            for project in os.listdir(projects_path):
                project_path = os.path.join(projects_path, project)
                if os.path.isdir(project_path):
                    current = ''
                    if __project__.name and project == __project__.name:
                        current = 'Yes'
                    rows.append([project, time.ctime(os.path.getctime(project_path)), current])
            self.log('table', dict(header=['Project Name', 'Creation Time', 'Current'], rows=rows))
        elif args.switch:
            if __sessions__.is_set():
                __sessions__.close()
                self.log('info', "Closed opened session")
            __project__.open(args.switch)
            self.log('info', "Switched to project {0}".format(bold(args.switch)))
            # Need to re-initialize the Database to open the new SQLite file.
            Database().__init__()
        elif args.close:
            # The "default" project can never be closed.
            if __project__.name != "default":
                if __sessions__.is_set():
                    __sessions__.close()
                __project__.close()
        elif args.delete:
            project_to_delete = args.delete
            if project_to_delete == "default":
                self.log('error', "You can't delete the \"default\" project")
                return
            # If it's the currently opened project, we close it.
            if project_to_delete == __project__.name:
                # We close any opened session.
                if __sessions__.is_set():
                    __sessions__.close()
                __project__.close()
            project_path = os.path.join(projects_path, project_to_delete)
            if not os.path.exists(project_path):
                self.log('error', "The folder for project \"{}\" does not seem to exist".format(project_to_delete))
                return
            self.log('info', "You asked to delete project with name \"{}\" located at \"{}\"".format(project_to_delete, project_path))
            confirm = input("Are you sure you want to delete the project? You will permanently delete all associated files! [y/N] ")
            if confirm.lower() != 'y':
                return
            try:
                shutil.rmtree(project_path)
            except Exception as e:
                self.log('error', "Something failed while trying to delete folder: {}".format(e))
                return
            # Fixed message typo: "was delete successfully" -> "was deleted successfully".
            self.log('info', "Project \"{}\" was deleted successfully".format(project_to_delete))
        else:
            self.log('info', self.parser.print_usage())
| StarcoderdataPython |
3524070 | from feincms.module.page.models import Page
from feincms.content.raw.models import RawContent
from feincms_bounds.models import Template
# Register the two page templates available in the CMS admin.
Page.register_templates(
    Template(
        key='internalpage',
        title='Internal Page',
        path='pages/internal.html',
        regions=(
            ('main', 'Main Content'),
            ('sidebar', 'Sidebar'),
        )
    ), Template(
        key='homepage',
        title='Home Page',
        path='pages/home_page.html',
        regions=(
            ('main', 'Main Content'),
        ),
        # Exactly one home page, at the tree root, with no sub-pages.
        unique=True,
        first_level_only=True,
        no_children=True
    )
)
# Allow raw HTML content blocks in page regions.
Page.create_content_type(RawContent)
| StarcoderdataPython |
3219959 | from build.management.commands.update_alignments import Command as UpdateAlignments
class Command(UpdateAlignments):
    """Management-command alias: runs the same logic as ``update_alignments``
    under this command's name, with no behavior changes."""
    pass
1861053 | <gh_stars>0
import logging
import uuid
from pathlib import Path
from datetime import datetime
from queue import Queue
from threading import Event
import sounddevice as sd
import soundfile as sf
import sqlite_utils as su
import numpy as np
from .settings import settings
logger = logging.getLogger('panauricon.recorder')
def start_recording():
    """
    Begin recording audio using existing settings.

    Audio blocks arrive on a sounddevice callback thread, pass through
    _process_block, and are handed to the writer loop via a Queue.  A new
    sound file (registered in the database) is started whenever the
    date-based target folder changes.
    """
    device = _get_recording_device()
    soundfile_kwargs = _get_soundfile_kwargs(device)
    samplerate = soundfile_kwargs['samplerate']
    queue = Queue()
    # Per-stream state shared across _process_block calls (e.g. silence run).
    context = {'silence': 0}
    def callback(indata, frame_count, time_info, status):
        if status:
            logger.info(f"Status in callback: {status}")
        # Move the processing here instead of in the loop below.
        data = _process_block(indata.copy(), context)
        if data is not None:
            queue.put(data)
    with sd.InputStream(**settings.recorder, channels=1, callback=callback):
        now = datetime.utcnow()
        path = _get_recording_path(now)
        while True:
            id, filename = _get_uuid_filename(now)
            _insert_db_recording(id, path, filename, now, samplerate)
            try:
                with sf.SoundFile(path / filename, mode='w', **soundfile_kwargs) as f:
                    while True:
                        now = datetime.utcnow()
                        # NOTE(review): `if queue:` is always true (a Queue
                        # instance is truthy regardless of contents), so
                        # queue.get() simply blocks until data arrives --
                        # confirm whether `queue.empty()` was intended.
                        if queue:
                            f.write(queue.get())
                        # Rotate to a new file when the date-based path changes.
                        if (nextpath := _get_recording_path(now)) != path:
                            logger.info("Expired pathlist restart.")
                            path = nextpath
                            break
            finally:
                logger.info(f"Closed soundfile.")
def start_playback(start):
    """
    Play back, in order, every recording whose start time is >= *start*.

    (Docstring fixed: this function plays back; it does not record.)
    """
    # Fall back to defaults when the playback settings are unset/zero.
    buffersize = int(settings.playback.buffersize or 20)
    blocksize = int(settings.playback.blocksize or 2048)
    device = _get_playback_device()
    hostapi = sd.query_hostapis(device['hostapi'])
    count = 0
    try:
        logger.info(f"Looking up playback fragments.")
        for r in _select_db_recordings_after(start):
            logger.info(f'Begin playback for {r["id"]}.')
            count += 1
            path = Path(r['path']) / r['filename']
            logger.info(f"Playback file: {str(path)}")
            _playback_fragment(path, device, hostapi, buffersize, blocksize)
    finally:
        logger.info(f"Played {count} recordings.")
def _playback_fragment(path, device, hostapi, buffersize, blocksize):
    """Stream one recorded sound file to the output device.

    Pre-fills a queue with up to *buffersize* blocks, then feeds the
    sounddevice output callback from it while the file is read ahead.
    """
    # Bug fix: the local variable `queue` (a Queue instance) shadowed the
    # `queue` module, so `except queue.Empty` raised AttributeError the
    # moment an underrun occurred.  Import the exception class explicitly.
    from queue import Empty
    queue = Queue(maxsize=buffersize)
    event = Event()
    device_name = f"{device['name']} {hostapi['name']}"
    def callback(outdata, frames, time, status):
        assert frames == blocksize
        if status.output_underflow:
            logger.warning('Output underflow: increase blocksize?')
            raise sd.CallbackAbort
        assert not status
        try:
            data = queue.get_nowait()
        except Empty as e:
            logger.warning('Buffer is empty: increase buffersize?')
            raise sd.CallbackAbort from e
        if len(data) < len(outdata):
            # Final partial block: pad with silence and stop the stream.
            outdata[:len(data)] = data
            outdata[len(data):].fill(0)
            raise sd.CallbackStop
        else:
            outdata[:] = data
    with sf.SoundFile(path, mode='r') as f:
        for _ in range(buffersize):
            data = f.read(blocksize, always_2d=True)
            if not len(data):
                break
            queue.put_nowait(data)  # Pre-fill queue
        stream = sd.OutputStream(
            samplerate=f.samplerate, blocksize=blocksize,
            channels=f.channels, device=device_name,
            callback=callback, finished_callback=event.set)
        with stream:
            # Worst-case time for the callback to drain the full queue.
            timeout = blocksize * buffersize / f.samplerate
            while len(data):
                data = f.read(blocksize, always_2d=True)
                queue.put(data, timeout=timeout)
def _process_block(data, context):
"""
Return the processed audio block or None.
Raises StopIteration when the recording must restart.
Context is used to hold state between blocks.
"""
return data
def _get_recording_path(now: datetime):
    """
    Return the directory for the next sound file, creating any missing
    folders (<base_path>/<soundfile.root>/<strftime parts of *now*>).
    """
    base_path = Path(settings.base_path or '.')
    path = base_path / settings.soundfile.root
    if not path.exists():
        path.mkdir()
        logger.info("Recordings path missing, created.")
    # Each configured pathlist entry is a strftime pattern, producing a
    # date-based folder hierarchy (e.g. year/month/day).
    for part in settings.soundfile.pathlist:
        path = path / now.strftime(part)
        if not path.exists():
            path.mkdir()
            logger.info(f"New recording subfolder: {str(path)}")
    return path
def _get_uuid_filename(now: datetime):
    """Build a unique sound-file name: <time prefix>_<uuid4>.<format>.

    Returns the generated UUID along with the file name so the caller can
    register the recording in the database under the same id.
    """
    recording_id = uuid.uuid4()
    time_prefix = now.strftime(settings.soundfile.prefix)
    filename = "{}_{}.{}".format(
        time_prefix, recording_id, settings.soundfile.format)
    return recording_id, filename
def __get_device(kind):
    """Look up the configured sounddevice entry of the given *kind*
    ('input' or 'output'), logging the PortAudio version at DEBUG."""
    _, portaudio_version = sd.get_portaudio_version()
    logger.debug(f"{portaudio_version}")
    device = sd.query_devices(device=settings.recorder.device, kind=kind)
    return device
def _get_recording_device():
    """
    Determine which device to use for the recording (the configured
    device's input side).
    """
    device = __get_device('input')
    logger.info(f"Using device='{device['name']}' for recording.")
    return device
def _get_playback_device():
    """
    Determine which device to use for playback (the configured device's
    output side).  (Docstring fixed: it previously said "recording".)
    """
    device = __get_device('output')
    logger.info(f"Using device='{device['name']}' for playback.")
    return device
def _get_soundfile_kwargs(device):
"""
Return the soundfile options required by device.
"""
assert device
samplerate = int(device['default_samplerate'])
return {
'channels': 1,
'samplerate': samplerate
}
# Database configuration
def _sql_tracer(sql, params):
    """Trace hook for sqlite_utils: logs every issued SQL statement at DEBUG."""
    logger.debug("SQL: {} - params: {}".format(sql, params))

# Module-level handle to the recordings database, shared by the helpers below.
database = su.Database(settings.database, tracer=_sql_tracer)
def _insert_db_recording(id, path, filename, now, samplerate):
    """Register a new sound file in the `recording` table.

    `start` is stored as a YYYYmmddHHMMSS string so that lexicographic
    comparison matches chronological order (see _select_db_recordings_after).
    """
    database["recording"].insert({
        "id": id,
        "path": str(path),
        "filename": filename,
        "start": now.strftime(r'%Y%m%d%H%M%S'),
        "format": settings.soundfile.format,
        "samplerate": samplerate,
        "app_version": settings.version,
    }, pk='id')
def _select_db_recordings_after(start):
    """Yield `recording` rows whose start time is >= *start* (a datetime).

    The comparison is done on the stored YYYYmmddHHMMSS strings, which
    sort in chronological order.
    """
    start_key = start.strftime(r'%Y%m%d%H%M%S')
    sql = "select * from recording where start >= :start"
    for row in database.query(sql, {'start': start_key}):
        yield row
| StarcoderdataPython |
8033038 | from .data_warehouse import DataWarehouse
from .dw_table import DWTable
from .dw_sql import DWSql
| StarcoderdataPython |
1817720 | # -*- coding:utf-8 -*-
from __future__ import print_function, absolute_import, division
| StarcoderdataPython |
24185 | import glob
import os
import os.path
import sys
import shutil
import cPickle
from types import StringType, UnicodeType
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.install import install
# True on Python 3 (kept for compatibility checks elsewhere).
PY3K = sys.version_info[0] > 2

# Use the README as the long description shown on PyPI.
with open('README.rst') as inp:
    long_description = inp.read()

# Extract __version__ from prody/__init__.py without importing the package.
__version__ = ''
inp = open('prody/__init__.py')
for line in inp:
    if (line.startswith('__version__')):
        # NOTE(review): exec of a line read from the source tree -- acceptable
        # at build time only because the input is the project's own file.
        exec(line.strip())
        break
inp.close()
def isInstalled(module_name):
    """Check if a required package is installed, by trying to import it.

    Returns True when the import succeeds and False otherwise.
    (Fixed: the original returned from inside ``try``, which made the
    ``else: return True`` branch unreachable and leaked the module object
    as the "true" value.)
    """
    try:
        __import__(module_name)
    except ImportError:
        return False
    else:
        return True
# Warn early: NumPy is required for all of ProDy's core functionality.
if not isInstalled('numpy'):
    print("""NumPy is not installed. This package is required for main ProDy
features and needs to be installed before you can use ProDy.
You can find NumPy at: http://numpy.scipy.org""")

PACKAGES = ['prody', 'prody.atomic', 'prody.dynamics', 'prody.ensemble',
            'prody.measure', 'prody.proteins', 'prody.trajectory',
            'prody.routines', 'prody.utilities']
PACKAGE_DATA = {}
# The bundled test suite needs Python newer than 2.6.
if sys.version_info[:2] > (2,6):
    PACKAGES.extend(['prody.tests', 'prody.tests.test_kdtree',
                     'prody.tests.test_measure'])
    PACKAGE_DATA['prody.tests'] = ['data/pdb*.pdb', 'data/*.dat',
                                   'data/*.coo', 'data/dcd*.dcd']
EXTENSIONS = []
# C extensions are only built on CPython 2 (not Jython, not Python 3).
if os.name != 'java' and sys.version_info[0] == 2:
    pairwise2 = ['cpairwise2.c', 'pairwise2.py']
    if all([os.path.isfile(os.path.join('prody', 'proteins', fn))
            for fn in pairwise2]):
        EXTENSIONS.append(
            Extension('prody.proteins.cpairwise2',
                      ['prody/proteins/cpairwise2.c'],
                      include_dirs=["prody"]
                      ))
    else:
        raise Exception('one or more pairwise2 module files are missing')
    if isInstalled('numpy'):
        import numpy
        kdtree_files = ['__init__.py', 'KDTree.c', 'KDTree.h',
                        'KDTreemodule.c', 'Neighbor.h', 'kdtree.py']
        if all([os.path.isfile(os.path.join('prody', 'kdtree', fn))
                for fn in kdtree_files]):
            EXTENSIONS.append(
                Extension('prody.kdtree._CKDTree',
                          ['prody/kdtree/KDTree.c',
                           'prody/kdtree/KDTreemodule.c'],
                          include_dirs=[numpy.get_include()],
                          ))
        else:
            raise Exception('one or more kdtree module files are missing')
        PACKAGES.append('prody.kdtree')
# Fixed: this branch read `elif isInstalled('numpy')`, which raised
# "numpy is not installed" precisely when NumPy *was* installed (e.g. on
# Python 3).  The error message shows the intended condition is negated.
elif not isInstalled('numpy'):
    raise ImportError('numpy is not installed')
# Files installed as command-line scripts.
SCRIPTS = ['scripts/prody']

# Standard distutils configuration; PACKAGES/PACKAGE_DATA/EXTENSIONS were
# assembled above depending on the interpreter and the available modules.
setup(
    name='ProDy',
    version=__version__,
    author='<NAME>',
    author_email='ahb12 at pitt dot edu',
    description='A Python Package for Protein Dynamics Analysis',
    long_description=long_description,
    url='http://www.csb.pitt.edu/ProDy',
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    ext_modules=EXTENSIONS,
    license='GPLv3',
    keywords=('protein, dynamics, elastic network model, '
              'Gaussian network model, anisotropic network model, '
              'essential dynamics analysis, principal component analysis, '
              'Protein Data Bank, PDB, GNM, ANM, PCA'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
    scripts=SCRIPTS,
    requires=['NumPy', ],
    provides=['ProDy({0:s})'.format(__version__)]
)
| StarcoderdataPython |
7144 | from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# Database connection URI, format:
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:root123@127.0.0.1:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
Base = declarative_base(bind=engine)
# Session factory bound to the engine; called immediately to get a session.
session = sessionmaker(bind=engine)()
# TODO: define the User model
class User(Base):
    """A user row; related articles are reachable via the ``articles`` backref."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), nullable=False)

    def __repr__(self):
        return f'<User(id={self.id}, name={self.name})>'
# TODO: create the Article model
class Article(Base):
    """An article written by a user (many-to-one relationship via ``uid``)."""
    __tablename__ = 'article'
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(50), nullable=False)
    # Foreign-key constraint pointing at user.id.
    uid = Column(Integer, ForeignKey('user.id'), nullable=False)
    # Relationship: article.authors <-> user.articles (via backref).
    authors = relationship('User', backref='articles')
# TODO: drop the tables
# Base.metadata.drop_all()
# TODO: create the tables
# Base.metadata.create_all()
#
# user = User(name='zhiliao')
# article1 = Article(title='python')
# article2 = Article(title='flask')
#
# user.articles.append(article1)
# user.articles.append(article2)
# TODO: commit the data
# session.add(user)
# session.commit()
# TODO: 1. delete via session.delete without `nullable=False` on the FK
# TODO: 2. delete via session.delete with `nullable=False`, which blocks the delete
# Demo: fetch the first user and delete it.
user = session.query(User).first()
print(user)
session.delete(user)
session.commit()
| StarcoderdataPython |
3370461 | <reponame>jamesrharwood/journal-guidelines
import io
from urllib.parse import urlparse
import pdftotext
from bs4 import BeautifulSoup
def get_text_from_response(response):
    """Extract the visible text from an HTTP response's HTML body."""
    return get_text_from_xml(response.text.encode("utf-8"))
def get_text_from_xml(html):
    """Return the human-visible text of an HTML/XML document, one non-blank
    line per phrase, with scripts and styles removed."""
    # from https://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python
    soup = BeautifulSoup(html, features="html.parser")
    # kill all script and style elements
    for script in soup(["script", "style"]):
        script.extract()  # rip it out
    # get text
    text = soup.get_text()
    # break into lines and remove leading and trailing space on each
    lines = (line.strip() for line in text.splitlines())
    # break multi-headlines into a line each
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    # drop blank lines
    text = "\n".join(chunk for chunk in chunks if chunk)
    return text
def get_text_from_pdf(response):
    """Extract the text of a PDF response body, pages joined by blank lines."""
    pdf = pdftotext.PDF(io.BytesIO(response.body))
    text = "\n\n".join(pdf)
    return text
def get_bytes_from_pdf(response):
    """UTF-8 encoded text content of a PDF response."""
    return get_text_from_pdf(response).encode()
def extract_match_from_text(match, text):
    """Return the matched span plus up to 50 characters of leading context.

    ``match`` is a ``re.Match`` produced against ``text``.
    Fixed off-by-one: the end bound was ``len(text) - 1``, which silently
    dropped the final character whenever the match reached the end of the
    text.
    """
    buffer = 50
    start = max(0, match.start() - buffer)
    stop = min(len(text), match.end())
    quote = text[start:stop]
    # Guard: 50 characters of context plus the match must stay short.
    assert len(quote) < 200
    return quote
def clean_url(url):
    """Normalize a URL: lowercase it and strip the query string and fragment."""
    parsed = urlparse(url.lower())
    stripped = parsed._replace(query=None, fragment=None)
    return stripped.geturl()
| StarcoderdataPython |
8172173 | # This code is copied and adapted from <NAME>'s code for learning to
# play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import ray
import time
import gym
# Define some hyperparameters.
# The number of hidden layer neurons.
H = 200
# Step size for the RMSProp parameter updates below.
learning_rate = 1e-4
# Discount factor for reward.
gamma = 0.99
# The decay factor for RMSProp leaky sum of grad^2.
decay_rate = 0.99
# The input dimensionality: 80x80 grid.
D = 80 * 80
def sigmoid(x):
    """Logistic "squashing" function: maps x into the interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)
def preprocess(I):
    """Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
    # Crop to the playing field (drop the scoreboard and bottom border).
    I = I[35:195]
    # Downsample by a factor of 2 in both dimensions, keep one channel.
    # (Fixed: this slice was corrupted in the source; the original code
    # reads I[::2, ::2, 0].)
    I = I[::2, ::2, 0]
    # Erase background (background type 1).
    I[I == 144] = 0
    # Erase background (background type 2).
    I[I == 109] = 0
    # Set everything else (paddles, ball) to 1.
    I[I != 0] = 1
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    return I.astype(float).ravel()
def discount_rewards(r):
    """take 1D float array of rewards and compute discounted reward

    Pong-specific: a non-zero reward marks the end of a game, so the
    running discounted sum is reset at each game boundary.
    """
    discounted = np.zeros_like(r)
    running = 0
    for t in range(r.size - 1, -1, -1):
        if r[t] != 0:
            # Game boundary (pong specific!): reset the running sum.
            running = 0
        running = running * gamma + r[t]
        discounted[t] = running
    return discounted
def policy_forward(x, model):
    """Forward pass of the two-layer policy network.

    Returns the probability of taking action 2 and the hidden-layer
    activations (needed later for backprop).
    """
    hidden = np.dot(model["W1"], x)
    hidden[hidden < 0] = 0  # ReLU nonlinearity
    logit = np.dot(model["W2"], hidden)
    return sigmoid(logit), hidden
def policy_backward(eph, epx, epdlogp, model):
    """backward pass. (eph is array of intermediate hidden states)

    Given the episode's inputs (epx), hidden states (eph) and modulated
    log-probability gradients (epdlogp), return the gradients for W1/W2.
    """
    grad_W2 = np.dot(eph.T, epdlogp).ravel()
    hidden_grad = np.outer(epdlogp, model["W2"])
    hidden_grad[eph <= 0] = 0  # backprop through the ReLU
    grad_W1 = np.dot(hidden_grad.T, epx)
    return {"W1": grad_W1, "W2": grad_W2}
@ray.remote
class PongEnv(object):
    """Ray actor wrapping one Gym Pong environment: plays a full episode
    with a given policy and returns the policy gradient for it."""
    def __init__(self):
        # Tell numpy to only use one core. If we don't do this, each actor may try
        # to use all of the cores and the resulting contention may result in no
        # speedup over the serial version. Note that if numpy is using OpenBLAS,
        # then you need to set OPENBLAS_NUM_THREADS=1, and you probably need to do
        # it from the command line (so it happens before numpy is imported).
        os.environ["MKL_NUM_THREADS"] = "1"
        self.env = gym.make("Pong-v0")

    def compute_gradient(self, model):
        """Play one episode using *model* and return a tuple of
        (gradient dict for W1/W2, total episode reward)."""
        # Reset the game.
        observation = self.env.reset()
        # Note that prev_x is used in computing the difference frame.
        prev_x = None
        xs, hs, dlogps, drs = [], [], [], []
        reward_sum = 0
        done = False
        while not done:
            cur_x = preprocess(observation)
            # The policy sees frame differences so motion is observable.
            x = cur_x - prev_x if prev_x is not None else np.zeros(D)
            prev_x = cur_x
            aprob, h = policy_forward(x, model)
            # Sample an action.
            action = 2 if np.random.uniform() < aprob else 3
            # The observation.
            xs.append(x)
            # The hidden state.
            hs.append(h)
            y = 1 if action == 2 else 0  # A "fake label".
            # The gradient that encourages the action that was taken to be taken (see
            # http://cs231n.github.io/neural-networks-2/#losses if confused).
            dlogps.append(y - aprob)
            observation, reward, done, info = self.env.step(action)
            reward_sum += reward
            # Record reward (has to be done after we call step() to get reward for
            # previous action).
            drs.append(reward)
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        # Reset the array memory.
        xs, hs, dlogps, drs = [], [], [], []
        # Compute the discounted reward backward through time.
        discounted_epr = discount_rewards(epr)
        # Standardize the rewards to be unit normal (helps control the gradient
        # estimator variance).
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
        # Modulate the gradient with advantage (the policy gradient magic happens
        # right here).
        epdlogp *= discounted_epr
        return policy_backward(eph, epx, epdlogp, model), reward_sum
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train an RL agent on Pong.")
    parser.add_argument("--batch-size", default=10, type=int,
                        help="The number of rollouts to do per batch.")
    parser.add_argument("--redis-address", default=None, type=str,
                        help="The Redis address of the cluster.")
    args = parser.parse_args()
    batch_size = args.batch_size
    ray.init(redis_address=args.redis_address, redirect_output=True)
    # Run the reinforcement learning.
    running_reward = None
    batch_num = 1
    model = {}
    # "Xavier" initialization.
    model["W1"] = np.random.randn(H, D) / np.sqrt(D)
    model["W2"] = np.random.randn(H) / np.sqrt(H)
    # Update buffers that add up gradients over a batch.
    grad_buffer = {k: np.zeros_like(v) for k, v in model.items()}
    # Update the rmsprop memory.
    rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()}
    # One remote actor per rollout in a batch.
    actors = [PongEnv.remote() for _ in range(batch_size)]
    while True:
        model_id = ray.put(model)
        actions = []
        # Launch tasks to compute gradients from multiple rollouts in parallel.
        start_time = time.time()
        for i in range(batch_size):
            action_id = actors[i].compute_gradient.remote(model_id)
            actions.append(action_id)
        # Collect results as they finish, in completion order.
        for i in range(batch_size):
            action_id, actions = ray.wait(actions)
            grad, reward_sum = ray.get(action_id[0])
            # Accumulate the gradient over batch.
            for k in model:
                grad_buffer[k] += grad[k]
            # Exponential moving average of the episode rewards.
            running_reward = (reward_sum if running_reward is None
                              else running_reward * 0.99 + reward_sum * 0.01)
        end_time = time.time()
        print("Batch {} computed {} rollouts in {} seconds, "
              "running mean is {}".format(batch_num, batch_size,
                                          end_time - start_time, running_reward))
        # RMSProp update (gradient *ascent* on expected reward, hence +=).
        for k, v in model.items():
            g = grad_buffer[k]
            rmsprop_cache[k] = (decay_rate * rmsprop_cache[k] +
                                (1 - decay_rate) * g ** 2)
            model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
            # Reset the batch gradient buffer.
            grad_buffer[k] = np.zeros_like(v)
        batch_num += 1
| StarcoderdataPython |
4870595 | <filename>cartografo/__init__.py
# Supported Kubernetes object kinds.
CONFIG_MAP_KIND = 'ConfigMap'
SECRET_KIND = 'Secret'
# Defaults used when the caller does not specify a kind / output file.
DEFAULT_OBJECT = CONFIG_MAP_KIND
DEFAULT_TARGET = 'new.yaml'
# Skeleton manifests; callers fill in `data` and `metadata.name`.
DEFAULT_CONFIG_MAP = {'kind': 'ConfigMap', 'apiVersion': 'v1', 'data': {}, 'metadata': {'name': 'type the name'}}
DEFAULT_SECRET = {'kind': 'Secret', 'apiVersion': 'v1', 'data': {}, 'metadata': {'name': 'type the name'}}
136519 | """
Trains a model, acording to the experiment file input
The arguments are loaded from a .yaml file, which is the input argument of this script
(Instructions to run: `python train_model.py <path to .yaml file>`)
"""
import os
import sys
import time
import logging
import yaml
from bff_positioning.data import Preprocessor, PathCreator, create_noisy_features, undersample_bf,\
undersample_space, get_95th_percentile, sample_paths
from bff_positioning.models import CNN, LSTM, TCN
def main():
    """Main block of code, which runs the training.

    Reads the experiment .yaml given as the single CLI argument, loads and
    optionally undersamples the dataset, builds the requested model (CNN,
    LSTM or TCN), runs the epoch loop until the model signals it should
    stop, then saves the trained model under the experiment's name.
    """
    start = time.time()
    logging.basicConfig(level="INFO")

    # Load the .yaml data and unpacks it
    assert len(sys.argv) == 2, "Exactly one experiment configuration file must be "\
        "passed as a positional argument to this script. \n\n"\
        "E.g. `python run_non_tracking_experiment.py <path to .yaml file>`"
    with open(sys.argv[1], "r") as yaml_config_file:
        logging.info("Loading simulation settings from %s", sys.argv[1])
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input -- consider yaml.safe_load; confirm the
        # configs contain no custom YAML tags before switching.
        experiment_config = yaml.load(yaml_config_file)
    experiment_settings = experiment_config['experiment_settings']
    data_parameters = experiment_config['data_parameters']
    ml_parameters = experiment_config['ml_parameters']
    # 'path_parameters' only exists for tracking experiments.
    path_parameters = experiment_config['path_parameters'] \
        if 'path_parameters' in experiment_config else None

    # Loads the raw dataset
    logging.info("Loading the dataset...")
    data_preprocessor = Preprocessor(data_parameters)
    features, labels = data_preprocessor.load_dataset()
    if path_parameters:
        path_creator = PathCreator(data_parameters, path_parameters, labels)
        paths = path_creator.load_paths()

    # Undersamples the dataset (if requested)
    if "undersample_bf" in experiment_settings and experiment_settings["undersample_bf"]:
        features = undersample_bf(features, data_parameters["beamformings"])
    if "undersample_space" in experiment_settings:
        assert not path_parameters, "This option is not supported for tracking experiments, "\
            "unless the code for the path creation is updated"
        features, labels = undersample_space(features, labels, data_parameters["undersample_space"])

    # Initializes the model and prepares it for training
    logging.info("Initializing the model (type = %s)...", experiment_settings["model_type"].lower())
    if experiment_settings["model_type"].lower() == "cnn":
        ml_parameters["input_type"] = "float"
        model = CNN(ml_parameters)
    elif experiment_settings["model_type"].lower() in ("lstm", "tcn"):
        # Sequence models need paths whose length matches the model input.
        assert path_parameters, "This model requires `paths_parameters`. See the example."
        assert path_parameters["time_steps"] == ml_parameters["input_shape"][0], "The ML model "\
            "first input dimention must match the length of the paths! (path length = {}, model)"\
            "input = {})".format(path_parameters["time_steps"], ml_parameters["input_shape"][0])
        ml_parameters["input_type"] = "bool"
        if experiment_settings["model_type"].lower() == "lstm":
            model = LSTM(ml_parameters)
        else:
            model = TCN(ml_parameters)
    else:
        raise ValueError("The simulation settings specified 'model_type'={}. Currently, only "
            "'cnn', 'lstm', and 'tcn' are supported.".format(experiment_settings["model_type"]))
    model.set_graph()

    # Creates the validation set
    logging.info("Creating validation set...")
    if path_parameters:
        features_val, labels_val, _ = sample_paths(
            paths["validation"],
            features,
            labels,
            experiment_settings,
            data_parameters,
            path_parameters,
        )
    else:
        features_val, labels_val = create_noisy_features(
            features,
            labels,
            experiment_settings,
            data_parameters,
        )

    # Runs the training loop: a fresh noisy training set is sampled each
    # epoch, and `model.epoch_end` decides when to stop.
    logging.info("\nStaring the training loop!\n")
    keep_training = True
    while keep_training:
        logging.info("Creating noisy set for this epoch...")
        if path_parameters:
            features_train, labels_train, _ = sample_paths(
                paths["train"],
                features,
                labels,
                experiment_settings,
                data_parameters,
                path_parameters,
                sample_fraction=experiment_settings["train_sample_fraction"]
            )
        else:
            features_train, labels_train = create_noisy_features(
                features,
                labels,
                experiment_settings,
                data_parameters,
            )
        model.train_epoch(features_train, labels_train)
        predictions_val = model.predict(features_val, validation=True)
        keep_training, val_avg_dist = model.epoch_end(labels_val, predictions_val)
        if predictions_val is not None:
            # Upscales the validation score back to the original scale and gets the 95th percentile
            val_avg_dist *= data_parameters["pos_grid"][0]
            val_95_perc = get_95th_percentile(
                labels_val,
                predictions_val,
                rescale_factor=data_parameters["pos_grid"][0]
            )
            logging.info("Current avg val. distance: %.5f m || 95th percentile: %.5f m\n",
                val_avg_dist, val_95_perc)

    # Store the trained model and cleans up
    logging.info("Saving and closing model.")
    experiment_name = os.path.basename(sys.argv[1]).split('.')[0]
    model.save(model_name=experiment_name)
    model.close()

    # Prints elapsed time
    end = time.time()
    exec_time = (end-start)
    logging.info("Total execution time: %.5E seconds", exec_time)
if __name__ == '__main__':
    # Script entry point: run the training described by the .yaml argument.
    main()
| StarcoderdataPython |
class TranslateFilePolicy():
    """Policy deciding whether a file may be submitted for translation,
    based on its extension."""

    # Extensions (without the dot) accepted for translation.
    allowed_formats = ['txt']

    def is_allowed(self, filename):
        """True when *filename*'s final extension is an allowed format.

        Fixed: the original used ``filename.split('.')[1]``, which raised
        IndexError for names without a dot and picked the wrong component
        for multi-dot names such as ``archive.backup.txt``.
        """
        _, _, extension = filename.rpartition('.')
        return extension in self.allowed_formats
| StarcoderdataPython |
1922386 | <filename>plugins/poclbm/HttpTransport.py<gh_stars>1-10
from Transport import Transport
from base64 import b64encode
from json import dumps, loads
from log import *
from sha256 import *
from threading import Thread
from time import sleep, time
from urlparse import urlsplit
from util import *
import httplib
import traceback
class NotAuthorized(Exception):
	"""Raised when the pool rejects the configured credentials (HTTP 401)."""
class RPCError(Exception):
	"""Raised when a JSON-RPC response carries a server-side error payload."""
class HttpTransport(Transport):
	"""HTTP getwork transport for the poclbm miner.

	Talks JSON-RPC ``getwork`` to a mining pool over httplib, submits
	found shares, and listens for new blocks on an X-Long-Polling channel
	in a daemon thread.  NOTE(review): this is Python 2-only code
	(httplib, urlparse, str.encode('hex'), long) -- confirm the target
	interpreter before reuse.
	"""

	def __init__(self, miner):
		"""Initializes connection state and the static getwork POST body."""
		self.connection = self.lp_connection = None
		super(HttpTransport, self).__init__(miner)
		self.timeout = 5
		self.long_poll_timeout = 3600
		# While long polling is active we can afford to ask for work far
		# less often, since the pool pushes new-block notifications.
		self.long_poll_max_askrate = 60 - self.timeout
		self.max_redirects = 3
		self.postdata = {'method': 'getwork', 'id': 'json'}
		self.long_poll_active = False
		self.long_poll_url = ''

	def loop(self):
		"""Main transport loop: fetch work, submit queued results, repeat.

		Runs until stop() sets should_stop.  A long-poll listener thread is
		started first; it communicates via long_poll_active/long_poll_url.
		"""
		self.should_stop = False
		thread = Thread(target=self.long_poll_thread)
		thread.daemon = True
		thread.start()
		while True:
			if self.should_stop: return
			try:
				with self.lock:
					# Refresh work when flagged, or when the current work is
					# older than the effective askrate (relaxed during LP).
					update = self.update = (self.update or (time() - self.last_work) > if_else(self.long_poll_active, self.long_poll_max_askrate, self.config.askrate))
				if update:
					work = self.getwork()
					if self.update:
						self.queue_work(work)
				while not self.result_queue.empty():
					result = self.result_queue.get(False)
					with self.lock:
						# NOTE(review): send() is presumably provided by the
						# Transport base class -- it is not defined here.
						rv = self.send(result)
				sleep(1)
			except Exception:
				say_line("Unexpected error:")
				traceback.print_exc()

	def connect(self, proto, host, timeout):
		"""Returns a new HTTP(S) connection to *host* with the given timeout."""
		if proto == 'https': connector = httplib.HTTPSConnection
		else: connector = httplib.HTTPConnection
		return connector(host, strict=True, timeout=timeout)

	def request(self, connection, url, headers, data=None):
		"""Performs one HTTP request: POST when *data* is given, else GET.

		Follows up to max_redirects temporary redirects, records the pool's
		X-Long-Polling / X-Roll-NTime / X-Host-List headers, and returns
		(connection, parsed_json).  The connection is closed and dropped
		whenever keep-alive was not negotiated or the request failed.

		Raises:
			NotAuthorized: on HTTP 401.
			RPCError: when the JSON-RPC response carries an error object.
		"""
		result = response = None
		try:
			if data: connection.request('POST', url, data, headers)
			else: connection.request('GET', url, headers=headers)
			response = connection.getresponse()
			if response.status == httplib.UNAUTHORIZED: raise NotAuthorized()
			r = self.max_redirects
			while response.status == httplib.TEMPORARY_REDIRECT:
				response.read()
				url = response.getheader('Location', '')
				# NOTE(review): HTTPException is referenced unqualified here;
				# it is likely meant to be httplib.HTTPException unless a
				# wildcard import (log/util) provides the name -- confirm.
				if r == 0 or url == '': raise HTTPException('Too much or bad redirects')
				connection.request('GET', url, headers=headers)
				response = connection.getresponse();
				r -= 1
			self.long_poll_url = response.getheader('X-Long-Polling', '')
			self.miner.update_time = bool(response.getheader('X-Roll-NTime', ''))
			hostList = response.getheader('X-Host-List', '')
			if (not self.config.nsf) and hostList: self.add_servers(loads(hostList))
			result = loads(response.read())
			if result['error']: raise RPCError(result['error']['message'])
			return (connection, result)
		finally:
			# Drop the connection unless HTTP/1.1 keep-alive semantics apply.
			if not result or not response or (response.version == 10 and response.getheader('connection', '') != 'keep-alive') or response.getheader('connection', '') == 'close':
				connection.close()
				connection = None

	def getwork(self, data=None):
		"""Requests new work, or submits a share when *data* is given.

		Handles periodic failback to the primary server and rotation through
		backup pools after repeated communication errors.  Returns the
		'result' field of the JSON-RPC reply, or None when an error was
		handled internally.
		"""
		save_server = None
		try:
			# Periodically attempt to fail back to the primary server.
			if self.server != self.servers[0] and self.config.failback > 0:
				if self.failback_getwork_count >= self.config.failback:
					save_server = self.server
					say_line("Attempting to fail back to primary server")
					self.set_server(self.servers[0])
				self.failback_getwork_count += 1
			if not self.connection:
				self.connection = self.connect(self.proto, self.host, self.timeout)
			self.postdata['params'] = if_else(data, [data], [])
			(self.connection, result) = self.request(self.connection, '/', self.headers, dumps(self.postdata))
			self.errors = 0
			if self.server == self.servers[0]:
				# Talking to the primary again: reset failback bookkeeping.
				self.backup_server_index = 1
				self.failback_getwork_count = 0
				self.failback_attempt_count = 0
			return result['result']
		except NotAuthorized:
			self.failure('Wrong username or password')
		except RPCError as e:
			say('%s', e)
		except (IOError, httplib.HTTPException, ValueError):
			if save_server:
				# The failback attempt itself failed: return to the backup.
				self.failback_attempt_count += 1
				self.set_server(save_server)
				say_line('Still unable to reconnect to primary server (attempt %s), failing over', self.failback_attempt_count)
				self.failback_getwork_count = 0
				return
			say('Problems communicating with bitcoin RPC %s %s', (self.errors, self.config.tolerance))
			self.errors += 1
			if self.errors > self.config.tolerance + 1:
				self.errors = 0
				if self.backup_server_index >= len(self.servers):
					say_line("No more backup pools left. Using primary and starting over.")
					pool = self.servers[0]
					self.backup_server_index = 1
				else:
					pool = self.servers[self.backup_server_index]
					self.backup_server_index += 1
				self.set_server(pool)

	def send_internal(self, result, nonce):
		"""Submits one solved nonce back to the pool via getwork(data)."""
		# Rebuild the share as hex: original header + solved time/difficulty/
		# nonce + the standard SHA-256 padding/length suffix.
		data = ''.join([result.header.encode('hex'), pack('III', long(result.time), long(result.difficulty), long(nonce)).encode('hex'), '000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000'])
		accepted = self.getwork(data)
		if accepted != None:
			self.report(nonce, accepted)

	def long_poll_thread(self):
		"""Background thread: blocks on the pool's long-poll URL for new work."""
		last_host = None
		while True:
			sleep(1)
			url = self.long_poll_url
			if url != '':
				proto = self.proto
				host = self.host
				parsedUrl = urlsplit(url)
				if parsedUrl.scheme != '':
					proto = parsedUrl.scheme
				if parsedUrl.netloc != '':
					host = parsedUrl.netloc
					# Keep only the path portion after the host.
					url = url[url.find(host) + len(host):]
					if url == '': url = '/'
				try:
					if host != last_host: self.close_lp_connection()
					if not self.lp_connection:
						self.lp_connection = self.connect(proto, host, self.long_poll_timeout)
						say_line("LP connected to %s", self.server[4])
						last_host = host
					self.long_poll_active = True
					(self.lp_connection, result) = self.request(self.lp_connection, url, self.headers)
					self.long_poll_active = False
					if self.should_stop:
						return
					self.queue_work(result['result'])
					if self.config.verbose:
						say_line('long poll: new block %s%s', (result['result']['data'][56:64], result['result']['data'][48:56]))
				except NotAuthorized:
					say_line('long poll: Wrong username or password')
				except RPCError as e:
					say_line('long poll: %s', e)
				except (IOError, httplib.HTTPException, ValueError):
					say_line('long poll: IO error')
					#traceback.print_exc()
					self.close_lp_connection()

	def stop(self):
		"""Signals loop() to exit and tears down the long-poll connection."""
		self.should_stop = True
		self.close_lp_connection()

	def set_server(self, server):
		"""Switches pools: rebuilds auth headers and drops open connections."""
		super(HttpTransport, self).set_server(server)
		user, pwd = server[1:3]
		self.headers = {"User-Agent": self.user_agent, "Authorization": 'Basic ' + b64encode('%s:%s' % (user, pwd))}
		self.long_poll_url = ''
		if self.connection:
			self.connection.close()
			self.connection = None
		self.close_lp_connection()

	def close_lp_connection(self):
		"""Closes and clears the long-poll connection, if any."""
		if self.lp_connection:
			self.lp_connection.close()
			self.lp_connection = None

	def decode(self, work):
		"""Converts a getwork reply into an internal job object.

		NOTE(review): np, unpack, sha256, STATE, partial, chunks, Object and
		calculateF presumably come from the wildcard imports above -- they
		are not defined in this class.
		"""
		if work:
			job = Object()
			if not 'target' in work:
				# Default to the difficulty-1 target when the pool omits it.
				work['target'] = 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000'
			binary_data = work['data'].decode('hex')
			data0 = np.zeros(64, np.uint32)
			data0 = np.insert(data0, [0] * 16, unpack('IIIIIIIIIIIIIIII', binary_data[:64]))
			job.target = np.array(unpack('IIIIIIII', work['target'].decode('hex')), dtype=np.uint32)
			job.header = binary_data[:68]
			job.merkle_end = np.uint32(unpack('I', binary_data[64:68])[0])
			job.time = np.uint32(unpack('I', binary_data[68:72])[0])
			job.difficulty = np.uint32(unpack('I', binary_data[72:76])[0])
			job.state = sha256(STATE, data0)
			job.f = np.zeros(8, np.uint32)
			job.state2 = partial(job.state, job.merkle_end, job.time, job.difficulty, job.f)
			job.targetQ = 2**256 / int(''.join(list(chunks(work['target'], 2))[::-1]), 16)
			calculateF(job.state, job.merkle_end, job.time, job.difficulty, job.f, job.state2)
			return job
9780333 | # Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains base classes used to parse and convert arguments.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
import collections
import csv
import io
import string
from absl.flags import _helpers
def _is_integer_type(instance):
"""Returns True if instance is an integer, and not a bool."""
return (isinstance(instance, int) and
not isinstance(instance, bool))
class _ArgumentParserCache(type):
  """Metaclass used to cache and share argument parsers among flags."""

  # Maps (cls, *positional_args) -> shared parser instance.
  _instances = {}

  def __call__(cls, *args, **kwargs):
    """Returns an instance of the argument parser cls.

    This method overrides behavior of the __new__ methods in
    all subclasses of ArgumentParser (inclusive). If an instance
    for cls with the same set of arguments exists, this instance is
    returned, otherwise a new instance is created.

    If any keyword arguments are defined, or the values in args
    are not hashable, this method always returns a new instance of
    cls.

    Args:
      *args: Positional initializer arguments.
      **kwargs: Initializer keyword arguments.

    Returns:
      An instance of cls, shared or new.
    """
    if kwargs:
      # Keyword arguments would make cache keys ambiguous; skip caching.
      return type.__call__(cls, *args, **kwargs)
    else:
      instances = cls._instances
      key = (cls,) + tuple(args)
      try:
        return instances[key]
      except KeyError:
        # No cache entry for key exists, create a new one.
        # setdefault keeps the operation race-free: if another thread won,
        # its instance is returned instead of ours.
        return instances.setdefault(key, type.__call__(cls, *args))
      except TypeError:
        # An object in args cannot be hashed, always return
        # a new instance.
        return type.__call__(cls, *args)
# NOTE about Genericity and Metaclass of ArgumentParser.
# (1) In the .py source (this file)
# - is not declared as Generic
# - has _ArgumentParserCache as a metaclass
# (2) In the .pyi source (type stub)
# - is declared as Generic
# - doesn't have a metaclass
# The reason we need this is due to Generic having a different metaclass
# (for python versions <= 3.7) and a class can have only one metaclass.
#
# * Lack of metaclass in .pyi is not a deal breaker, since the metaclass
# doesn't affect any type information. Also type checkers can check the type
# parameters.
# * However, not declaring ArgumentParser as Generic in the source affects
# runtime annotation processing. In particular this means, subclasses should
# inherit from `ArgumentParser` and not `ArgumentParser[SomeType]`.
# The corresponding DEFINE_someType method (the public API) can be annotated
# to return FlagHolder[SomeType].
class ArgumentParser(metaclass=_ArgumentParserCache):
  """Base class used to parse and convert arguments.

  ``parse()`` validates a string argument and converts it to a native
  value, raising ValueError with a human-readable explanation when the
  value is illegal.  Subclasses may define a ``syntactic_help`` string
  describing the form of legal values.

  Parsers must be stateless: instances are cached by the metaclass and
  shared between flags, so all member state must derive solely from
  initializer arguments.
  """

  syntactic_help = ''

  def parse(self, argument):
    """Parses the string argument and returns it unmodified.

    Args:
      argument: string argument passed in the commandline.

    Raises:
      TypeError: Raised when the argument is not a string.

    Returns:
      The parsed value in native type.
    """
    if isinstance(argument, str):
      return argument
    raise TypeError('flag value must be a string, found "{}"'.format(
        type(argument)))

  def flag_type(self):
    """Returns a string representing the type of the flag."""
    return 'string'

  def _custom_xml_dom_elements(self, doc):
    """Returns extra minidom.Elements for this flag; the base has none.

    Args:
      doc: minidom.Document, the DOM document it should create nodes from.
    """
    del doc  # Unused.
    return []
class ArgumentSerializer(object):
  """Base class for generating string representations of a flag value."""

  def serialize(self, value):
    """Returns *value* rendered as a (unicode) string."""
    return _helpers.str_or_unicode(value)
class NumericParser(ArgumentParser):
  """Parser of numeric values, optionally restricted to a closed range."""

  def is_outside_bounds(self, val):
    """Returns True when *val* violates a configured lower or upper bound."""
    if self.lower_bound is not None and val < self.lower_bound:
      return True
    if self.upper_bound is not None and val > self.upper_bound:
      return True
    return False

  def parse(self, argument):
    """Converts *argument* and enforces the configured bounds.

    Raises:
      ValueError: When the converted value falls outside the bounds.
    """
    val = self.convert(argument)
    if self.is_outside_bounds(val):
      raise ValueError('%s is not %s' % (val, self.syntactic_help))
    return val

  def _custom_xml_dom_elements(self, doc):
    """Emits a <lower_bound>/<upper_bound> element per configured bound."""
    elements = []
    for tag, bound in (('lower_bound', self.lower_bound),
                       ('upper_bound', self.upper_bound)):
      if bound is not None:
        elements.append(_helpers.create_xml_dom_element(doc, tag, bound))
    return elements

  def convert(self, argument):
    """Returns the numeric value of *argument*; subclasses must override.

    Args:
      argument: string argument passed in the commandline, or the numeric type.

    Raises:
      TypeError: Raised when argument is not a string or the right numeric type.
      ValueError: Raised when failed to convert argument to the numeric value.
    """
    raise NotImplementedError
class FloatParser(NumericParser):
  """Parser of floating point values, optionally bounded above and below."""

  number_article = 'a'
  number_name = 'number'
  syntactic_help = ' '.join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    """Stores the bounds and derives a human-readable help string."""
    super(FloatParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    help_text = self.syntactic_help
    # Most specific phrasing first; the order of these checks matters.
    if lower_bound is not None and upper_bound is not None:
      help_text = ('%s in the range [%s, %s]' % (help_text, lower_bound, upper_bound))
    elif lower_bound == 0:
      help_text = 'a non-negative %s' % self.number_name
    elif upper_bound == 0:
      help_text = 'a non-positive %s' % self.number_name
    elif upper_bound is not None:
      help_text = '%s <= %s' % (self.number_name, upper_bound)
    elif lower_bound is not None:
      help_text = '%s >= %s' % (self.number_name, lower_bound)
    self.syntactic_help = help_text

  def convert(self, argument):
    """Returns the float value of argument, accepting str/int/float input."""
    if (isinstance(argument, float) or isinstance(argument, str)
        or _is_integer_type(argument)):
      return float(argument)
    raise TypeError(
        'Expect argument to be a string, int, or float, found {}'.format(
            type(argument)))

  def flag_type(self):
    """See base class."""
    return 'float'
class IntegerParser(NumericParser):
  """Parser of an integer value, optionally bounded above and below."""

  number_article = 'an'
  number_name = 'integer'
  syntactic_help = ' '.join((number_article, number_name))

  def __init__(self, lower_bound=None, upper_bound=None):
    """Stores the bounds and derives a human-readable help string."""
    super(IntegerParser, self).__init__()
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    help_text = self.syntactic_help
    # Most specific phrasing first; the order of these checks matters.
    if lower_bound is not None and upper_bound is not None:
      help_text = ('%s in the range [%s, %s]' % (help_text, lower_bound, upper_bound))
    elif lower_bound == 1:
      help_text = 'a positive %s' % self.number_name
    elif upper_bound == -1:
      help_text = 'a negative %s' % self.number_name
    elif lower_bound == 0:
      help_text = 'a non-negative %s' % self.number_name
    elif upper_bound == 0:
      help_text = 'a non-positive %s' % self.number_name
    elif upper_bound is not None:
      help_text = '%s <= %s' % (self.number_name, upper_bound)
    elif lower_bound is not None:
      help_text = '%s >= %s' % (self.number_name, lower_bound)
    self.syntactic_help = help_text

  def convert(self, argument):
    """Returns the int value of argument, accepting 0o/0x prefixed strings."""
    if _is_integer_type(argument):
      return argument
    if not isinstance(argument, str):
      raise TypeError('Expect argument to be a string or int, found {}'.format(
          type(argument)))
    base = 10
    if len(argument) > 2 and argument[0] == '0':
      prefix = argument[1]
      if prefix == 'o':
        base = 8
      elif prefix == 'x':
        base = 16
    return int(argument, base)

  def flag_type(self):
    """See base class."""
    return 'int'
class BooleanParser(ArgumentParser):
  """Parser of boolean values."""

  def parse(self, argument):
    """Maps 'true'/'t'/'1' (any case) to True and 'false'/'f'/'0' to False.

    Integer inputs equal to exactly 0 or 1 (including bool) are accepted
    as well; anything else raises.
    """
    if isinstance(argument, str):
      lowered = argument.lower()
      if lowered in ('true', 't', '1'):
        return True
      if lowered in ('false', 'f', '0'):
        return False
      raise ValueError('Non-boolean argument to boolean flag', argument)
    if isinstance(argument, int):
      # Only bool or the exact integers 0/1 qualify.  Note float 1.0 never
      # reaches this branch because it is not an int.
      bool_value = bool(argument)
      if argument == bool_value:
        return bool_value
      raise ValueError('Non-boolean argument to boolean flag', argument)
    raise TypeError('Non-boolean argument to boolean flag', argument)

  def flag_type(self):
    """See base class."""
    return 'bool'
class EnumParser(ArgumentParser):
  """Parser of a string enum value (a string value from a given set)."""

  def __init__(self, enum_values, case_sensitive=True):
    """Initializes EnumParser.

    Args:
      enum_values: [str], a non-empty list of string values in the enum.
      case_sensitive: bool, whether or not the enum is to be case-sensitive.

    Raises:
      ValueError: When enum_values is empty.
    """
    if not enum_values:
      raise ValueError(
          'enum_values cannot be empty, found "{}"'.format(enum_values))
    super(EnumParser, self).__init__()
    self.enum_values = enum_values
    self.case_sensitive = case_sensitive

  def parse(self, argument):
    """Returns the enum_values element matching *argument*.

    When case-insensitive, the canonical spelling from enum_values (the
    first case-folded match) is returned, not the caller's spelling.

    Args:
      argument: str, the supplied flag value.

    Raises:
      ValueError: Raised when argument didn't match anything in enum.
    """
    if self.case_sensitive:
      if argument not in self.enum_values:
        raise ValueError('value should be one of <%s>' %
                         '|'.join(self.enum_values))
      return argument
    folded = argument.upper()
    for value in self.enum_values:
      if value.upper() == folded:
        return value
    raise ValueError('value should be one of <%s>' %
                     '|'.join(self.enum_values))

  def flag_type(self):
    """See base class."""
    return 'string enum'
class EnumClassParser(ArgumentParser):
  """Parser of an Enum class member."""

  def __init__(self, enum_class, case_sensitive=True):
    """Initializes EnumParser.

    Args:
      enum_class: class, the Enum class with all possible flag values.
      case_sensitive: bool, whether or not the enum is to be case-sensitive. If
        False, all member names must be unique when case is ignored.

    Raises:
      TypeError: When enum_class is not a subclass of Enum.
      ValueError: When enum_class is empty.
    """
    # Users must have an Enum class defined before using EnumClass flag.
    # Therefore this dependency is guaranteed.
    import enum

    if not issubclass(enum_class, enum.Enum):
      raise TypeError('{} is not a subclass of Enum.'.format(enum_class))
    if not enum_class.__members__:
      raise ValueError('enum_class cannot be empty, but "{}" is empty.'
                       .format(enum_class))
    if not case_sensitive:
      # Reject enums whose member names collide once case is folded, since
      # a case-insensitive lookup could not disambiguate them.
      members = collections.Counter(
          name.lower() for name in enum_class.__members__)
      duplicate_keys = {
          member for member, count in members.items() if count > 1
      }
      if duplicate_keys:
        raise ValueError(
            'Duplicate enum values for {} using case_sensitive=False'.format(
                duplicate_keys))

    super(EnumClassParser, self).__init__()
    self.enum_class = enum_class
    self._case_sensitive = case_sensitive
    # Pre-compute the accepted spellings used by parse() below.
    if case_sensitive:
      self._member_names = tuple(enum_class.__members__)
    else:
      self._member_names = tuple(
          name.lower() for name in enum_class.__members__)

  @property
  def member_names(self):
    """The accepted enum names, in lowercase if not case sensitive."""
    return self._member_names

  def parse(self, argument):
    """Determines validity of argument and returns the correct element of enum.

    Args:
      argument: str or Enum class member, the supplied flag value.

    Returns:
      The first matching Enum class member in Enum class.

    Raises:
      ValueError: Raised when argument didn't match anything in enum.
    """
    if isinstance(argument, self.enum_class):
      return argument
    elif not isinstance(argument, str):
      raise ValueError(
          '{} is not an enum member or a name of a member in {}'.format(
              argument, self.enum_class))
    # Delegate spelling validation (and its error message) to EnumParser.
    key = EnumParser(
        self._member_names, case_sensitive=self._case_sensitive).parse(argument)
    if self._case_sensitive:
      return self.enum_class[key]
    else:
      # If EnumParser.parse() return a value, we're guaranteed to find it
      # as a member of the class
      return next(value for name, value in self.enum_class.__members__.items()
                  if name.lower() == key.lower())

  def flag_type(self):
    """See base class."""
    return 'enum class'
class ListSerializer(ArgumentSerializer):
  """Serializer that joins list elements with a configurable separator."""

  def __init__(self, list_sep):
    self.list_sep = list_sep

  def serialize(self, value):
    """See base class."""
    parts = [_helpers.str_or_unicode(x) for x in value]
    return self.list_sep.join(parts)
class EnumClassListSerializer(ListSerializer):
  """A serializer for MultiEnumClass flags.

  Each element is rendered by an `EnumClassSerializer` and the results are
  joined with the provided separator.
  """

  def __init__(self, list_sep, **kwargs):
    """Initializes EnumClassListSerializer.

    Args:
      list_sep: String to be used as a separator when serializing
      **kwargs: Keyword arguments to the `EnumClassSerializer` used to serialize
        individual values.
    """
    super(EnumClassListSerializer, self).__init__(list_sep)
    self._element_serializer = EnumClassSerializer(**kwargs)

  def serialize(self, value):
    """See base class."""
    if not isinstance(value, list):
      # A single enum member: serialize it directly, no joining needed.
      return self._element_serializer.serialize(value)
    return self.list_sep.join(
        self._element_serializer.serialize(x) for x in value)
class CsvListSerializer(ArgumentSerializer):
  """Serializes a list of values as one CSV-escaped line."""

  def __init__(self, list_sep):
    self.list_sep = list_sep

  def serialize(self, value):
    """Serializes a list as a CSV string or unicode."""
    buffer_ = io.StringIO()
    writer = csv.writer(buffer_, delimiter=self.list_sep)
    writer.writerow([str(x) for x in value])
    # Strip the line terminator the csv writer appends; keep the result
    # pure ascii/unicode so the generated XML help stays encodable.
    return _helpers.str_or_unicode(buffer_.getvalue().strip())
class EnumClassSerializer(ArgumentSerializer):
  """Class for generating string representations of an enum class flag value."""

  def __init__(self, lowercase):
    """Initializes EnumClassSerializer.

    Args:
      lowercase: If True, enum member names are lowercased during serialization.
    """
    self._lowercase = lowercase

  def serialize(self, value):
    """Returns the Enum member's name, lowercased when so configured."""
    as_string = _helpers.str_or_unicode(value.name)
    if self._lowercase:
      return as_string.lower()
    return as_string
class BaseListParser(ArgumentParser):
  """Base class for a parser of lists of strings.

  Subclasses call ``BaseListParser.__init__(self, token, name)`` where
  *token* is the character to split on and *name* describes the separator
  in help text.
  """

  def __init__(self, token=None, name=None):
    assert name
    super(BaseListParser, self).__init__()
    self._token = token
    self._name = name
    self.syntactic_help = 'a %s separated list' % self._name

  def parse(self, argument):
    """Splits *argument* on the configured token, stripping whitespace."""
    if isinstance(argument, list):
      return argument
    if not argument:
      return []
    return [s.strip() for s in argument.split(self._token)]

  def flag_type(self):
    """See base class."""
    return '%s separated list of strings' % self._name
class ListParser(BaseListParser):
  """Parser for a comma-separated list of strings."""

  def __init__(self):
    super(ListParser, self).__init__(',', 'comma')

  def parse(self, argument):
    """Parses argument as comma-separated list of strings (CSV quoting rules)."""
    if isinstance(argument, list):
      return argument
    if not argument:
      return []
    try:
      row = list(csv.reader([argument], strict=True))[0]
    except csv.Error as e:
      # Values containing naked newlines, e.g.
      #   --listflag="$(printf 'hello,\nworld')"
      # used to surface as a raw csv.Error; report a ValueError instead.
      raise ValueError('Unable to parse the value %r as a %s: %s'
                       % (argument, self.flag_type(), e))
    return [s.strip() for s in row]

  def _custom_xml_dom_elements(self, doc):
    """Adds the comma list_separator element to the base class's output."""
    elements = super(ListParser, self)._custom_xml_dom_elements(doc)
    elements.append(_helpers.create_xml_dom_element(
        doc, 'list_separator', repr(',')))
    return elements
class WhitespaceSeparatedListParser(BaseListParser):
  """Parser for a whitespace-separated list of strings."""

  def __init__(self, comma_compat=False):
    """Initializer.

    Args:
      comma_compat: bool, whether to support comma as an additional separator.
        If False then only whitespace is supported. This is intended only for
        backwards compatibility with flags that used to be comma-separated.
    """
    self._comma_compat = comma_compat
    if self._comma_compat:
      name = 'whitespace or comma'
    else:
      name = 'whitespace'
    super(WhitespaceSeparatedListParser, self).__init__(None, name)

  def parse(self, argument):
    """Parses argument as a whitespace-separated list of strings.

    Commas are also accepted as separators when comma_compat is set.

    Args:
      argument: string argument passed in the commandline.

    Returns:
      [str], the parsed flag value.
    """
    if isinstance(argument, list):
      return argument
    if not argument:
      return []
    text = argument
    if self._comma_compat:
      text = text.replace(',', ' ')
    return text.split()

  def _custom_xml_dom_elements(self, doc):
    """Emits one list_separator element per accepted separator character."""
    elements = super(WhitespaceSeparatedListParser,
                     self)._custom_xml_dom_elements(doc)
    extra = ',' if self._comma_compat else ''
    for sep_char in sorted(string.whitespace + extra):
      elements.append(_helpers.create_xml_dom_element(
          doc, 'list_separator', repr(sep_char)))
    return elements
| StarcoderdataPython |
3354106 | from setuptools import setup
# Alternate entry point kept for reference (currently disabled):
# 'sspredict_local.plot = sspredict.plot_ss_ternary:main'
console_script = 'sspredict.predict = sspredict.master:main'

setup(
    name='SSPredict',
    version='v1.1.0',
    license='MIT',
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
    description='Python-based Solid-Solution Strengthening Prediction Tool',
    packages=['sspredict', 'sspredict/make_prediction'],
    # Installs the `sspredict.predict` console command.
    entry_points={
        'console_scripts': [console_script]
    },
    platforms='any',
    install_requires=[
        'numpy >= 1.13.3',
        'matplotlib >= 2.1.0',
        'pandas >= 0.23.4',
        'python-ternary >= 1.0.8',
        'scipy >= 1.5.1'],
)
| StarcoderdataPython |
6437509 | """Support for Huawei LTE sensors."""
import logging
import re
import attr
import voluptuous as vol
from homeassistant.const import (
CONF_URL, CONF_MONITORED_CONDITIONS, STATE_UNKNOWN,
)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from . import DATA_KEY, RouterData
_LOGGER = logging.getLogger(__name__)

# Entity name pattern: "Huawei <device name> <sensor description>".
DEFAULT_NAME_TEMPLATE = 'Huawei {} {}'

# Router data paths polled when the user does not configure
# monitored_conditions explicitly.
DEFAULT_SENSORS = [
    "device_information.WanIPAddress",
    "device_signal.rsrq",
    "device_signal.rsrp",
    "device_signal.rssi",
    "device_signal.sinr",
]

# Per-path presentation metadata.  Supported keys (all optional):
#   name:      human-readable sensor name
#   icon:      mdi icon string, or a callable mapping the current state to one
#   formatter: callable mapping the raw value to a (value, unit) tuple
#   unit:      fixed unit overriding the one inferred from the raw value
SENSOR_META = {
    "device_information.SoftwareVersion": dict(
        name="Software version",
    ),
    "device_information.WanIPAddress": dict(
        name="WAN IP address",
        icon="mdi:ip",
    ),
    "device_information.WanIPv6Address": dict(
        name="WAN IPv6 address",
        icon="mdi:ip",
    ),
    "device_signal.band": dict(
        name="Band",
    ),
    "device_signal.cell_id": dict(
        name="Cell ID",
    ),
    "device_signal.lac": dict(
        name="LAC",
    ),
    "device_signal.mode": dict(
        name="Mode",
        # Raw mode codes reported by the router, mapped to network generation.
        formatter=lambda x: ({
            '0': '2G',
            '2': '3G',
            '7': '4G',
        }.get(x, 'Unknown'), None),
    ),
    "device_signal.pci": dict(
        name="PCI",
    ),
    "device_signal.rsrq": dict(
        name="RSRQ",
        # Signal-bar thresholds per http://www.lte-anbieter.info/technik/rsrq.php
        icon=lambda x:
        (x is None or x < -11) and "mdi:signal-cellular-outline"
        or x < -8 and "mdi:signal-cellular-1"
        or x < -5 and "mdi:signal-cellular-2"
        or "mdi:signal-cellular-3"
    ),
    "device_signal.rsrp": dict(
        name="RSRP",
        # Signal-bar thresholds per http://www.lte-anbieter.info/technik/rsrp.php
        icon=lambda x:
        (x is None or x < -110) and "mdi:signal-cellular-outline"
        or x < -95 and "mdi:signal-cellular-1"
        or x < -80 and "mdi:signal-cellular-2"
        or "mdi:signal-cellular-3"
    ),
    "device_signal.rssi": dict(
        name="RSSI",
        # Signal-bar thresholds per https://eyesaas.com/wi-fi-signal-strength/
        icon=lambda x:
        (x is None or x < -80) and "mdi:signal-cellular-outline"
        or x < -70 and "mdi:signal-cellular-1"
        or x < -60 and "mdi:signal-cellular-2"
        or "mdi:signal-cellular-3"
    ),
    "device_signal.sinr": dict(
        name="SINR",
        # Signal-bar thresholds per http://www.lte-anbieter.info/technik/sinr.php
        icon=lambda x:
        (x is None or x < 0) and "mdi:signal-cellular-outline"
        or x < 5 and "mdi:signal-cellular-1"
        or x < 10 and "mdi:signal-cellular-2"
        or "mdi:signal-cellular-3"
    ),
}

# Platform configuration: optional router URL plus the list of paths to poll.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_URL): cv.url,
    vol.Optional(
        CONF_MONITORED_CONDITIONS, default=DEFAULT_SENSORS): cv.ensure_list,
})
def setup_platform(
        hass, config, add_entities, discovery_info):
    """Set up Huawei LTE sensor devices."""
    router_data = hass.data[DATA_KEY].get_data(config)
    paths = config.get(CONF_MONITORED_CONDITIONS)
    # Subscribe every path first so the shared poller fetches all of them.
    for path in paths:
        router_data.subscribe(path)
    entities = [
        HuaweiLteSensor(router_data, path, SENSOR_META.get(path, {}))
        for path in paths
    ]
    add_entities(entities, True)
def format_default(value):
    """Split *value* into a (numeric value, unit) pair when possible.

    Inputs like "-71dBm" or "15 dB" yield (float, unit-string); anything
    that does not end in letters, or whose numeric part fails to parse,
    is returned unchanged with unit None.
    """
    unit = None
    if value is None:
        return value, unit
    # Clean up value and infer unit, e.g. -71dBm, 15 dB
    match = re.match(r"(?P<value>.+?)\s*(?P<unit>[a-zA-Z]+)\s*$", str(value))
    if match:
        try:
            numeric = float(match.group("value"))
        except ValueError:
            pass
        else:
            value = numeric
            unit = match.group("unit")
    return value, unit
@attr.s
class HuaweiLteSensor(Entity):
    """Huawei LTE sensor entity."""

    data = attr.ib(type=RouterData)
    path = attr.ib(type=list)
    meta = attr.ib(type=dict)

    _state = attr.ib(init=False, default=STATE_UNKNOWN)
    _unit = attr.ib(init=False, type=str)

    @property
    def unique_id(self) -> str:
        """Return unique ID for sensor."""
        serial = self.data["device_information.SerialNumber"]
        return "{}_{}".format(serial, ".".join(self.path))

    @property
    def name(self) -> str:
        """Return sensor name."""
        device_name = self.data["device_information.DeviceName"]
        sensor_name = self.meta.get("name", self.path)
        return DEFAULT_NAME_TEMPLATE.format(device_name, sensor_name)

    @property
    def state(self):
        """Return sensor state."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return sensor's unit of measurement."""
        return self.meta.get("unit", self._unit)

    @property
    def icon(self):
        """Return icon for sensor; meta icons may be callables of the state."""
        icon = self.meta.get("icon")
        return icon(self.state) if callable(icon) else icon

    def update(self):
        """Update state."""
        self.data.update()
        value = None
        try:
            value = self.data[self.path]
        except KeyError:
            _LOGGER.warning("%s not in data", self.path)
        formatter = self.meta.get("formatter")
        if not callable(formatter):
            formatter = format_default
        self._state, self._unit = formatter(value)
| StarcoderdataPython |
class Node:
    """A binary-tree node holding a value and optional child links."""

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
def post_order(node, output_list):
    """Append the subtree rooted at *node* to output_list in post-order.

    Visit order is left subtree, right subtree, then the node itself;
    falsy nodes (e.g. None) end the recursion.
    """
    if not node:
        return
    post_order(node.left, output_list)
    post_order(node.right, output_list)
    output_list.append(node)
# Build a small complete binary search tree:
#
#         4
#       /   \
#      2     6
#     / \   / \
#    1   3 5   7
tree = Node(4)
tree.left = Node(2)
tree.right = Node(6)
tree.left.left = Node(1)
tree.left.right = Node(3)
tree.right.left = Node(5)
tree.right.right = Node(7)

# Collect the nodes in post-order; expected value order: 1 3 2 5 7 6 4.
post_order_list = []
post_order(tree, post_order_list)
for node in post_order_list:
    pass
    # check node and its descendents for presence of n1 and n2
    # need to get rid of post-order list later - no extra memory usage permitted
| StarcoderdataPython |
3370478 | import cv2
import numpy as np
import sys
import time
from classes.process import Process
from classes.video import Video
import pandas as pd
import os
################################################################################
######### Change these depending on where your recordings are located ##########
# Directory scanned for .avi/.mp4 recordings, relative to the working dir.
rec_dir = 'recordings/'
################################################################################
def getVideoHeartRate(video,process,output_name):
frame = np.zeros((10,10,3),np.uint8)
bpm = 0
# for exporting to csv
bpm_all = []
timestamps = []
# Run the loop
process.reset()
video.start()
max_frame_num = int(video.cap.get(cv2.CAP_PROP_FRAME_COUNT))
iter_percent = 0 # for printing percent done
hasNextFrame = True
while hasNextFrame == True:
frame = video.get_frame()
if frame is not None:
process.frame_in = frame
process.run()
f_fr = process.frame_ROI #get the face
bpm = process.bpm #get the bpm change over the time
f_fr = cv2.cvtColor(f_fr, cv2.COLOR_RGB2BGR)
f_fr = np.transpose(f_fr,(0,1,2)).copy()
bpm_all.append(bpm)
curr_frame_num = video.cap.get(cv2.CAP_PROP_POS_FRAMES)
timestamps.append(curr_frame_num/video.fps)
else:
hasNextFrame = False
# every so often, show percent done
percent_done = curr_frame_num/max_frame_num*100
if (percent_done > iter_percent):
print('current frame: %.0f' % curr_frame_num)
print('percent done: %.1f%%' % percent_done)
iter_percent += 20
# Export predicted bpm to .csv format
df = pd.DataFrame({'BPM': bpm_all, 'TIMESTAMP_SEC': timestamps})
df.to_csv(os.path.join('output', 'heartrate_' + output_name + '.csv'), sep=',', index=False)
print('🎉 Done! 🎉')
print('See the output file:')
print('output/' + 'heartrate_' + output_name + '.csv')
if __name__ == '__main__':
# Loop through specific files and analyze their video
files_in_dir = [f for f in os.listdir(rec_dir) if os.path.isfile(os.path.join(rec_dir, f))]
i = 0
for f in files_in_dir:
video = Video()
process = Process()
if f.split('.')[1] == 'avi' or f.split('.')[1] == 'mp4':
video.dirname = os.path.join(rec_dir,f)
output_name = f.split('.')[0]
print(f'Reading from {video.dirname}')
getVideoHeartRate(video, process, output_name)
i += 1
print(f"""Number of files to go: {len(files_in_dir) - i}
Percent files done: {i/len(files_in_dir)*100}\n""") | StarcoderdataPython |
4850491 |
"""
The output is shape 60 by 23589 where 60 is the number of stimulus bins and 23589
is the number of predicted neurons. The out_layer weights have shape 23589 by 480
where 23589 is the number of predicted neurons and 480 is the number of convolutional
units in the conv layer. There are 480 convolutional units because the stimulus
is length 60 and we used 8 $C^{out}$ convolutional channels.
"""; | StarcoderdataPython |
6450255 | # Copyright (C) 2021 ServiceNow, Inc.
""" Vectorization utilties """
import numpy as np
def convert_text_to_vector(text, model, method='sum'):
""" Embed the tokens piece of text with a model.
Tokens are produced by a simple whitespace split on the text
if the text is provided as a string.
:param text: text string or list
:param model: word embedding model - must implement subscription by word
e.g. mode['word']
:param method: how to aggregate the individual token vectors
sum - sum them
mean - average them
None - no aggregation, return a matrix of one vector per token
"""
if type(text) == str:
text = text.split()
elif type(text) == list:
pass
else:
raise ValueError('text must be a str or list')
vectors = [model[word] for word in text if word in model]
if len(vectors) == 0:
vectors = np.zeros(shape=(model.vector_size,))
return vectors
try:
vectors = np.stack(vectors)
except Exception as e:
print(e)
print(vectors)
if method == 'sum':
vectors = np.sum(vectors, axis=0)
elif method == 'mean':
vectors = np.mean(vectors, axis=0)
elif method == None:
vectors = vectors
else:
raise ValueError(f'Unknown method: {method}')
return vectors
def convert_dfcol_text_to_vector(df, col, model, method):
""" Convert a text column of a df (col) to a vector, using
word embedding model model and vector aggregation method method.
:param df: input dataframe
:param col: text column to vectorize
:param model: embedding model, must be subscriptable by word (e.g. model['word'])
:param method: vector aggregation method
:returns: an np.ndarray of shape (n_rows, n_vector_dim)
"""
X = df[col].apply(lambda x: convert_text_to_vector(x, model, method=method))
X = np.stack(X.values)
return X | StarcoderdataPython |
277077 | from adminsortable2.admin import SortableAdminMixin
from django.contrib import admin
from core.admin.filters.event import EventFilter
from core.models import Event
class EventPageMenuAdmin(SortableAdminMixin, admin.ModelAdmin):
list_display = ('title', 'event', 'url', 'position')
list_filter = (EventFilter,)
sortable = 'position'
def get_queryset(self, request):
qs = super(EventPageMenuAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(event__team=request.user)
def get_form(self, request, obj=None, **kwargs):
form = super(EventPageMenuAdmin, self).get_form(request, obj, **kwargs)
if not request.user.is_superuser:
if 'event' in form.base_fields:
form.base_fields['event'].queryset = Event.objects.filter(
team=request.user
)
return form
def get_readonly_fields(self, request, obj=None):
if obj and not request.user.is_superuser:
# Don't let change objects for events that already happened
if not obj.event.is_upcoming():
return set([x.name for x in self.model._meta.fields])
return self.readonly_fields
| StarcoderdataPython |
4858631 | import fnmatch
import os
import re
from string import Template
def get_hole_files():
hole_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'envs', 'assets', 'hole')
return sorted([file for file in os.listdir(hole_dir) if fnmatch.fnmatch(file, '*id=*.xml')])
def get_experiment_files():
hole_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'envs', 'assets')
return sorted([file for file in os.listdir(hole_dir) if fnmatch.fnmatch(file, '*id=*.xml')])
def gen_experiment_files(hole_files):
# Get the template file as a string.
experiment_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'envs', 'assets')
template_filename = os.path.join(experiment_dir, 'full_peg_insertion_experiment_moving_template.xml')
with open(template_filename) as template_file:
template_data = template_file.read()
gravity_options = [('enable', ''), ('disable', '_no_gravity')]
# Convert the template into each file.
s = Template(template_data)
for hole_file in hole_files:
for enable, gravity_file_str in gravity_options:
experiment_data = s.substitute(hole_filename=hole_file, gravity_enable=enable)
hole_id = re.search("id=([0-9]*)", hole_file).group(1)
experiment_filename = os.path.join(experiment_dir,
"full_peg_insertion_experiment{gravity_file_str}_moving_hole_id={hole_id}.xml".format(
gravity_file_str=gravity_file_str, hole_id=hole_id))
with open(experiment_filename, 'w') as experiment_file:
experiment_file.write(experiment_data)
if __name__ == '__main__':
files = get_hole_files()
gen_experiment_files(files)
| StarcoderdataPython |
1669545 | """genomic set member column a2d manifest
Revision ID: d9742926014b
Revises: <PASSWORD>
Create Date: 2020-03-31 08:51:41.082152
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd9742926014b'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('genomic_set_member', sa.Column('gem_a2d_manifest_job_run_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'genomic_set_member', 'genomic_job_run', ['gem_a2d_manifest_job_run_id'], ['id'])
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'genomic_set_member', type_='foreignkey')
op.drop_column('genomic_set_member', 'gem_a2d_manifest_job_run_id')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| StarcoderdataPython |
6675506 | <reponame>altran/Awesome-Competence-System
from django.shortcuts import render_to_response
from django.template import RequestContext
from localsettings import SSO_URL
def error401(request):
return render_to_response('401.html', {'SSO_URL': SSO_URL}, context_instance=RequestContext(request))
def error503(request):
return render_to_response('503.html', {'SSO_URL': SSO_URL}, context_instance=RequestContext(request)) | StarcoderdataPython |
9738046 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RUuid(RPackage):
"""Tools for Generating and Handling of UUIDs.
Tools for generating and handling of UUIDs (Universally Unique
Identifiers)."""
cran = "uuid"
version('1.0-3', sha256='456e4633659f20242fd7cd585ad005a3e07265f1d1db383fca6794c8ac2c8346')
version('0.1-4', sha256='98e0249dda17434bfa209c2058e9911e576963d4599be9f7ea946e664f8ca93e')
version('0.1-2', sha256='dd71704dc336b0857981b92a75ed9877d4ca47780b1682def28839304cd3b1be')
depends_on('r@2.9.0:', type=('build', 'run'))
| StarcoderdataPython |
338100 | """This module contains the general information for LsbootStorage ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class LsbootStorageConsts():
ACCESS_READ_WRITE = "read-write"
TYPE_STORAGE = "storage"
class LsbootStorage(ManagedObject):
"""This is LsbootStorage class."""
consts = LsbootStorageConsts()
naming_props = set([])
mo_meta = MoMeta("LsbootStorage", "lsbootStorage", "storage-read-write", VersionMeta.Version151f, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'lsbootDef'], [], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"access": MoPropertyMeta("access", "access", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["read-write"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"order": MoPropertyMeta("order", "order", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["1", "2", "3", "4", "5"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["storage"], []),
}
prop_map = {
"access": "access",
"childAction": "child_action",
"dn": "dn",
"order": "order",
"rn": "rn",
"status": "status",
"type": "type",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.access = None
self.child_action = None
self.order = None
self.status = None
self.type = None
ManagedObject.__init__(self, "LsbootStorage", parent_mo_or_dn, **kwargs)
| StarcoderdataPython |
82731 | # coding: utf-8
"""
DocuSign Rooms API - v2
An API for an integrator to access the features of DocuSign Rooms # noqa: E501
OpenAPI spec version: v2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..client.configuration import Configuration
from ..client.api_client import ApiClient
class RoomTemplatesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_room_templates(self, account_id, **kwargs):
"""
Returns all room templates that the active user has access to
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_room_templates(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: (required)
:param int office_id: Get all room templates you have access to for this office. Response includes Company and Region level If onlyAssignable is true, and no officeId is provided, user's default office is assumed.
:param bool only_assignable: Get list of templates you have access to. Default value false.
:param bool only_enabled: When set to true, only returns room templates that are not disabled.
:param int count: Number of room templates to return. Defaults to the maximum which is 100.
:param int start_position: Position of the first item in the total results. Defaults to 0.
:return: RoomTemplatesSummaryList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_room_templates_with_http_info(account_id, **kwargs)
else:
(data) = self.get_room_templates_with_http_info(account_id, **kwargs)
return data
def get_room_templates_with_http_info(self, account_id, **kwargs):
"""
Returns all room templates that the active user has access to
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_room_templates_with_http_info(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: (required)
:param int office_id: Get all room templates you have access to for this office. Response includes Company and Region level If onlyAssignable is true, and no officeId is provided, user's default office is assumed.
:param bool only_assignable: Get list of templates you have access to. Default value false.
:param bool only_enabled: When set to true, only returns room templates that are not disabled.
:param int count: Number of room templates to return. Defaults to the maximum which is 100.
:param int start_position: Position of the first item in the total results. Defaults to 0.
:return: RoomTemplatesSummaryList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'office_id', 'only_assignable', 'only_enabled', 'count', 'start_position']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_room_templates" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_room_templates`")
collection_formats = {}
resource_path = '/v2/accounts/{accountId}/room_templates'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
query_params = {}
if 'office_id' in params:
query_params['officeId'] = params['office_id']
if 'only_assignable' in params:
query_params['onlyAssignable'] = params['only_assignable']
if 'only_enabled' in params:
query_params['onlyEnabled'] = params['only_enabled']
if 'count' in params:
query_params['count'] = params['count']
if 'start_position' in params:
query_params['startPosition'] = params['start_position']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/plain', 'application/json', 'text/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RoomTemplatesSummaryList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| StarcoderdataPython |
6427028 | <reponame>coagulant/django-recommends<filename>recommends/storages/redis/managers.py
from recommends.managers import DictStorageManager
class RedisStorageManager(DictStorageManager):
def similarity_for_objects(self, score, *args, **kwargs):
spec = super(RedisStorageManager, self).similarity_for_objects(*args, **kwargs)
spec['score'] = score
return spec
def filter_for_object(self, obj):
ctype_id = self.get_ctype_id_for_obj(obj)
return {'object_ctype': ctype_id, 'object_id': obj.id}
def filter_for_related_object(self, related_obj):
ctype_id = self.get_ctype_id_for_obj(related_obj)
return {'related_object_ctype': ctype_id, 'related_object_id': related_obj.id}
| StarcoderdataPython |
3410121 | <reponame>ab1cd2eefre3/stock-trading-website
# Generated by Django 3.0.2 on 2021-02-27 01:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shares', '0004_auto_20210224_0020'),
]
operations = [
migrations.RemoveField(
model_name='closed',
name='closed_at',
),
migrations.RemoveField(
model_name='open',
name='purchased_at',
),
migrations.AddField(
model_name='closed',
name='gains',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10000000000000),
preserve_default=False,
),
migrations.AddField(
model_name='open',
name='position',
field=models.DecimalField(decimal_places=2, default=0, max_digits=10000000000000),
preserve_default=False,
),
migrations.AlterField(
model_name='account',
name='money',
field=models.DecimalField(decimal_places=2, default=10000.0, max_digits=10000000000000),
),
]
| StarcoderdataPython |
5166300 | <filename>app/core/views.py
from rest_framework import status
from rest_framework.response import Response
from django.db.models import ProtectedError
class DestroyProtectedMixin:
"""
Mixin for handling ProtectedError exceptions.
"""
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
try:
self.perform_destroy(instance)
except ProtectedError as e:
return Response(
{"protected": str(e)},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(status=status.HTTP_204_NO_CONTENT)
| StarcoderdataPython |
11260148 | #!/usr/bin/env python3
"""Functionality to create a :class:`Module`-hierarchy."""
import collections
from schedsi.cpu import request as cpurequest
from schedsi import module, threads
class ModuleBuilder:
"""Build static hierarchies."""
def __init__(self, name=None, parent=None, *, scheduler):
"""Create a :class:`ModuleBuilder`."""
if name is None:
name = '0'
self.module = module.Module(name, parent, scheduler)
self.vcpus = []
def add_module(self, name=None, vcpu_add_args=None, *, scheduler, vcpus=1):
"""Attach a child :class:`Module`.
The `name` is auto-generated, if it is `None`,
as `self.spawn_name + "." + len(self.children)`.
`vcpu_add_args` may be a single `dict`, in which
case it is used for all vcpus, or it can be a list
thereof, in which case it must have a length equal
to `vcpus`.
Returns the child-:class:`Module`.
"""
if name is None:
name = self.module.name + '.' + str(self.module.num_children())
madder = ModuleBuilder(name, self.module, scheduler=scheduler)
if not isinstance(vcpu_add_args, collections.abc.Sequence):
vcpu_add_args = [vcpu_add_args] * vcpus
assert len(vcpu_add_args) == vcpus
self.vcpus.append((madder.module, vcpu_add_args))
return madder
def add_thread(self, thread, add_args=None, **kwargs):
"""Add a :class:`Thread`.
`thread` is the class.
All parameters are forwarded to the init-function.
Returns `self`.
"""
if add_args is None:
add_args = {}
self.module.add_thread(thread(self.module, **kwargs), **add_args)
return self
def add_vcpus(self):
"""Create all VCPUs for the attached children.
VCPUs can be added incrementally while attaching modules.
Returns `self`.
"""
for child, vcpu_add_args in self.vcpus:
for i, add_args in enumerate(vcpu_add_args):
tid = child.name + '-VCPU' + str(i)
self.add_thread(threads.VCPUThread, add_args, child=child, tid=tid)
self.vcpus.clear()
return self
class ModuleBuilderThread(threads.Thread):
"""A :class:`Thread` that creates (and attaches) a :class:`Module`.
Can also do computation on the side.
"""
def __init__(self, parent, name=None, *args, time, vcpus=1, scheduler,
units=-1, ready_time=None, **kwargs):
"""Create a :class:`ModuleBuilderThread`.
`time` refers to the time the module should be spawned.
`units` may be `-1` to indicate the thread should be finished when the thread spawns.
`ready_time` may be `None` to indicate it coinciding with `time`.
`time` must be >= `ready_time` and <= `ready_time` + `units` if `units` != `None`.
"""
if units == -1:
assert ready_time is None, 'If units is -1, ready_time must be None.'
units = 0
self.destroy_after_spawn = True
else:
assert ready_time is not None, 'If units is not None, ready_time must not be None.'
self.destroy_after_spawn = False
if ready_time is None:
ready_time = time
if not isinstance(parent, ModuleBuilderThread):
self.init_args = None
super().__init__(parent, *args, ready_time=ready_time, units=units, **kwargs)
self.module.add_thread(self)
else:
# self._late_init() will be called by the parent
# see documentation of _late_init()
self.init_args = (args, kwargs)
self.init_args[1].update({'ready_time': ready_time, 'units': units})
assert time >= ready_time, 'Spawn time must not come before ready_time.'
assert units is None or time <= ready_time + units, \
'Spawn time must not exceed execution time.'
self.spawn_time = time
self.spawn_name = name
self.scheduler = scheduler
self.threads = []
self.vcpus = vcpus
self.spawn_skew = None
def _late_init(self, parent):
"""Call `super().__init__`.
We don't initialize `super()` if :attr:`parent` was a
:class:`ModuleBuilderThread`, since :class:`Thread` expects
a proper :class:`Module`.
"""
assert self.init_args is not None, \
'_late_init called after super() as already initialized.'
super().__init__(parent, *self.init_args[0], **self.init_args[1])
self.init_args = None
def disable_spawning(self):
"""Pass execution directly to `super()`.
This can be called once all :class:`Modules` are spawned.
"""
self._execute = super()._execute
self.suspend = super().suspend
self.end = super().end
def is_spawning_disabled(self):
"""Returns True if :meth:`disable_spawning` was called, False otherwise."""
return self._execute == super()._execute
def is_finished(self):
"""Check if the :class:`Thread` is finished.
See :class:`Thread.is_finished`.
"""
return self.is_spawning_disabled() and super().is_finished()
# this gets overwritten in disable_spawning()
def _execute(self, current_time, run_time): # pylint: disable=method-hidden
"""Simulate execution.
See :meth:`Thread._execute`.
Spawns thread when it's time.
Reduces `run_time` if we would miss the spawn time.
"""
if self.spawn_time <= current_time:
self._spawn_module(current_time)
self.disable_spawning()
if super().is_finished():
self._update_ready_time(current_time)
yield cpurequest.Request.idle()
return
elif run_time is None or current_time + run_time > self.spawn_time:
run_time = self.spawn_time - current_time
return (yield from super()._execute(current_time, run_time))
def suspend(self, current_time): # pylint: disable=method-hidden
"""Become suspended.
See :meth:`Thread.suspend`.
Spawns a :class:`Thread` if it's time.
"""
if self.spawn_time == current_time:
self._spawn_module(current_time)
self.disable_spawning()
else:
assert self.spawn_time > current_time, 'Ran over spawn time.'
super().suspend(current_time)
def end(self): # pylint: disable=method-hidden
"""End execution.
See :meth:`Thread.end`.
This should not be called before disabling spawning.
"""
assert self.is_spawning_disabled(), 'Execution ended before spawning was disabled.'
assert False, 'This function should have been replaced in disable_spawning().'
super().end()
def _spawn_module(self, current_time):
"""Spawn the :class:`Module`."""
assert not self.is_spawning_disabled(), 'Spawning is disabled.'
assert self.spawn_time <= current_time
name = self.spawn_name
if name is None:
name = self.module.name + '.' + str(self.module.num_children())
child = module.Module(name, self.module, scheduler=self.scheduler)
for (thread, args, kwargs) in self.threads:
if isinstance(thread, ModuleBuilderThread):
assert args is None, \
'A ModuleBuilderThread is already created and should not get any arguments.'
assert kwargs is None
thread._late_init(child) # pylint: disable=protected-access
else:
if kwargs.get('ready_time', None) is None:
kwargs['ready_time'] = self.spawn_time
elif kwargs['ready_time'] < 0:
kwargs['ready_time'] = current_time - kwargs['ready_time']
thread = thread(child, *args, **kwargs)
child.add_thread(thread)
for _ in range(0, self.vcpus):
self.module.add_thread(threads.VCPUThread(self.module, child=child))
self.spawn_skew = current_time - self.spawn_time
def get_statistics(self, current_time):
"""Obtain statistics.
See :meth:`Thread.get_statistics`.
"""
stats = super().get_statistics(current_time)
stats.update({'spawn_skew': self.spawn_skew})
def add_thread(self, thread, *args, **kwargs):
"""Add a :class:`Thread`.
See :meth:`ModuleBuilder.add_thread`.
Negative `ready_time`s will be replaced by `current_time - ready_time`
at module spawn.
If `ready_time` is not present, it will be replaced by `current_time`
at module spawn.
Returns `self`.
"""
assert not self.is_spawning_disabled(), 'Spawning was disabled.'
self.threads.append((thread, args, kwargs))
return self
def add_module(self, name=None, time=None, *args, scheduler, **kwargs):
"""Add a :class:`Module`.
No static hierarchy can be built, so all building happens through
a :class:`ModuleBuilderThread` that is returned.
All parameters are forwarded to the :class:`ModuleBuilderThread` constructor.
If `time` is `None`, `time` is set to :attr:`time`.
Returns the :class:`ModuleBuilderThread` for the child-:class:`Module`.
"""
assert not self.is_spawning_disabled(), 'Spawning was disabled.'
if time is None:
time = self.spawn_time
thread = ModuleBuilderThread(self, name, *args, time=time, scheduler=scheduler, **kwargs)
self.threads.append((thread, None, None))
return thread
| StarcoderdataPython |
5094469 | <gh_stars>1-10
import sys
import os
import pickle
import numpy as np
def write_vtk(fn, v, f):
len_v = v.shape[0]
len_f = f.shape[0]
fp = open(fn, 'w')
fp.write("# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET POLYDATA\n")
fp.write(f"POINTS {len_v} float\n")
for row in v:
fp.write(f"{row[0]} {row[1]} {row[2]}\n")
fp.write(f"POLYGONS {len_f} {len_f * 4}\n")
for row in f:
fp.write(f"3 {row[0]} {row[1]} {row[2]}\n")
fp.close()
def main():
try:
sys.path.append("meshcnn")
from mesh import export_spheres
export_spheres(range(8), "mesh_files")
except ImportError:
print("ImportError occurred. Will download precomputed mesh files instead...")
import subprocess
dest = "mesh_files"
if not os.path.exists(dest):
os.makedirs(dest)
fname = 'icosphere_{}.pkl'
for i in range(8):
url = 'http://island.me.berkeley.edu/ugscnn/mesh_files/' + fname.format(i)
command = ["wget", "--no-check-certificate", "-P", dest, url]
try:
download_state = subprocess.call(command)
except Exception as e:
print(e)
fname_vtk = 'icosphere_{}.vtk'
for i in range(8):
try:
pkl = pickle.load(open(dest + '/' + fname.format(i), "rb"))
write_vtk(dest + '/' + fname_vtk.format(i), pkl['V'], pkl['F'])
except Exception as e:
print(e)
if __name__ == '__main__':
main()
| StarcoderdataPython |
9759786 | <gh_stars>0
__author__ = 'Adam'
| StarcoderdataPython |
5154756 | try:
import netifaces
except:
print "Please install the netifaces module from pypi"
print "e.g. sudo pip install netifaces"
exit(-1)
import gevent
from gevent.lock import RLock
import sys, os
import gtwconfig as CONFIG
import mptnUtils as MPTN
from transport_abstract import Transport
import traceback
import color_logging, logging
logger = logging
from gevent import socket
import struct
import json
# UDP packet format
# BYTE 0-1: Magic number 0xAA 0x55
# BYTE 2: Node ID
# 0: new device
# BYTE 3-6: IP address
# BYTE 7-8: Port
# BYTE 9: payload type
# 1 : Property change
# 2 : Node Info
# Type 1
# BYTE 10: Number of property change
# BYTE 11: Property payload
# BYTE 12: Property number
# BYTE 13: Property Length
# BYTE 14-14+$(11): Property Value
# Type 2:
# No payload
# The complete design document is available at
# https://docs.google.com/document/d/1IrsSE-QA0cvoSgMTKLS3NJvTj24pOzujBMnZ8_1AxWk/edit?usp=sharing
class UDPDevice(object):
def __init__(self,host_id,ip,port):
self.host_id = host_id
self.ip = ip
self.port = port
# self.wuclasses=[]
# self.wuobjects=[]
class UDPTransport(Transport):
def __init__(self, dev_address, name):
super(UDPTransport, self).__init__(dev_address, name)
self.enterLearnMode = False
self.last_host_id = 0
self._device_filename = "table_udp_devices.json"
self.devices=[]
self._devices_lookup = {}
if not os.path.isfile(self._device_filename):
with open(self._device_filename, "w") as f:
json.dump([], f, sort_keys=True,indent=2)
self.loadDevice()
try:
nc_if = netifaces.ifaddresses(dev_address)[netifaces.AF_INET][0]
self._node_id = (nc_if['addr'], nc_if['netmask'])
self._ip = MPTN.ID_FROM_STRING(self._node_id[0])
self._netmask = MPTN.ID_FROM_STRING(self._node_id[1])
except (IndexError, KeyError, ValueError):
logger.error("cannot find any IP address from the IP network interface %s" % CONFIG.TRANSPORT_INTERFACE_ADDR)
self._clear_settings_db()
exit(-1)
self._prefix = self._ip & self._netmask
self._hostmask = ((1 << (MPTN.IP_ADDRESS_LEN * 8)) - 1) ^ self._prefix
self._port = MPTN.MPTN_UDP_PORT
self._init_socket()
def _init_socket(self):
self.sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('', self._port))
logger.info("transport interface %s initialized on %s IP=%s PORT=%d with Node ID %s/%s" % (self._name,
self._dev_addr, MPTN.ID_TO_STRING(self._ip), self._port,
MPTN.ID_TO_STRING(self._node_id[0]), str(self._node_id[1])
)
)
def get_addr_len(self):
return MPTN.IP_ADDRESS_LEN
def recv(self):
while True:
try:
data, addr = self.sock.recvfrom(1024)
if ord(data[0]) != 0xAA or ord(data[1]) != 0x55:
# Drop the unknown packet
logger.error("Get unknown packet %s" % (map(ord, data[:2])))
continue
host_id = ord(data[2])
ip = struct.unpack("<I",data[3:7])[0]
port = struct.unpack('<H',data[7:9])[0]
t = ord(data[9])
logger.debug("recv type=%s, data=%s, addr=%s:%d, short_id=%d, ip=%d, port=%d" % (t, map(ord, data), addr[0], addr[1], host_id, ip, port))
node_id = self._prefix | host_id
if data != "":
# logger.debug("recv message %s from address %s" % (data, str(addr)))
if t == 1:
# data[10] is the size of the payload
# block unknown sender
d_ip, d_port = self.getDeviceAddress(host_id)
if d_ip == 0 or d_ip != MPTN.ID_FROM_STRING(addr[0]) or d_port != addr[1]:
logger.debug("drop unknown sender's packet")
continue
data = data[11:]
return (node_id, data)
elif t == 2:
self.refreshDeviceData(host_id, ip, port, t)
continue
else:
continue
except Exception as e:
ret = traceback.format_exc()
logger.error("receives exception %s\n%s" % (str(e), ret))
self.sock.close()
self._init_socket()
return (None, None)
def send_raw(self, address, payload, raw_type=1):
    """Frame *payload* (a list of byte values) and send it to *address*.

    The header mirrors the format parsed in ``recv``: magic 0xAA 0x55,
    destination host id, this interface's IP ("<I") and port ("<H"),
    the message type, then the payload length.  Returns None on success
    or a formatted traceback string on failure.

    NOTE(review): Python 2 code — ``chr``/``struct.pack`` concatenation
    builds a ``str``; this does not run unchanged on Python 3.
    """
    ret = None
    with self._global_lock:
        try:
            # Low bits of the node id select the registered device.
            host_id = address & self._hostmask
            address, port = self.getDeviceAddress(host_id)
            if address == 0: return None
            header = chr(0xaa) + chr(0x55) + chr(host_id) + struct.pack('<I', self._ip) + struct.pack('<H', self._port) + chr(raw_type) + chr(len(payload))
            message = "".join(map(chr, payload))
            logger.info("sending %d bytes %s to %s, port %d" % (len(message), map(ord, message), MPTN.ID_TO_STRING(address),port))
            # A fresh ephemeral socket is used for each send.
            sock = socket.socket(socket.AF_INET,  # Internet
                                 socket.SOCK_DGRAM)  # UDP
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            #sock.sendto(message, (MPTN.ID_TO_STRING(address), MPTN.MPTN_UDP_PORT))
            sock.sendto(header+message, (MPTN.ID_TO_STRING(address), port))
            sock.close()
        except Exception as e:
            ret = traceback.format_exc()
            logger.error("send_raw exception %s\n%s" % (str(e), ret))
    return ret
def send(self, address, payload):
    """Send *payload* to *address* via ``send_raw``.

    Returns ``(True, None)`` on success, else ``(False, msg)`` where
    *msg* describes the error reported by ``send_raw``.
    """
    ret = self.send_raw(address, payload)
    if ret is None: return (True, None)
    # BUG FIX: the error path used self._transport.get_name(), but this
    # object *is* the transport and defines no _transport attribute (it
    # logs its own self._name elsewhere), so every send failure raised
    # AttributeError instead of reporting the real error.
    msg = "%s fails to send to address %d with error %s\n\tmsg: %s" % (self._name, address, ret, payload)
    logger.error(msg)
    return (False, msg)
def getDeviceAddress(self, host_id):
    """Look up the (ip, port) registered for *host_id*.

    Returns ``(0, 0)`` and logs an error when the id is unknown.
    """
    # dict.get returns None for a missing key; the original relied on the
    # resulting AttributeError being caught by a broad ``except Exception``,
    # which would also have silently masked any unrelated error.  Test for
    # the missing entry explicitly instead.
    d = self._devices_lookup.get(host_id)
    if d is None:
        logger.error('Address %d is not registered yet' % host_id)
        return 0, 0
    return (d.ip, d.port)
def getDeviceType(self, address):
    """Return the device type tuple for *address*.

    NOTE(review): the real pyzwave lookup is commented out; this is a
    stub that always answers (0xff, 0xff, 0xff).
    """
    ret = None
    logger.info('get device type for %x' % address)
    with self._global_lock:
        try:
            # ret = pyzwave.getDeviceType(address)
            ret = (0xff,0xff,0xff)
            pass
        except Exception as e:
            logger.error("getDeviceType exception %s\n%s" % (str(e), traceback.format_exc()))
    return ret
def getNodeRoutingInfo(self, address):
    """Return routing info for *address*.

    NOTE(review): the pyzwave-based implementation is commented out;
    this stub always returns an empty list.
    """
    ret = []
    with self._global_lock:
        # ret = pyzwave.routing(address)
        # try:
        #     ret.remove(gateway_id)
        # except ValueError:
        pass
    return ret
def routing(self):
    """Build the routing table mapping each discovered node to its routes."""
    return {node_raddr: self.getNodeRoutingInfo(node_raddr)
            for node_raddr in self.discover()}
def refreshDeviceData(self,host_id, ip, port, t):
    """Handle a type-2 (registration) packet from *host_id* at *ip*:*port*.

    Behavior depends on the current mode:
      - ADD mode, unknown device : allocate the next free host id (skipping
        this gateway's own id), persist the new device, then leave learn mode.
      - ADD mode, known device   : update the stored (ip, port), persist,
        then leave learn mode.
      - DEL mode, known device   : notify the device, remove it, persist,
        then leave learn mode.
      - STOP mode, known device  : re-acknowledge an unchanged registration;
        any change/add attempt is rejected with an error log.

    NOTE(review): the source's indentation was mangled; the nesting below
    is a best-effort reconstruction — verify against upstream before
    relying on the exact ack/return placement.
    """
    # Ignore our own id and any non-registration packet.
    if host_id == (self._ip & self._hostmask):
        return
    if t != 2:
        return
    found = (host_id in self._devices_lookup)
    if self.enterLearnMode:
        if not found and self._mode == MPTN.ADD_MODE:
            # Scan for the smallest unused host id.
            newid = 1
            while True:
                found = False
                for d in self.devices:
                    if d.host_id == newid:
                        newid = newid + 1
                        if newid & self._hostmask == 0:
                            logger.error("ID is exhausted")
                            return
                        found = True
                        break
                else:
                    # for-else: no device uses newid; still must not collide
                    # with the gateway's own host id.
                    if newid == (self._ip & self._hostmask):
                        newid += 1
                        if newid & self._hostmask == 0:
                            logger.error("ID is exhausted")
                            return
                        found = True
                if not found:
                    newd = UDPDevice(newid,ip,port)
                    self.devices.append(newd)
                    self.saveDevice()
                    self.last_host_id = newid
                    self.stop()
                    logger.debug("device added %s %s %s" % (str(newid),str(MPTN.ID_TO_STRING(ip)),str(port)))
                    break
        elif found and self._mode == MPTN.ADD_MODE:
            # Re-registration of a known device: refresh its endpoint.
            for d in self.devices:
                if d.host_id == host_id:
                    logger.debug("device updated for %s from %s:%s to %s:%s" % (str(host_id),str(MPTN.ID_TO_STRING(d.ip)),str(d.port),str(MPTN.ID_TO_STRING(ip)),str(port)))
                    d.ip = ip
                    d.port = port
                    self.saveDevice()
                    self.last_host_id = d.host_id
                    self.stop()
                    break
        elif found and self._mode == MPTN.DEL_MODE:
            for i in xrange(len(self.devices)):
                d = self.devices[i]
                if d.host_id == host_id:
                    # Tell the device it has been removed, then forget it.
                    self.send_raw(host_id,[0],raw_type=2)
                    del self.devices[i]
                    self.saveDevice()
                    self.last_host_id = host_id
                    self.stop()
                    logger.debug("device deleted %s %s %s" % (str(host_id),str(MPTN.ID_TO_STRING(ip)),str(port)))
                    self.last_host_id = 0
                    return
            self.send_raw(self.last_host_id,[self.last_host_id],raw_type=2)
            return
    elif found: # STOP mode
        for d in self.devices:
            if d.host_id == host_id:
                if d.ip == ip and d.port == port:
                    # Unchanged registration: just re-ack with its host id.
                    logger.debug("device rechecked for %s %s:%s" % (str(host_id),str(MPTN.ID_TO_STRING(d.ip)),str(d.port)))
                    self.last_host_id = 0
                    self.send_raw(d.host_id,[d.host_id],raw_type=2)
                    return
        logger.error("device %s (%s:%s) not allowed to change/add in STOP mode." % (str(host_id),str(MPTN.ID_TO_STRING(ip)),str(port)))
    return
def saveDevice(self):
    """Persist every registered device to the JSON device file, then
    rebuild the in-memory host_id lookup."""
    serializable = [device.__dict__ for device in self.devices]
    with open(self._device_filename, 'w') as handle:
        json.dump(serializable, handle, sort_keys=True, indent=2)
    self.updateDeviceLookup()
def loadDevice(self):
    """Load registered devices from the JSON device file.

    If the file cannot be parsed, an empty device list is written back
    as a recovery default.  Rebuilds the host_id lookup afterwards.
    """
    with open(self._device_filename,'r+') as f:
        try:
            load_devices = json.load(f)
        except Exception as e:
            logger.error("loadDevice: %s, %s", str(e), traceback.format_exc())
            load_devices = []
            # BUG FIX: json.load leaves the file position wherever parsing
            # stopped; dumping from there appended the fallback after the
            # corrupt content instead of replacing it.  Rewind and truncate
            # first so the file ends up containing exactly "[]".
            f.seek(0)
            f.truncate()
            json.dump(load_devices, f, sort_keys=True,indent=2)
    self.devices = [UDPDevice(item["host_id"],item["ip"],item["port"]) for item in load_devices]
    self.updateDeviceLookup()
def updateDeviceLookup(self):
    """Rebuild the host_id -> device index from self.devices, in place."""
    self._devices_lookup.clear()
    self._devices_lookup.update({device.host_id: device for device in self.devices})
def _discover(self):
ret = []
if not self.stop():
logger.error("cannot discover without STOP mode")
return ret
with self._global_lock:
for i in xrange(len(self.devices)):
ret.append(self.devices[i].host_id)
return ret
def poll(self):
    """Return a human-readable status string for the current mode.

    In learn mode with no device found yet, reports "ready to <mode>";
    otherwise reports the mode name and, if a device was just processed,
    its host id and full node id.

    NOTE(review): self._mode is indexed with [1], so modes appear to be
    sequences whose second element is a display name — confirm in MPTN.
    The indentation of the last_host_id branch is a reconstruction.
    """
    ret = None
    with self._global_lock:
        if self._mode != MPTN.STOP_MODE and self.last_host_id == 0:
            ret = 'ready to ' + self._mode[1]
        else:
            ret = "%s" % self._mode[1]
            if self.last_host_id != 0:
                # Full node id = network prefix plus the short host id.
                tmp_node_id = self._prefix | self.last_host_id
                ret = ret + "\nfound node: %d (ID is %s or %d)" % (self.last_host_id, MPTN.ID_TO_STRING(tmp_node_id), tmp_node_id)
    logger.info("polled. " + ret)
    return ret
def add(self):
    """Switch the interface into ADD (learn) mode; returns True on success."""
    with self._global_lock:
        try:
            self.enterLearnMode = True
            self._mode = MPTN.ADD_MODE
            self.last_host_id = 0
        except Exception as e:
            logger.error("fails to be ADD mode, now in %s mode error: %s\n%s" % (self._mode[1],
                str(e), traceback.format_exc()))
            return False
        return True
def delete(self):
    """Switch the interface into DEL (remove-device) mode; True on success."""
    with self._global_lock:
        try:
            self.enterLearnMode = True
            self._mode = MPTN.DEL_MODE
            self.last_host_id = 0
        except Exception as e:
            logger.error("fails to be DEL mode, now in %s mode error: %s\n%s" % (self._mode[1],
                str(e), traceback.format_exc()))
            return False
        return True
def stop(self):
    """Leave learn mode and switch to STOP mode; returns True on success."""
    with self._global_lock:
        try:
            self.enterLearnMode = False
            self._mode = MPTN.STOP_MODE
        except Exception as e:
            logger.error("fails to be STOP mode, now in %s mode error: %s\n%s" % (self._mode[1],
                str(e), traceback.format_exc()))
            return False
        return True
| StarcoderdataPython |
9770789 | <reponame>DestinyofYeet/antonstechbot
import subprocess
import json
def assignVariables():
    """Load the bot configuration from config/config.json into module globals.

    Also derives VERSION from ``git describe`` on the current checkout.
    NOTE(review): the "<PASSWORD>" keys are dataset-redaction placeholders;
    the real config presumably uses service-specific key names — confirm
    against the actual config file.
    """
    # FIX: removed the redundant module-level ``global`` statement that
    # preceded this function — ``global`` has no effect outside a function;
    # only this declaration inside the function is needed.
    global VERSION, bot_prefix, ipdata_token, ipdata_url, osu_token, osu_url, lol_token, lol_url, bot_token
    VERSION = subprocess.check_output(["git", "describe", "--tags", "--always"]).decode('ascii').strip()
    with open('config/config.json', 'r') as f:
        json_stuff = json.load(f)
        bot_prefix = json_stuff["prefix"]
        bot_token = json_stuff["token"]
        ipdata_token = json_stuff["ipdata"]
        ipdata_url = "https://api.ipdata.co/"
        osu_token = json_stuff["<PASSWORD>"]
        osu_url = "https://osu.ppy.sh/api/get_user?u="
        lol_token = json_stuff["<PASSWORD>"]
        lol_url = "https://euw1.api.riotgames.com/lol/"
| StarcoderdataPython |
1673783 | <reponame>indymnv/feature_engine
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from typing import Optional, List, Union
import pandas as pd
from feature_engine.dataframe_checks import _is_dataframe
from feature_engine.imputation.base_imputer import BaseImputer
from feature_engine.variable_manipulation import _check_input_parameter_variables
class DropMissingData(BaseImputer):
    """
    DropMissingData() will delete rows containing missing values. The DropMissingData()
    works for both numerical and categorical variables. DropMissingData can
    automatically select all the variables, or alternatively, all the variables with
    missing data in the train set. Then the observations with NA will be dropped for
    these variable groups.

    The user has the option to indicate for which variables the observations with NA
    should be removed.

    Parameters
    ----------
    missing_only : bool, default=True
        If true, missing observations will be dropped only for the variables that were
        seen to have NA in the train set, during fit. If False, observations with NA
        will be dropped from all variables.

    variables : list, default=None
        The list of variables to be imputed. If None, the imputer will find and
        select all variables with missing data.

        **Note**
        The transformer will first select all variables or all user entered
        variables and if `missing_only=True`, it will re-select from the original group
        only those that show missing data in during fit, that is in the train set.

    Attributes
    ----------
    variables_:
        List of variables for which the rows with NA will be deleted.

    Methods
    -------
    fit:
        Learn the variables for which the rows with NA will be deleted
    transform:
        Remove observations with NA
    fit_transform:
        Fit to the data, then transform it.
    return_na_data:
        Returns the dataframe with the rows that contain NA .
    """

    def __init__(
        self,
        missing_only: bool = True,
        variables: Union[None, int, str, List[Union[str, int]]] = None,
    ) -> None:

        if not isinstance(missing_only, bool):
            raise ValueError("missing_only takes values True or False")

        self.variables = _check_input_parameter_variables(variables)
        self.missing_only = missing_only

    def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):
        """
        Learn the variables for which the rows with NA will be deleted.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The training dataset.

        y : pandas Series, default=None
            y is not needed in this imputation. You can pass None or y.

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame

        Returns
        -------
        self
        """

        # check input dataframe
        X = _is_dataframe(X)

        # find variables for which indicator should be added
        if self.missing_only:
            if not self.variables:
                self.variables_ = [
                    var for var in X.columns if X[var].isnull().sum() > 0
                ]
            else:
                self.variables_ = [
                    var for var in self.variables if X[var].isnull().sum() > 0
                ]
        else:
            if not self.variables:
                # IDIOM FIX: list(X.columns) instead of a pass-through comprehension.
                self.variables_ = list(X.columns)
            else:
                self.variables_ = self.variables

        self.input_shape_ = X.shape

        return self

    def transform(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Remove rows with missing values.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The dataframe to be transformed.

        Returns
        -------
        X_transformed : pandas dataframe
            The complete case dataframe for the selected variables, of shape
            [n_samples - rows_with_na, n_features]
        """

        X = self._check_transform_input_and_state(X)
        X.dropna(axis=0, how="any", subset=self.variables_, inplace=True)

        return X

    def return_na_data(self, X: pd.DataFrame) -> pd.DataFrame:
        """
        Returns the subset of the dataframe which contains the rows with missing values.
        This method could be useful in production, in case we want to store the
        observations that will not be fed into the model.

        Parameters
        ----------
        X : pandas dataframe of shape = [n_samples, n_features]
            The dataset to from which rows containing NA should be retained.

        Raises
        ------
        TypeError
            If the input is not a Pandas DataFrame

        Returns
        -------
        X : pandas dataframe of shape = [obs_with_na, features]
            The dataframe portion that contains only the rows with missing values.
        """

        X = self._check_transform_input_and_state(X)

        # BUG FIX: pass axis as a keyword — positional DataFrame.any(1) is
        # deprecated since pandas 1.5 and removed in pandas 2.0.
        idx = pd.isnull(X[self.variables_]).any(axis=1)
        idx = idx[idx]
        return X.loc[idx.index, :]
| StarcoderdataPython |
12853287 | # Imports
from flask import Flask, render_template, session, redirect, request, flash, url_for, abort
from flask_session import Session
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from cs50 import SQL
import os
import markdown
from time import sleep
from new import login_required
# App Config
app = Flask(__name__)
db = SQL("sqlite:///school.db")  # CS50 SQL wrapper over the local SQLite database
app.debug = True  # NOTE(review): debug mode must be disabled in production
# NOTE(review): secret key is hard-coded (a redacted placeholder here);
# load it from the environment instead of committing it to source.
app.secret_key = b'\<KEY>'
app.static_folder = 'static'
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
    """Disable client-side caching on every response."""
    no_cache_headers = (
        ("Cache-Control", "no-cache, no-store, must-revalidate"),
        ("Expires", 0),
        ("Pragma", "no-cache"),
    )
    for header_name, header_value in no_cache_headers:
        response.headers[header_name] = header_value
    return response
# Session Config
app.config['SESSION_TYPE'] = 'filesystem'  # server-side sessions stored on disk
Session(app)
# NOTE(review): pulls uppercase attributes of this module into app.config.
app.config.from_object(__name__)
# Routes
@app.route('/')
def index():
    """Landing page; already-authenticated visitors go to their dashboard."""
    logged_in = session.get("id")
    return redirect(url_for('home')) if logged_in else render_template("index.html")
@app.route('/account')
@login_required
def account():
    """Show the account settings page for the signed-in school."""
    current = db.execute("SELECT * FROM users WHERE id = :id", id=session['id'])[0]
    return render_template('account.html', user=current)
@app.route('/changemail', methods=['GET', 'POST'])
@login_required
def mailc():
    """Change the signed-in school's registered e-mail address."""
    if request.method != 'POST':
        return render_template('mail.html')
    if not request.form.get('mail'):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    duplicates = db.execute("SELECT * FROM users WHERE mail = :schoolname", schoolname=request.form.get('mail').lower())
    if len(duplicates) != 0:
        flash(u'This mail is already registered.', 'sorry')
        return redirect(request.url)
    db.execute('UPDATE users SET mail = :mail WHERE id = :id', mail=request.form.get('mail'), id=session['id'])
    return redirect(url_for('home'))
@app.route('/changepass', methods=['GET', 'POST'])
@login_required
def passc():
    """Change the signed-in school's password."""
    if request.method != 'POST':
        return render_template('pass.html')
    if not request.form.get('password'):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    if request.form.get("password") != request.form.get("confirmation"):
        flash(u'Passwords do not match.', 'sorry')
        return redirect(request.url)
    db.execute('UPDATE users SET hash = :passw WHERE id = :id', passw=generate_password_hash(request.form.get('password')), id=session['id'])
    return redirect(url_for('home'))
@app.route('/changename', methods=['GET', 'POST'])
@login_required
def namec():
    """Change the signed-in school's display name (must stay unique)."""
    if request.method != 'POST':
        return render_template('name.html')
    if not request.form.get('username'):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    duplicates = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    if len(duplicates) != 0:
        flash(u'This school name is already registered.', 'sorry')
        return redirect(request.url)
    db.execute('UPDATE users SET schoolname = :name WHERE id = :id', name=request.form.get('username'), id=session['id'])
    return redirect(url_for('home'))
@app.route('/home')
@login_required
def home():
    """Dashboard listing every page owned by the signed-in school."""
    owner_id = session['id']
    page_rows = db.execute("SELECT * FROM sites WHERE user_id = :id", id=owner_id)
    owner = db.execute("SELECT * FROM users WHERE id = :id", id=owner_id)[0]
    return render_template('home.html', sites=page_rows, user=owner)
@app.route('/page/<urlheader>')
def pages(urlheader):
    """Render the published page identified by its unique header slug.

    NOTE(review): this writes the page body into the single shared file
    templates/temp.html and renders it; concurrent requests race on that
    file, and sleep(0.1) merely papers over the write latency.  Passing
    the stored content directly into the template would be safer.
    """
    if len(db.execute("SELECT * FROM sites WHERE header = :header", header=urlheader)) == 0:
        abort(404)
    else:
        sites = db.execute("SELECT * FROM sites WHERE header = :header", header=urlheader)
        file = open('templates/temp.html', 'w')
        file.write(sites[0]['content'])
        sleep(0.1)
        file.close()
        return render_template('pages.html')
@app.route('/pages', methods=['GET', 'POST'])
@login_required
def page():
    """List every page owned by the signed-in school."""
    owned_sites = db.execute("SELECT * FROM sites WHERE user_id = :id", id=session['id'])
    return render_template('page.html', sites=owned_sites)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Sign a school in; already-authenticated visitors go to /home."""
    if session.get("id"):
        return redirect(url_for('home'))
    if request.method != 'POST':
        return render_template('login.html')
    if not request.form.get("username") or not request.form.get("password"):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    matches = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    if len(matches) != 1 or not check_password_hash(matches[0]["hash"], request.form.get('password')):
        flash(u'Invalid username and/or password.', 'sorry')
        return redirect(request.url)
    session['id'] = matches[0]['id']
    flash(u'Logged In!', 'okay')
    return redirect(url_for('home'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new school account and create its data directory."""
    if session.get("id"):
        return redirect(url_for('home'))
    if request.method != 'POST':
        return render_template("register.html")
    # Ensure every credential was submitted
    if not request.form.get("username") or not request.form.get("password") or not request.form.get("mail"):
        flash(u'Please fill every credentials.', 'sorry')
        return redirect(request.url)
    if request.form.get("password") != request.form.get("confirmation"):
        flash(u'Passwords do not match.', 'sorry')
        return redirect(request.url)
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    rows2 = db.execute("SELECT * FROM users WHERE mail = :schoolname", schoolname=request.form.get('mail').lower())
    if len(rows) != 0:
        flash(u'This school name is already taken.', 'sorry')
        return redirect(request.url)
    if len(rows2) != 0:
        flash(u'This mail is already registered.', 'sorry')
        return redirect(request.url)
    db.execute("INSERT INTO users (schoolname, mail, hash) VALUES (:name, :mail, :hash)", name=request.form.get("username").lower(), mail=request.form.get("mail").lower() , hash=generate_password_hash(request.form.get("password")))
    rows = db.execute("SELECT * FROM users WHERE schoolname = :schoolname", schoolname=request.form.get('username').lower())
    # NOTE(review): the rest of the app keys sessions on 'id' (see login);
    # 'user_id' is preserved here for compatibility but looks like a bug —
    # the user is not actually logged in by this and is redirected to /login.
    session["user_id"] = rows[0]["id"]
    # BUG FIX: build the per-user directory path portably; the original used
    # a hard-coded Windows backslash ('dirs\\' + id), which breaks on POSIX.
    os.mkdir(os.path.join('dirs', str(session["user_id"])))
    flash(u"Registered!", 'okay')
    return redirect(url_for('login'))
@app.route('/logout')
@login_required
def logout():
    """Clear the session and return to the login page."""
    session.clear()
    return redirect(url_for('login'))
@app.route('/learnmore')
def learnmore():
    """Static informational page (no login required)."""
    return render_template('learn.html')
@app.route('/new', methods=['GET', 'POST'])
@login_required
def new():
    """Create a new page for the signed-in school (markdown content)."""
    if request.method != 'POST':
        return render_template('new.html')
    form = request.form
    if not form.get('header') or not form.get('desc') or not form.get('content'):
        flash('Please fill everything.', 'sorry')
        return redirect(url_for('new'))
    if len(db.execute("SELECT * FROM sites WHERE header = :header", header=form.get('header').lower())) != 0:
        flash('Header already exists.', 'sorry')
        return redirect(url_for('new'))
    db.execute("INSERT INTO sites (header, desc, content, user_id) VALUES (:header, :desc, :content, :id)", header=form.get('header'), desc=form.get('desc'), content=markdown.markdown(form.get('content')), id=session['id'])
    flash(u'Created!', 'okay')
    return redirect(url_for('home'))
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page.

    BUG FIX: the original comment claimed "we set the 404 status
    explicitly" but the handler returned only the rendered body, so the
    response carried status 200.  Return the (body, status) tuple so
    clients actually receive a 404.
    """
    return render_template('404.html'), 404
# Run the Flask development server when executed directly
# (use a proper WSGI server in production).
if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
6416999 | <gh_stars>1-10
# TC007_test - Post comment (pytest)
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time
# Run Chrome headless so the test works without a display.
# NOTE(review): newer Selenium versions prefer opt.add_argument("--headless").
opt = Options()
opt.headless = True
def test_comment():
    """End-to-end UI test: sign in, open own post, add a comment, verify it.

    NOTE(review): fixed time.sleep() waits and absolute XPaths make this
    brittle; WebDriverWait with expected conditions would be more robust.
    """
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=opt)
    # Load page (app under test must be running locally)
    driver.get('http://localhost:1667/')
    time.sleep(8)
    # Enter the data to be uploaded
    email = '<EMAIL>'
    username = 'testuser1'
    pwd = '<PASSWORD>$'
    comment_text = 'I written a comment to the post'
    # Fields xpath
    email_x = '//*[@id="app"]/div/div/div/div/form/fieldset[1]/input'
    pwd_x = '//*[@id="app"]/div/div/div/div/form/fieldset[2]/input'
    username_x = '//*[@id="app"]/nav/div/ul/li[4]/a'
    sign_button_x = '//*[@id="app"]/nav/div/ul/li[2]/a'
    sign_in_btn_x = '//*[@id="app"]/div/div/div/div/form/button'
    my_title_x = '//*[@id="app"]/div/div[2]/div/div/div[1]/ul/li[1]/a'
    post_tilte_x = '//*[@id="app"]/div/div[2]/div/div/div[2]/div/div/div[1]/a/h1'
    comment_x = '//*[@id="app"]/div/div[2]/div[2]/div/div/form/div[1]/textarea'
    comment_btn_x = '//*[@id="app"]/div/div[2]/div[2]/div/div/form/div[2]/button'
    comment_text_x = '//*[@id="app"]/div/div[2]/div[2]/div/div[2]/div[1]'
    # Sign in
    sign_button = driver.find_element(By.XPATH, sign_button_x)
    sign_button.click()
    driver.find_element(By.XPATH, email_x).send_keys(email)
    driver.find_element(By.XPATH, pwd_x).send_keys(pwd)
    sign_in_btn = driver.find_element(By.XPATH, sign_in_btn_x)
    sign_in_btn.click()
    time.sleep(5)
    # Check box: the navbar should now show the signed-in username
    assert username == driver.find_element(By.XPATH, username_x).text
    time.sleep(5)
    # Post view: navigate profile -> first article -> article detail
    driver.find_element(By.XPATH, username_x).click() # username click
    time.sleep(5)
    driver.find_element(By.XPATH, my_title_x).click() # my title click
    time.sleep(5)
    driver.find_element(By.XPATH, post_tilte_x).click() # post title click
    time.sleep(5)
    # Post comment
    driver.find_element(By.XPATH, comment_x).send_keys(comment_text)
    driver.find_element(By.XPATH, comment_btn_x).click()
    time.sleep(5)
    # Check: the new comment text must appear in the comment list
    assert comment_text == driver.find_element(By.XPATH, comment_text_x).text
    driver.close()
    driver.quit()
13082 | from django import template
from week.models import SidebarContentPage,SidebarImagePage
register = template.Library()
@register.inclusion_tag('week/announcement.html')
def sidebar():
    """Render the announcement sidebar from the single SidebarContentPage."""
    return {'sidebar_data': SidebarContentPage.objects.get()}
@register.inclusion_tag('week/advertisement.html')
def sidebarimage():
    """Render the advertisement sidebar from the single SidebarImagePage."""
    return {'sidebar_image': SidebarImagePage.objects.get()}
3311395 | <filename>models/problem_2a.py<gh_stars>0
import numpy as np
import pickle
from homework4.problem_2 import sample_train
if __name__ == "__main__":
X = np.genfromtxt('data/X_train.txt', delimiter=None)
Y = np.genfromtxt('data/Y_train.txt', delimiter=None)[:, np.newaxis]
raw_data = np.concatenate((X, Y), axis=1)
trn_p = 20
dev_p = 5
resamping = 10
reg = np.logspace(-4, 0, 20)
reg = reg.round(6)
training_auc = np.zeros((resamping, len(reg)), dtype=float)
validating_auc = np.zeros((resamping, len(reg)), dtype=float)
for i in range(resamping):
training_auc[i], validating_auc[i] = sample_train(reg, raw_data, trn_p, dev_p)
with open(f"learners/training_auc.pickle", "wb") as f:
pickle.dump(training_auc, f)
with open(f"learners/validating_auc.pickle", "wb") as f:
pickle.dump(validating_auc, f)
| StarcoderdataPython |
3486448 | <reponame>yofn/pyacm<filename>codeforces/math数学/800/1207A两种汉堡.py<gh_stars>0
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1207/A
# For each test case: maximize revenue from buns (2 per burger), beef
# patties and chicken cutlets by selling the pricier burger first.
for _ in range(int(input())):
    buns, beef, chicken = map(int, input().split())
    ham_price, chick_price = map(int, input().split())
    pairs = buns // 2
    if ham_price > chick_price:
        hamburgers = min(pairs, beef)
        chickenburgers = min(pairs - hamburgers, chicken)
    else:
        chickenburgers = min(pairs, chicken)
        hamburgers = min(pairs - chickenburgers, beef)
    print(hamburgers * ham_price + chickenburgers * chick_price)
1712619 | <reponame>brianchiang-tw/UD1110_Intro_to_Python_Programming<filename>L4_Control flow/Quiz_List comprehension.py
# Q1:
# Quiz: Extract First Names
# Use a list comprehension to create a new list first_names
# containing just the first names in names in lowercase.
names = ["<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>"]
# Take the first whitespace-separated token of each full name, lowercased.
first_names = [ x.split()[0].lower() for x in names ]
print(first_names)
# expected output
'''
['rick', 'morty', 'summer', 'jerry', 'beth']
'''
# Q2:
# Quiz: Multiples of Three
# Use a list comprehension to create a list multiples_3 containing the first 20 multiples of 3.
multiples_3 = [ (x*3) for x in range(1, 21)]
print(multiples_3)
# expected output:
'''
[3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 57, 60]
'''
# Q3:
# Quiz: Filter Names by Scores
# Use a list comprehension to create a list of names passed that only include those that scored at least 65.
scores = {
    "<NAME>": 70,
    "<NAME>": 35,
    "<NAME>": 82,
    "<NAME>": 23,
    "<NAME>": 98
}
# BUG FIX: "at least 65" is an inclusive bound, so the filter must be
# >= 65 (the original used > 65, which would wrongly exclude exactly 65).
passed = [ student for student, grade in scores.items() if grade >= 65 ]
print(passed)
# expected output:
'''
['<NAME>', '<NAME>', '<NAME>']
'''
4945381 | <reponame>Elrophi/flask-quote
# NOTE(review): this snippet is broken as written — `manager` is never
# defined or imported (it is normally a flask_script Manager instance),
# so the decorator below raises NameError at import time.
@manager.command
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
# NOTE(review): mid-file import; conventionally this belongs at the top.
from app import app
if __name__ == '__main__':
    app.run(debug=True)
9724979 | <reponame>bonitobonita24/Mayan-EDMS
from mayan.apps.authentication.tests.mixins import LoginViewTestMixin, LogoutViewTestMixin
from mayan.apps.testing.tests.base import GenericViewTestCase
from mayan.apps.user_management.permissions import (
permission_user_edit, permission_user_view
)
from ..events import event_user_locale_profile_edited
from .mixins import UserLocaleProfileViewMixin
from .literals import TEST_TRANSLATED_WORD
class CurrentUserViewTestCase(
    UserLocaleProfileViewMixin, GenericViewTestCase
):
    """Locale-profile views acting on the logged-in user themselves.

    No explicit permission grants are made: per these tests, users can
    always view (200) and edit (302) their own locale profile.
    """
    def test_current_user_locale_profile_detail_view_no_permission(self):
        self._clear_events()

        response = self._request_test_current_user_locale_profile_detail_view()
        self.assertEqual(response.status_code, 200)

        # Viewing must not emit any events.
        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_current_user_locale_profile_edit_view_no_permission(self):
        # Capture current values so we can assert the edit changed them.
        language = self._test_case_user.locale_profile.language
        timezone = self._test_case_user.locale_profile.timezone

        self._clear_events()

        response = self._request_test_current_user_locale_profile_edit_view()
        self.assertEqual(response.status_code, 302)

        self._test_case_user.refresh_from_db()
        self.assertNotEqual(
            self._test_case_user.locale_profile.language, language
        )
        self.assertNotEqual(
            self._test_case_user.locale_profile.timezone, timezone
        )

        # Exactly one "locale profile edited" event, self-actor/self-target.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self._test_case_user)
        self.assertEqual(events[0].verb, event_user_locale_profile_edited.id)
class SuperUserLocaleViewTestCase(
    UserLocaleProfileViewMixin, GenericViewTestCase
):
    """Superuser locale profiles must be unreachable (404) for other users,
    even when user view/edit permissions have been granted, and no edit or
    event may result.
    """
    def setUp(self):
        super().setUp()
        self._create_test_superuser()

    def test_superuser_locale_profile_detail_view_no_permission(self):
        self._clear_events()

        response = self._request_test_superuser_locale_profile_detail_view()
        self.assertEqual(response.status_code, 404)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_superuser_locale_profile_detail_view_with_access(self):
        self.grant_access(
            obj=self.test_superuser, permission=permission_user_view
        )

        self._clear_events()

        # Still hidden: access grants do not expose superuser accounts.
        response = self._request_test_superuser_locale_profile_detail_view()
        self.assertEqual(response.status_code, 404)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_superuser_locale_profile_edit_view_no_permission(self):
        language = self.test_superuser.locale_profile.language
        timezone = self.test_superuser.locale_profile.timezone

        self._clear_events()

        response = self._request_test_superuser_locale_profile_edit_view()
        self.assertEqual(response.status_code, 404)

        # The profile must be unchanged.
        self.test_superuser.refresh_from_db()
        self.assertEqual(
            self.test_superuser.locale_profile.language, language
        )
        self.assertEqual(
            self.test_superuser.locale_profile.timezone, timezone
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_superuser_locale_profile_edit_view_with_access(self):
        language = self.test_superuser.locale_profile.language
        timezone = self.test_superuser.locale_profile.timezone

        self.grant_access(
            obj=self.test_superuser, permission=permission_user_edit
        )

        self._clear_events()

        # Still hidden and unchanged even with the edit permission granted.
        response = self._request_test_superuser_locale_profile_edit_view()
        self.assertEqual(response.status_code, 404)

        self.test_superuser.refresh_from_db()
        self.assertEqual(
            self.test_superuser.locale_profile.language, language
        )
        self.assertEqual(
            self.test_superuser.locale_profile.timezone, timezone
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)
class UserLocaleProfileViewTestCase(
    UserLocaleProfileViewMixin, GenericViewTestCase
):
    """Locale-profile views acting on another (regular) user's account:
    404 without the relevant permission; with view access the detail page
    renders (200), and with edit access the edit succeeds (302) and emits
    a single edited event.
    """
    auto_create_test_user = True

    def test_user_locale_profile_detail_view_no_permission(self):
        self._clear_events()

        response = self._request_test_user_locale_profile_detail_view()
        self.assertEqual(response.status_code, 404)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_user_locale_profile_detail_view_with_access(self):
        self.grant_access(
            obj=self.test_user, permission=permission_user_view
        )

        self._clear_events()

        response = self._request_test_user_locale_profile_detail_view()
        self.assertEqual(response.status_code, 200)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_user_locale_profile_edit_view_no_permission(self):
        language = self.test_user.locale_profile.language
        timezone = self.test_user.locale_profile.timezone

        self._clear_events()

        response = self._request_test_user_locale_profile_edit_view()
        self.assertEqual(response.status_code, 404)

        # Without permission, the profile must be unchanged.
        self.test_user.refresh_from_db()
        self.assertEqual(self.test_user.locale_profile.language, language)
        self.assertEqual(self.test_user.locale_profile.timezone, timezone)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_user_locale_profile_edit_view_with_access(self):
        language = self.test_user.locale_profile.language
        timezone = self.test_user.locale_profile.timezone

        self.grant_access(
            obj=self.test_user, permission=permission_user_edit
        )

        self._clear_events()

        response = self._request_test_user_locale_profile_edit_view()
        self.assertEqual(response.status_code, 302)

        self.test_user.refresh_from_db()
        self.assertNotEqual(self.test_user.locale_profile.language, language)
        self.assertNotEqual(self.test_user.locale_profile.timezone, timezone)

        # One edited event: acting user is the actor, edited user the target.
        events = self._get_test_events()
        self.assertEqual(events.count(), 1)

        self.assertEqual(events[0].action_object, None)
        self.assertEqual(events[0].actor, self._test_case_user)
        self.assertEqual(events[0].target, self.test_user)
        self.assertEqual(events[0].verb, event_user_locale_profile_edited.id)
class LanguageSelectionViewTestCase(
    LoginViewTestMixin, LogoutViewTestMixin, UserLocaleProfileViewMixin,
    GenericViewTestCase
):
    """Changing the UI language takes effect immediately and persists
    across a logout/login cycle (pages render TEST_TRANSLATED_WORD).
    """
    def test_language_change_view(self):
        response = self._request_test_current_user_locale_profile_edit_view(
            follow=True
        )
        self.assertEqual(response.status_code, 200)

        # The detail page must now render in the newly selected language.
        response = self._request_test_current_user_locale_profile_detail_view()
        self.assertContains(
            response=response, text=TEST_TRANSLATED_WORD, status_code=200
        )

    def test_language_change_after_login_view(self):
        response = self._request_test_current_user_locale_profile_edit_view(
            follow=True
        )
        self.assertEqual(response.status_code, 200)

        # Log out and back in; the stored locale must survive the cycle.
        request = self._request_logout_view()
        self.assertEqual(request.status_code, 302)

        response = self._request_simple_login_view()
        self.assertEqual(request.status_code, 302)

        response = self._request_test_current_user_locale_profile_detail_view()
        self.assertContains(
            response=response, text=TEST_TRANSLATED_WORD, status_code=200
        )
| StarcoderdataPython |
4972986 | import script
from archive.utils.mock_di_api import mock_api
from archive.utils.operator_test import operator_test
# Drive script.on_input with a mocked Data Intelligence API, feeding it a
# CSV test file, then dump every message the operator sent on its ports.
api = mock_api(__file__) # class instance of mock_api
mock_api.print_send_msg = True # set class variable for printing api.send
optest = operator_test(__file__)
# config parameter
api.config.num_files = 'null' # datatype : number
api.config.prefix = None # datatype : string
msg = optest.get_file('test_file.csv')
script.on_input(msg)
# print result list
for mt in mock_api.msg_list :
    print('*********************')
    print('Port: {}'.format(mt['port']))
    print('Attributes: {}'.format(mt['data'].attributes))
    print('Data: {}'.format(mt['data'].body))
    #print(optest.msgtable2df(mt['data']))
| StarcoderdataPython |
3578449 | <filename>app/currency_trade_volume_service.py<gh_stars>0
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional
from app.currency_trade_volume_store import CurrencyTradeVolumeStore, CurrencyPairAvg
from app.livecoin_api import LivecoinApi
from app.mailer import Mailer
from app.types import CurrencyTradeVolumeRecord
# Free heroku database can only hold 10,000 rows so just store a couple of them
# as an example. These were picked by looking for ones that have a larger
# variance in short time scales
class CurrencyPair(str, Enum):
    """Currency pairs whose trade volume we track.

    Subclassing ``str`` lets members compare equal to (and serialize as)
    their plain string values, e.g. ``CurrencyPair.XEM_TO_BTC == "XEM/BTC"``.
    """

    XEM_TO_BTC = "XEM/BTC"
    OTON_TO_BTC = "OTON/BTC"
    DGB_TO_BTC = "DGB/BTC"


# Plain string values of every tracked pair, for APIs that expect strings.
_TRACKED_PAIRS = [p.value for p in CurrencyPair]
class PairNotFoundException(Exception):
    """Raised when no rank data exists for a requested currency pair."""
@dataclass
class CurrencyPairSnapshot:
    """A point-in-time view of one currency pair's trade-volume data."""

    currency_pair: str
    history: List[CurrencyTradeVolumeRecord]
    # The currency pair's position in the list of all tracked currency pairs
    # sorted by the standard deviation of their trade volume over the last 24
    # hours.
    rank: int
    total_tracked_currency_pairs: int
def _find_avg_volume(
    currency_pair: str, avg_trade_volumes: List[CurrencyPairAvg]
) -> Optional[float]:
    """Return the average volume recorded for *currency_pair*, or None if the
    pair has no entry in *avg_trade_volumes* (first match wins)."""
    return next(
        (
            record.avg_volume
            for record in avg_trade_volumes
            if record.currency_pair == currency_pair
        ),
        None,
    )
def _is_notable_volume_change(new_trade_volume: float, avg_trade_volume: float) -> bool:
    """True when the new volume is at least triple a positive running average."""
    if avg_trade_volume <= 0:
        return False
    return new_trade_volume >= 3 * avg_trade_volume
class CurrencyTradeVolumeService:
    """Application service that syncs trade volumes from the Livecoin API
    into the store and serves per-pair snapshots, emailing alerts on
    notable volume spikes.
    """

    def __init__(
        self,
        store: CurrencyTradeVolumeStore,
        api: LivecoinApi,
        mailer: Mailer,
        notify_emails: List[str],
    ):
        # Collaborators are injected so the service stays testable.
        self._store = store
        self._api = api
        self._mailer = mailer
        # Recipients for trade-volume alert emails.
        self._notify_emails = notify_emails

    async def update_trade_volumes(self):
        """
        Load another batch of trade volumes from the API and record them.
        Send out email alerts for notable changes in trade volume.
        """
        trade_volumes = await self._api.fetch_trade_volumes(_TRACKED_PAIRS)
        # Persist first, then read back the averages used for alerting.
        await self._store.record_trade_volumes(trade_volumes)
        avg_trade_volumes = await self._store.get_currency_pair_averages()
        # We could likely trade memory usage for speed if there are huge numbers of tracked currency pairs here by first
        # creating a hash map of average trade volumes indexed by currency pair instead of searching for them in the
        # list every time
        for trade_volume in trade_volumes:
            avg_volume = _find_avg_volume(trade_volume.currency_pair, avg_trade_volumes)
            if avg_volume is not None and _is_notable_volume_change(
                trade_volume.volume, avg_volume
            ):
                for email in self._notify_emails:
                    self._mailer.send_mail(
                        email,
                        "CryptoTracker Alert",
                        f"{trade_volume.currency_pair} is trading at {trade_volume.volume}",
                    )

    async def get_currency_pair_snapshot(
        self, currency_pair: CurrencyPair
    ) -> CurrencyPairSnapshot:
        """
        Get the trade volume history for the last 24 hours of the given currency pair, as well as a ranking of the
        amount of fluctuation in the given pair amongst all tracked currency pair trade volumes.

        Raises PairNotFoundException when the pair has no rank data yet.
        """
        ranks = await self._store.get_currency_pair_ranks()
        history = await self._store.get_currency_pair_history(currency_pair)
        target_rank: Optional[int] = None
        # Linear scan; the last matching entry wins (pairs are expected to be
        # unique in the ranks list -- no early break needed for correctness).
        for rank in ranks:
            if rank.currency_pair == currency_pair:
                target_rank = rank.rank
        if target_rank is None:
            # TODO: Log requested currency pair and results
            # This will cause the service to 500 if we have a supported currency pair that we don't have any data for
            # yet (or if the syncing process has failed for the last hour). We should make the API return a custom error
            # code in this case so that the UI can handle that case and display some information
            raise PairNotFoundException()
        return CurrencyPairSnapshot(
            currency_pair, history, target_rank, len(_TRACKED_PAIRS)
        )
| StarcoderdataPython |
336850 | <reponame>huibinshen/autogluon
from autogluon.tabular.models.tabular_nn.torch.tabular_nn_torch import TabularNeuralNetTorchModel
def test_tabular_nn_binary(fit_helper):
    """Smoke-test TabularNeuralNetTorchModel on a binary classification set."""
    dataset_name = 'adult'
    fit_args = dict(hyperparameters={TabularNeuralNetTorchModel: {}})
    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
def test_tabular_nn_multiclass(fit_helper):
    """Smoke-test TabularNeuralNetTorchModel on a multiclass set."""
    dataset_name = 'covertype_small'
    fit_args = dict(hyperparameters={TabularNeuralNetTorchModel: {}})
    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
def test_tabular_nn_regression(fit_helper):
    """Smoke-test TabularNeuralNetTorchModel on a regression set."""
    dataset_name = 'ames'
    fit_args = dict(
        hyperparameters={TabularNeuralNetTorchModel: {}},
        time_limit=10,  # TabularNN trains for a long time on ames
    )
    fit_helper.fit_and_validate_dataset(dataset_name=dataset_name, fit_args=fit_args)
| StarcoderdataPython |
8053135 | <gh_stars>10-100
'''Main "script."'''
import ast
import copy
import csv
import ctypes
import json
import logging
import os
import sys
import time
import traceback
import uuid
from os import listdir
from os.path import isfile
from typing import List
import requests.exceptions
from argparse_prompt import PromptParser
from folioclient.FolioClient import FolioClient
from migration_tools.custom_exceptions import (
TransformationProcessError,
TransformationRecordFailedError,
)
from migration_tools.folder_structure import FolderStructure
from migration_tools.helper import Helper
from migration_tools.main_base import MainBase
from migration_tools.mapping_file_transformation.holdings_mapper import HoldingsMapper
from migration_tools.mapping_file_transformation.mapping_file_mapper_base import (
MappingFileMapperBase,
)
from migration_tools.report_blurbs import Blurbs
# Raise the csv module's per-field size cap as far as it will go:
# c_ulong(-1) is the platform's largest unsigned long, and halving it yields
# the largest value field_size_limit accepts (it must fit in a signed long).
csv.field_size_limit(int(ctypes.c_ulong(-1).value // 2))
class Worker(MainBase):
    """Performs the actual holdings transformation work: reads legacy item
    files, maps them to FOLIO holdings via the mapper, merges duplicates
    according to the merge criteria, and writes results plus reports.
    """

    def __init__(
        self,
        folio_client: FolioClient,
        mapper: HoldingsMapper,
        files,
        folder_structure: FolderStructure,
        holdings_merge_criteria,
    ):
        super().__init__()
        # Merged holdings keyed by the merge key produced by to_key().
        self.holdings = {}
        self.total_records = 0
        self.folder_structure = folder_structure
        self.folio_client = folio_client
        self.files = files
        # legacy item id -> {"id": FOLIO holdings uuid}
        self.legacy_map = {}
        self.holdings_merge_criteria = holdings_merge_criteria
        # Criteria of the form "u_<holdings type id>" exclude that holdings
        # type (bound-withs) from merging.
        if "_" in self.holdings_merge_criteria:
            self.excluded_hold_type_id = self.holdings_merge_criteria.split("_")[-1]
            logging.info(self.excluded_hold_type_id)
        self.results_path = self.folder_structure.created_objects_path
        self.mapper = mapper
        self.failed_files: List[str] = list()
        self.holdings_types = list(
            self.folio_client.folio_get_all("/holdings-types", "holdingsTypes")
        )
        logging.info("%s\tholdings types in tenant", len(self.holdings_types))
        # Fall-back holdings type applied to every mapped record; required.
        self.default_holdings_type = next(
            (h["id"] for h in self.holdings_types if h["name"] == "Unmapped"), ""
        )
        if not self.default_holdings_type:
            raise TransformationProcessError(
                "Holdings type named Unmapped not found in FOLIO."
            )
        logging.info("Init done")

    def work(self):
        """Process every source file; any unexpected failure aborts the run."""
        logging.info("Starting....")
        for file_name in self.files:
            logging.info("Processing %s", file_name)
            try:
                self.process_single_file(file_name)
            except Exception as ee:
                error_str = (
                    f"Processing of {file_name} failed:\n{ee}."
                    "Check source files for empty lines or missing reference data"
                )
                logging.exception(error_str)
                self.mapper.migration_report.add(
                    Blurbs.FailedFiles, f"{file_name} - {ee}"
                )
                sys.exit()
        logging.info(
            f"processed {self.total_records:,} records in {len(self.files)} files"
        )

    def process_single_file(self, file_name):
        """Map every legacy record in *file_name*, logging throughput.

        Record-level failures are routed to the mapper's error handlers so a
        single bad row does not abort the file.
        """
        with open(file_name, encoding="utf-8-sig") as records_file:
            self.mapper.migration_report.add_general_statistics(
                "Number of files processed"
            )
            start = time.time()
            for idx, record in enumerate(
                self.mapper.get_objects(records_file, file_name)
            ):
                try:
                    self.process_holding(idx, record)
                except TransformationProcessError as process_error:
                    self.mapper.handle_transformation_process_error(idx, process_error)
                except TransformationRecordFailedError as error:
                    self.mapper.handle_transformation_record_failed_error(idx, error)
                except Exception as excepion:
                    self.mapper.handle_generic_exception(idx, excepion)
                self.mapper.migration_report.add_general_statistics(
                    "Number of Legacy items in file"
                )
                if idx > 1 and idx % 10000 == 0:
                    elapsed = idx / (time.time() - start)
                    elapsed_formatted = "{0:.4g}".format(elapsed)
                    logging.info(
                        f"{idx:,} records processed. Recs/sec: {elapsed_formatted} "
                    )
            # NOTE(review): if the file yields no records, `idx` is never
            # bound and this raises NameError -- confirm empty files cannot
            # occur here.
            self.total_records += idx
            logging.info(
                f"Done processing {file_name} containing {idx:,} records. "
                f"Total records processed: {self.total_records:,}"
            )

    def process_holding(self, idx, row):
        """Map one legacy row to one or more FOLIO holdings and merge them in."""
        folio_rec = self.mapper.do_map(row, f"row # {idx}")
        folio_rec["holdingsTypeId"] = self.default_holdings_type
        holdings_from_row = []
        if len(folio_rec.get("instanceId", [])) == 1:  # Normal case.
            folio_rec["instanceId"] = folio_rec["instanceId"][0]
            holdings_from_row.append(folio_rec)
        elif len(folio_rec.get("instanceId", [])) > 1:  # Bound-with.
            holdings_from_row.extend(self.create_bound_with_holdings(folio_rec))
        else:
            raise TransformationRecordFailedError(
                idx, "No instance id in parsed record", ""
            )
        for folio_holding in holdings_from_row:
            self.merge_holding_in(folio_holding)
            self.mapper.report_folio_mapping(folio_holding, self.mapper.schema)

    def create_bound_with_holdings(self, folio_rec):
        """Yield one holdings record per bound-with instance in *folio_rec*,
        each with explanatory notes, its own call number and a fresh uuid.
        """
        # Add former ids; bracketed comma-separated lists are split apart.
        temp_ids = []
        for former_id in folio_rec.get("formerIds", []):
            if (
                former_id.startswith("[")
                and former_id.endswith("]")
                and "," in former_id
            ):
                ids = (
                    former_id[1:-1]
                    .replace('"', "")
                    .replace(" ", "")
                    .replace("'", "")
                    .split(",")
                )
                temp_ids.extend(ids)
            else:
                temp_ids.append(former_id)
        folio_rec["formerIds"] = temp_ids
        # Add note (staff-only, machine-readable instance list)
        note = {
            "holdingsNoteTypeId": "e19eabab-a85c-4aef-a7b2-33bd9acef24e",  # Default binding note type
            "note": (
                f'This Record is a Bound-with. It is bound with {len(folio_rec["instanceId"])} '
                "instances. Below is a json structure allowing you to move this into the future "
                "Bound-with functionality in FOLIO\n"
                f'{{"instances": {json.dumps(folio_rec["instanceId"], indent=4)}}}'
            ),
            "staffOnly": True,
        }
        # Public note pointing patrons at the sibling records.
        note2 = {
            "holdingsNoteTypeId": "e19eabab-a85c-4aef-a7b2-33bd9acef24e",  # Default binding note type
            "note": (
                f'This Record is a Bound-with. It is bound with {len(folio_rec["instanceId"])} other records. '
                "In order to locate the other records, make a search for the Class mark, but without brackets."
            ),
            "staffOnly": False,
        }
        if "notes" in folio_rec:
            folio_rec["notes"].append(note)
            folio_rec["notes"].append(note2)
        else:
            folio_rec["notes"] = [note, note2]
        for bwidx, index in enumerate(folio_rec["instanceId"]):
            if not index:
                raise ValueError(f"No ID for record {folio_rec}")
            # callNumber holds a python-literal string (list or str).
            call_numbers = ast.literal_eval(folio_rec["callNumber"])
            if isinstance(call_numbers, str):
                call_numbers = [call_numbers]
            c = copy.deepcopy(folio_rec)
            c["instanceId"] = index
            c["callNumber"] = call_numbers[bwidx]
            # Bound-with holdings type id -- TODO confirm it exists in tenant.
            c["holdingsTypeId"] = "7b94034e-ac0d-49c9-9417-0631a35d506b"
            c["id"] = str(uuid.uuid4())
            self.mapper.migration_report.add_general_statistics(
                "Bound-with holdings created"
            )
            yield c

    def merge_holding_in(self, folio_holding):
        """Store *folio_holding*, merging it into an existing holding with the
        same merge key unless it is an excluded (bound-with) holdings type."""
        new_holding_key = self.to_key(folio_holding, self.holdings_merge_criteria)
        existing_holding = self.holdings.get(new_holding_key, None)
        exclude = (
            self.holdings_merge_criteria.startswith("u_")
            and folio_holding["holdingsTypeId"] == self.excluded_hold_type_id
        )
        if exclude or not existing_holding:
            self.mapper.migration_report.add_general_statistics(
                "Unique Holdings created from Items"
            )
            # NOTE(review): excluded holdings reuse the same key, so a second
            # excluded holding with an equal key overwrites the first here.
            self.holdings[new_holding_key] = folio_holding
        else:
            self.mapper.migration_report.add_general_statistics(
                "Holdings already created from Item"
            )
            self.merge_holding(new_holding_key, existing_holding, folio_holding)

    def wrap_up(self):
        """Write created holdings, the legacy id map and the migration report."""
        logging.info("Done. Wrapping up...")
        if any(self.holdings):
            print(
                f"Saving holdings created to {self.folder_structure.created_objects_path}"
            )
            with open(
                self.folder_structure.created_objects_path, "w+"
            ) as holdings_file:
                for holding in self.holdings.values():
                    for legacy_id in holding["formerIds"]:
                        # Prevent the first item in a boundwith to be overwritten
                        if legacy_id not in self.legacy_map:
                            self.legacy_map[legacy_id] = {"id": holding["id"]}
                    Helper.write_to_file(holdings_file, holding)
                    self.mapper.migration_report.add_general_statistics(
                        "Holdings Records Written to disk"
                    )
            with open(
                self.folder_structure.holdings_id_map_path, "w"
            ) as legacy_map_path_file:
                json.dump(self.legacy_map, legacy_map_path_file)
                logging.info(f"Wrote {len(self.legacy_map)} id:s to legacy map")
        with open(
            self.folder_structure.migration_reports_file, "w"
        ) as migration_report_file:
            logging.info(
                f"Writing migration- and mapping report to {self.folder_structure.migration_reports_file}"
            )
            Helper.write_migration_report(
                migration_report_file, self.mapper.migration_report
            )
            Helper.print_mapping_report(
                migration_report_file,
                self.total_records,
                self.mapper.mapped_folio_fields,
                self.mapper.mapped_legacy_fields,
            )
        logging.info("All done!")

    @staticmethod
    def to_key(holding, fields_criteria):
        """Build the merge key from the holding's call number ('c'),
        instance id ('b') and location ('l'), as selected by
        *fields_criteria*."""
        try:
            # NOTE(review): stray no-op string below duplicates the docstring.
            """creates a key of key values in holding record
            to determine uniquenes"""
            call_number = (
                "".join(holding.get("callNumber", "").split())
                if "c" in fields_criteria
                else ""
            )
            instance_id = holding["instanceId"] if "b" in fields_criteria else ""
            location_id = (
                holding["permanentLocationId"] if "l" in fields_criteria else ""
            )
            return "-".join([instance_id, call_number, location_id, ""])
        except Exception as ee:
            print(holding)
            raise ee

    def merge_holding(self, key, old_holdings_record, new_holdings_record):
        """Fold *new_holdings_record*'s notes, statements and former ids into
        the already-stored holding at *key*, deduplicating each list.

        NOTE(review): the *old_holdings_record* parameter is unused; the
        method reads the stored record via ``self.holdings[key]`` instead.
        """
        # TODO: Move to interface or parent class and make more generic
        if self.holdings[key].get("notes", None):
            self.holdings[key]["notes"].extend(new_holdings_record.get("notes", []))
            self.holdings[key]["notes"] = dedupe(self.holdings[key].get("notes", []))
        if self.holdings[key].get("holdingsStatements", None):
            self.holdings[key]["holdingsStatements"].extend(
                new_holdings_record.get("holdingsStatements", [])
            )
            self.holdings[key]["holdingsStatements"] = dedupe(
                self.holdings[key]["holdingsStatements"]
            )
        if self.holdings[key].get("formerIds", None):
            self.holdings[key]["formerIds"].extend(
                new_holdings_record.get("formerIds", [])
            )
            self.holdings[key]["formerIds"] = list(set(self.holdings[key]["formerIds"]))
def parse_args():
    """Parse and return CLI arguments, logging the connection settings.

    Uses PromptParser so any argument omitted on the command line is prompted
    for interactively; the password prompt is masked (secure=True).
    """
    parser = PromptParser()
    parser.add_argument("base_folder", help="Base folder of the client.")
    parser.add_argument("okapi_url", help=("OKAPI base url"))
    parser.add_argument("tenant_id", help=("id of the FOLIO tenant."))
    parser.add_argument("username", help=("the api user"))
    parser.add_argument("--password", help="the api users password", secure=True)
    parser.add_argument(
        "--suppress",
        "-ds",
        help="This batch of records are to be suppressed in FOLIO.",
        default=False,
        type=bool,
    )
    flavourhelp = (
        "What criterias do you want to use when merging holdings?\t "
        "All these parameters need to be the same in order to become "
        "the same Holdings record in FOLIO. \n"
        "\tclb\t-\tCallNumber, Location, Bib ID\n"
        "\tlb\t-\tLocation and Bib ID only\n"
        "\tclb_7b94034e-ac0d-49c9-9417-0631a35d506b\t-\t "
        "Exclude bound-with holdings from merging. Requires a "
        "Holdings type in the tenant with this Id"
    )
    parser.add_argument(
        "--holdings_merge_criteria", "-hmc", default="clb", help=flavourhelp
    )
    parser.add_argument(
        "--log_level_debug",
        "-debug",
        help="Set log level to debug",
        default=False,
        type=bool,
    )
    parser.add_argument(
        "--time_stamp",
        "-ts",
        help="Time Stamp String (YYYYMMDD-HHMMSS) from Instance transformation. Required",
    )
    args = parser.parse_args()
    logging.info(f"\tOkapi URL:\t{args.okapi_url}")
    # Fixed typo in the log label ("Tenanti Id" -> "Tenant Id").
    logging.info(f"\tTenant Id:\t{args.tenant_id}")
    logging.info(f"\tUsername:\t{args.username}")
    logging.info("\tPassword:\tSecret")
    return args
def dedupe(list_of_dicts):
    """Remove duplicate dicts from *list_of_dicts*, preserving first-seen order.

    Two dicts are duplicates when they have identical items in identical
    insertion order; values must be hashable.

    Fixed: the original round-tripped through a set of item-tuples, which
    produced a nondeterministically ordered result (scrambling note order);
    this version keeps the input order.
    """
    # TODO: Move to interface or parent class
    seen = set()
    unique = []
    for d in list_of_dicts:
        key = tuple(d.items())
        if key not in seen:
            seen.add(key)
            unique.append(d)
    return unique
def main():
    """Main Method. Used for bootstrapping: parses arguments, connects to
    FOLIO, loads the mapping/reference files and runs the Worker."""
    csv.register_dialect("tsv", delimiter="\t")
    args = parse_args()
    folder_structure = FolderStructure(args.base_folder, args.time_stamp)
    folder_structure.setup_migration_file_structure("holdingsrecord", "item")
    Worker.setup_logging(folder_structure, args.log_level_debug)
    folder_structure.log_folder_structure()
    try:
        folio_client = FolioClient(
            args.okapi_url, args.tenant_id, args.username, args.password
        )
    except requests.exceptions.SSLError as sslerror:
        logging.error(sslerror)
        logging.error(
            "Network Error. Are you connected to the Internet? Do you need VPN? {}"
        )
        sys.exit()
    # Source data files
    files = [
        os.path.join(folder_structure.legacy_records_folder, f)
        for f in listdir(folder_structure.legacy_records_folder)
        if isfile(os.path.join(folder_structure.legacy_records_folder, f))
    ]
    logging.info("Files to process:")
    for f in files:
        logging.info(f"\t{f}")
    # All the paths...
    try:
        with open(
            folder_structure.call_number_type_map_path, "r"
        ) as callnumber_type_map_f, open(
            folder_structure.instance_id_map_path, "r"
        ) as instance_id_map_file, open(
            folder_structure.holdings_map_path
        ) as holdings_mapper_f, open(
            folder_structure.locations_map_path
        ) as location_map_f:
            # The instance id map is newline-delimited JSON; load it into a
            # dict keyed on the legacy id.
            instance_id_map = {}
            for index, json_string in enumerate(instance_id_map_file):
                # {"legacy_id", "folio_id","instanceLevelCallNumber"}
                if index % 100000 == 0:
                    print(f"{index} instance ids loaded to map", end="\r")
                map_object = json.loads(json_string)
                instance_id_map[map_object["legacy_id"]] = map_object
            logging.info(f"Loaded {index} migrated instance IDs")
            holdings_map = json.load(holdings_mapper_f)
            logging.info(
                f'{len(holdings_map["data"])} fields in holdings mapping file map'
            )
            mapped_fields = MappingFileMapperBase.get_mapped_folio_properties_from_map(
                holdings_map
            )
            logging.info(
                f"{len(list(mapped_fields))} Mapped fields in holdings mapping file map"
            )
            location_map = list(csv.DictReader(location_map_f, dialect="tsv"))
            call_number_type_map = list(
                csv.DictReader(callnumber_type_map_f, dialect="tsv")
            )
            logging.info(f"Found {len(location_map)} rows in location map")
            mapper = HoldingsMapper(
                folio_client,
                holdings_map,
                location_map,
                call_number_type_map,
                instance_id_map,
            )
            worker = Worker(
                folio_client,
                mapper,
                files,
                folder_structure,
                args.holdings_merge_criteria,
            )
            worker.work()
            worker.wrap_up()
    except TransformationProcessError as process_error:
        # Known, unrecoverable process errors halt the run cleanly.
        logging.critical(process_error)
        logging.critical("Halting.")
        sys.exit()
    except Exception as exception:
        logging.info("\n=======ERROR===========")
        logging.info(f"{exception}")
        logging.info("\n=======Stack Trace===========")
        traceback.print_exc()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6507243 | #!/usr/bin/env python
import os
from Generic import *
from plasTeX.ConfigManager import TooManyValues
from String import StringOption
from UserList import UserList
class MultiParser(GenericParser):
    """Parser mixin that always returns option arguments as a list.

    NOTE: this file is Python 2 code (``os.environ.has_key``, old-style
    ``raise`` elsewhere in the module).
    """

    def getArgument(self, args, range=None, delim=None, forcedarg=False):
        # Fall back to this option's configured range/delimiter when the
        # caller does not override them.
        if range is None:
            range = self.range[:]
        if delim is None:
            delim = self.delim
        new_args, args = GenericParser.getArgument(self, args, range, delim, forcedarg=forcedarg)
        # Normalize the parsed value to a list: keep lists/tuples as-is,
        # map None to an empty list, and wrap single values.
        if type(new_args) in [list, tuple]:
            return new_args, args
        elif new_args is None:
            return [], args
        return [new_args], args
class MultiOption(MultiParser, GenericOption, UserList):
    """
    Multiple configuration option

    Multi options are options delimited by a specified character.  They
    can also be represented by an option specified multiple times.
    All other options, when specified more than once, will overwrite
    their previous value.  Multi options will append values each
    time an option is specified.
    """
    synopsis = 'val1,val2,...'

    def __init__(self, docstring=DEFAULTS['docstring'],
                 options=DEFAULTS['options'],
                 default=[],
                 optional=DEFAULTS['optional'],
                 values=DEFAULTS['values'],
                 category=DEFAULTS['category'],
                 callback=DEFAULTS['callback'],
                 synopsis=DEFAULTS['synopsis'],
                 environ=DEFAULTS['environ'],
                 registry=DEFAULTS['registry'],
                 delim=',',
                 range=[1,'*'],
                 mandatory=None,
                 name=DEFAULTS['name'],
                 source=DEFAULTS['source'],
                 template=StringOption):
        """
        Initialize a multi option

        This class is initialized with the same options as the
        Option class with one addition: delim.  The 'delim' argument
        specifies what the delimiter is for each item in the list.
        If the delimiter is 'None' or whitespace, each item in the
        list is assumed to be delimited by whitespace.
        """
        self.delim = delim
        self.range = range
        # Each list item is parsed/validated by a single-value template
        # option; nesting MultiOptions is therefore disallowed.
        assert not issubclass(template, MultiOption), \
            'MultiOptions can not have a MultiOption as a template'
        assert issubclass(template, GenericOption), \
            'Templates must be a subclass of GenericOption'
        self.template = template(options=options,name=name,values=values)
        UserList.__init__(self, [])
        GenericOption.initialize(self, locals())

    def cast(self, arg):
        # Normalize any input to a list of template-cast values; a string is
        # split on the configured delimiter (whitespace when unset).
        if arg is None: return []
        if type(arg) in [list,tuple]:
            return [self.template.cast(x) for x in list(arg)]
        delim = self.delim
        if not delim:
            delim = ' '
        return [self.template.cast(v.strip()) for v in str(arg).split(delim)
                if v.strip()]

    def getValue(self, value=None):
        """ Return value for option """
        # Precedence: command line > environment > parsed data > declared
        # default > the fallback *value* argument.
        if self.data and self.source & COMMANDLINE:
            return self.data
        if self.environ and os.environ.has_key(str(self.environ)):
            self.source = ENVIRONMENT
            self.file = None
            return self.cast(os.environ[str(self.environ)])
        if self.data:
            return self.data
        if self.default:
            self.source = BUILTIN
            self.file = None
            return self.default
        self.source = CODE
        self.file = None
        if value is None:
            return []
        return value

    def clearValue(self):
        """ Clear the value to be unset """
        self.data = []

    def __repr__(self):
        """ Print command-line representation """
        delim = self.delim
        if not delim:
            delim = ' '
        if self.data:
            option = self.actual
            if not option:
                args = self.getPossibleOptions()
                if args: option = args[0]
            if option:
                return str('%s %s' % (option, delim.join(self.data))).strip()
        return ''

    def __iadd__(self, other):
        """ Append a value to the list """
        if callable(self.callback):
            other = self.callback(self.cast(other))
        self.data += self.validate(other)
        range = self.validateRange(self.range)
        name = self.name
        if self.actual: name = self.actual
        # Enforce the upper bound of the allowed occurrence range.
        if len(self.data) > range[1]:
            raise TooManyValues, "Expecting at most %s values for option '%s'." % (range[1], name)
        return self

    def validate(self, arg):
        """ Validate the value of the option """
        # Delegate per-item validation to the template option.
        new_values = []
        for i in self.cast(arg):
            # new_values.append(self.checkValues(i))
            new_values.append(self.template.validate(i))
        return new_values

    def checkValues(self, value):
        return self.template.checkValues(value)

    def __str__(self):
        # NOTE(review): both branches return, so the trailing
        # `return str(self.data)` below is unreachable dead code.
        if self.delim and self.delim.strip():
            delim = '%s ' % self.delim
            return delim.join([str(x) for x in self.data])
        else:
            return '\n'.join([str(x) for x in self.data])
        return str(self.data)

    def acceptsArgument(self):
        """ Return a boolean indicating if the option accepts an argument """
        range = self.validateRange(self.range)
        return not(not(range[1]))

    def requiresArgument(self):
        """ Return a boolean indicating if the option requires an argument """
        range = self.validateRange(self.range)
        return not(not(range[0]))

    def setValue(self, value):
        """
        Set the value of the option to the given value

        Once the value is set to the new value and validated, the
        specified callback function is called with that value as its
        only argument.
        """
        if value is None or ((type(value) in [list,tuple]) and not(value)):
            self.clearValue()
        else:
            if callable(self.callback):
                value = self.callback(self.cast(value))
            self.data = self.validate(value)
class MultiArgument(GenericArgument, MultiOption):
    """ Multiple command-line option """

    def __init__(self, docstring=DEFAULTS['docstring'],
                 options=DEFAULTS['options'],
                 default=[],
                 optional=DEFAULTS['optional'],
                 values=DEFAULTS['values'],
                 category=DEFAULTS['category'],
                 callback=DEFAULTS['callback'],
                 synopsis=DEFAULTS['synopsis'],
                 environ=DEFAULTS['environ'],
                 registry=DEFAULTS['registry'],
                 delim=' ',
                 range=[1,'*'],
                 mandatory=None,
                 name=DEFAULTS['name'],
                 source=DEFAULTS['source'],
                 template=StringOption):
        """ Initialize a multi argument

        Same parameters as MultiOption, but the default delimiter is a
        space (positional arguments are whitespace-separated).
        """
        self.delim = delim
        self.range = range
        # Templates must be single-value options; nesting multi types is
        # disallowed.
        assert not issubclass(template, MultiArgument), \
            'MultiArguments can not have a MultiArguments as a template'
        assert not issubclass(template, MultiOption), \
            'MultiOptions can not have a MultiOptions as a template'
        assert issubclass(template, GenericOption), \
            'Templates must be a subclass of GenericOption'
        self.template = template(options=options,name=name,values=values)
        UserList.__init__(self, [])
        GenericOption.initialize(self, locals())
| StarcoderdataPython |
4812851 | #!/usr/bin/python -tt
#=======================================================================
# General Documentation
"""Version number and additional information for package.
"""
#-----------------------------------------------------------------------
# Additional Documentation
#
# RCS Revision Code:
# $Id: package_version.py,v 1.10 2004/07/16 18:27:27 jlin Exp $
#
# Modification History:
# - 03 Mar 2004: Original by <NAME>, Computation Institute,
# University of Chicago. Passed passably reasonable tests.
#
# Notes:
# - Written for Python 2.2.
# - No import statements in module.
#
# Copyright (c) 2004 by <NAME>. For licensing, distribution
# conditions, contact information, and additional documentation see
# the URL http://www.johnny-lin.com/py_pkgs/atmqty/doc/.
#=======================================================================
# Package metadata exposed as simple module-level constants.
version = '0.2.0.2'  # package version string
author = '<NAME> <http://www.johnny-lin.com/>'  # author name and homepage
date = '16 Jul 2004'  # release date of this version
credits = 'Package contributions: <NAME>, <NAME>.'
# ===== end file =====
| StarcoderdataPython |
5001337 | from functools import total_ordering
# Tuning constants for the city-attack reward heuristics below.
# Presumably these mirror the scenario's configured damage/reward values --
# TODO confirm against the SCAII city_attack scenario config.
TANK_DAMAGE = 10          # damage our tank deals per tick
BIG_FORT_DAMAGE = 10      # damage a big fort deals to us per tick
SMALL_FORT_DAMAGE = 5     # damage a small fort deals to us per tick
BIG_FORT_REWARD = 70      # reward for destroying a big fort
SMALL_FORT_REWARD = 50    # reward for destroying a small fort
BIG_CITY_PENALTY = 150    # penalty for destroying a big (friendly) city
SMALL_CITY_PENALTY = 120  # penalty for destroying a small (friendly) city
def eval_state(state):
    """Rank every non-tank object on the map by attack desirability.

    Returns ``(best_quadrant, units)`` where ``units`` is a list of
    ComparableUnit wrappers sorted ascending by preference (best target
    last) and ``best_quadrant`` is that best target's quadrant.

    Fixes over the original: ``dict([])`` -> ``{}``, ``not x == None`` ->
    ``x is not None``, and the unused ``Actions`` import was dropped.
    """
    from scaii.env.sky_rts.env.scenarios.city_attack import UnitType

    enemy_tank = None
    friendly_tank = None
    quadrants = {}
    tank_quadrant = None
    for _, obj in state.objects.items():
        # Tanks are tracked separately; everything else is bucketed by
        # the quadrant of its bounding-box centroid.
        if obj.unit_type == UnitType.TANK:
            if not obj.is_friendly:
                enemy_tank = obj
            else:
                friendly_tank = obj
            continue
        centroid_x = (obj.min_x + obj.max_x) / 2
        centroid_y = (obj.min_y + obj.max_y) / 2
        # NOTE(review): objects sharing a quadrant silently overwrite each
        # other here -- confirm at most one target per quadrant is intended.
        quadrants[find_quadrant(centroid_x, centroid_y)] = obj
    if enemy_tank is not None:
        centroid_x = (enemy_tank.min_x + enemy_tank.max_x) / 2
        centroid_y = (enemy_tank.min_y + enemy_tank.max_y) / 2
        tank_quadrant = find_quadrant(centroid_x, centroid_y)
    # The enemy tank is only attached to the unit sharing its quadrant.
    units = [
        ComparableUnit(
            unit,
            friendly_tank,
            quadrant,
            enemy_tank if tank_quadrant == quadrant else None,
        )
        for quadrant, unit in quadrants.items()
    ]
    units.sort()
    return (units[-1].quadrant, units)
def find_quadrant(centroid_x, centroid_y):
    """Map a map-space centroid onto one of the four quadrants.

    The map is split around the fixed center (20, 20); a point lying exactly
    on either center axis falls through to the sentinel string "tank".
    """
    from scaii.env.sky_rts.env.scenarios.city_attack import Actions

    CENTER_X, CENTER_Y = 20, 20
    right = centroid_x > CENTER_X
    left = centroid_x < CENTER_X
    above = centroid_y < CENTER_Y
    below = centroid_y > CENTER_Y
    if right and above:
        return Actions.Q1
    if left and above:
        return Actions.Q2
    if left and below:
        return Actions.Q3
    if right and below:
        return Actions.Q4
    return "tank"
@total_ordering
class ComparableUnit():
    """Wraps a game object so candidate targets can be sorted by preference.

    ``total_ordering`` derives the remaining comparisons from ``__eq__`` and
    ``__gt__``. NOTE(review): ``__eq__`` compares map position only, and no
    ``__hash__`` is defined alongside it (instances become unhashable).
    """

    def __init__(self, unit, tank, quadrant, enemy_tank):
        self.unit = unit
        # hack
        self.tank = tank
        self.quadrant = quadrant
        # The enemy tank, only when it shares this unit's quadrant; else None.
        self.enemy_tank = enemy_tank
        # Cached copies of the wrapped unit's fields used during sorting.
        self.hp = self.unit.hp
        self.is_friendly = self.unit.is_friendly
        self.unit_type = self.unit.unit_type

    def will_kill_us(self):
        # Delegates to the module-level heuristic for this unit vs our tank.
        return will_kill_us(self.unit, self.tank)

    def will_be_killed(self):
        return not self.will_kill_us()

    def __eq__(self, other):
        return self.unit.min_x == other.unit.min_x and self.unit.min_y == other.unit.min_y

    def __gt__(self, other):
        """True when self is the more desirable target than other."""
        # NOTE(review): local `tank` is assigned but never used below.
        tank = self.tank
        # Quadrants containing the enemy tank beat friendly units outright.
        if self.enemy_tank != None and other.is_friendly:
            return True
        elif self.is_friendly and other.enemy_tank != None:
            return False
        # Enemy tank vs enemy fort: prefer the tank when the fort would kill
        # us or when the threatened city would fall before the fort does.
        elif self.enemy_tank != None and (not other.is_friendly):
            return other.will_kill_us() or city_will_die(self, self.enemy_tank, other)
        elif (not self.is_friendly) and other.enemy_tank != None:
            return not (self.will_kill_us() or city_will_die(other, other.enemy_tank, self))
        # Friendly always is bad
        if self.is_friendly != other.is_friendly:
            return not self.is_friendly
        elif self.is_friendly and other.is_friendly:
            return immediate_reward(self.unit, self.enemy_tank, self.tank) >= immediate_reward(other.unit, other.enemy_tank, other.tank)
        # Cases where we might die
        if self.will_kill_us() != other.will_kill_us():
            return self.will_be_killed()
        elif self.will_kill_us() and other.will_kill_us():
            return immediate_reward(self.unit, self.enemy_tank, self.tank) >= immediate_reward(other.unit, other.enemy_tank, other.tank)
        else:  # i.e. we could kill both
            if self.unit_type != other.unit_type:
                return tower_pref(self.unit, other.unit)
            else:
                # Same type: prefer the weaker (faster-to-kill) one.
                return self.hp < other.hp
def tower_pref(fort, other):
    """Order two enemy forts of different sizes; True when *fort* is the
    preferred target. Small forts are discounted by the big/small damage
    ratio since they hurt us less while we kill them."""
    from scaii.env.sky_rts.env.scenarios.city_attack import UnitType

    pref_factor = BIG_FORT_DAMAGE / SMALL_FORT_DAMAGE
    fort_ticks = ticks_til_death(fort.hp, TANK_DAMAGE)
    other_ticks = ticks_til_death(other.hp, TANK_DAMAGE)
    if fort.unit_type == UnitType.BIG_FORT:
        return fort_ticks < other_ticks * pref_factor
    return fort_ticks * pref_factor > other_ticks
def city_will_die(city, enemy_tank, fort):
    """True when *city* falls before we can destroy *fort*.

    The +3 pads the fort estimate for travel time; *enemy_tank* is unused
    but kept for interface compatibility with callers.
    """
    ticks_until_city_falls = ticks_til_death(city.hp, TANK_DAMAGE)
    ticks_until_fort_falls = ticks_til_death(fort.hp, TANK_DAMAGE) + 3
    return ticks_until_city_falls < ticks_until_fort_falls
def will_kill_us(unit, tank):
    """True when engaging *unit* is projected to drop our tank to 0 hp."""
    remaining_hp = next_hp(unit, tank)
    return remaining_hp <= 0
def immediate_reward(unit, enemy_tank, tank):
    """Estimate the immediate scalar reward for attacking *unit*.

    Branches: (1) an enemy tank in the quadrant dominates, (2) friendly
    units yield accumulated damage plus the destruction penalty/reward,
    (3) enemy forts trade our damage taken against the kill bonus.
    """
    from scaii.env.sky_rts.env.scenarios.city_attack import UnitType
    # Enemy tank present: reward is the negative damage accrued while
    # killing it.
    if enemy_tank != None:
        enemy_kill_ticks = ticks_til_death(enemy_tank.hp, TANK_DAMAGE)
        # approximation of the extra negative reward we'll get while
        # travelling to the tank
        negative_ticks = enemy_kill_ticks + 2
        return negative_ticks * -TANK_DAMAGE
    # Friendly unit: destroying it costs the per-tick damage plus a
    # type-specific penalty (forts use their reward value as the penalty).
    if unit.is_friendly:
        kill_ticks = ticks_til_death(unit.hp, TANK_DAMAGE)
        penalty = None
        if unit.unit_type == UnitType.BIG_FORT:
            # print("foo")
            penalty = BIG_FORT_REWARD
        elif unit.unit_type == UnitType.SMALL_FORT:
            # print("b")
            penalty = SMALL_FORT_REWARD
        elif unit.unit_type == UnitType.BIG_CITY:
            # print("c")
            penalty = BIG_CITY_PENALTY
        elif unit.unit_type == UnitType.SMALL_CITY:
            # print("d")
            penalty = SMALL_CITY_PENALTY
        # print(unit.unit_type)
        return (kill_ticks * -TANK_DAMAGE) - penalty
    # Enemy fort: trade our expected damage taken against the kill bonus.
    bonus = None
    damage_factor = None
    if unit.unit_type == UnitType.BIG_FORT:
        bonus = BIG_FORT_REWARD
        damage_factor = BIG_FORT_DAMAGE
    elif unit.unit_type == UnitType.SMALL_FORT:
        bonus = SMALL_FORT_REWARD
        damage_factor = SMALL_FORT_DAMAGE
    # NOTE(review): a non-fort enemy leaves damage_factor as None and the
    # division inside ticks_til_death would raise -- presumably only forts
    # reach this branch; confirm.
    #print(damage_factor, unit)
    we_die_in = ticks_til_death(tank.hp, damage_factor)
    they_die_in = ticks_til_death(unit.hp, TANK_DAMAGE)
    # -1 to estimate us getting hit an extra time due to travel
    if we_die_in - 1 < they_die_in:
        we_die_in = we_die_in - 1
        return we_die_in * TANK_DAMAGE
    else:
        return (they_die_in * TANK_DAMAGE) + bonus
def ticks_til_death(hp, dmg_per_tick):
    """Whole game ticks needed to reduce *hp* to zero at *dmg_per_tick*."""
    import math

    assert hp is not None and dmg_per_tick is not None
    remaining_ticks = hp / dmg_per_tick
    return math.ceil(remaining_ticks)
def _next_hp_raw(unit, tank):
    """Unclamped projection of our tank's hp after engaging *unit*.

    Friendly units don't fight back, so our hp is unchanged; enemy forts
    deal their per-tick damage for every tick we need to kill them (+1).
    """
    # NOTE(review): `math` is imported but unused in this function.
    import math
    from scaii.env.sky_rts.env.scenarios.city_attack import UnitType
    if unit.is_friendly:
        return tank.hp
    damage_factor = None
    if unit.unit_type == UnitType.BIG_FORT:
        damage_factor = BIG_FORT_DAMAGE
    elif unit.unit_type == UnitType.SMALL_FORT:
        damage_factor = SMALL_FORT_DAMAGE
    # NOTE(review): a non-fort enemy leaves damage_factor as None, making the
    # final multiplication raise TypeError -- presumably only forts reach
    # here; confirm.
    # number of ticks it takes us to kill the unit
    damage_ticks = ticks_til_death(unit.hp, TANK_DAMAGE)
    # Approximation of "extra damage" we take
    # approaching the tower or due to attack speed
    damage_ticks += 1
    return tank.hp - (damage_ticks * damage_factor)
def next_hp(unit, tank):
    """Projected tank hp after engaging *unit*, clamped at zero."""
    projected = _next_hp_raw(unit, tank)
    return projected if projected > 0 else 0
| StarcoderdataPython |
1723894 | <filename>exercicio95.py
# Create a program with a fatorial() function that takes two parameters: the first indicates the number whose factorial to compute, and the second, called show, is an optional boolean indicating whether the factorial calculation steps are printed to the screen.
def fatorial(n, show=True):
    """Compute n! iteratively; when *show* is True, print each step
    in the form "n x n-1 x ... x 1 = " (no trailing newline)."""
    result = 1
    for factor in range(n, 0, -1):
        result *= factor
        if show:
            separator = ' x ' if factor > 1 else ' = '
            print(factor, end='')
            print(separator, end='')
    return result
# Read an integer from the user and print its factorial with the steps shown.
num=int(input('digite: '))
print(f'{fatorial(num)}')
| StarcoderdataPython |
1674220 | <filename>lib/utils/PrefetchingIter.py
# --------------------------------------------------------
# Deep Feature Flow
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by <NAME>
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------
import mxnet as mx
from mxnet.io import DataDesc, DataBatch
import threading
class PrefetchingIter(mx.io.DataIter):
    """Base class for prefetching iterators. Takes one or more DataIters (
    or any class with "reset" and "next" methods) and combine them with
    prefetching. For example:

    Parameters
    ----------
    iters : DataIter or list of DataIter
        one or more DataIters (or any class with "reset" and "next" methods)
    rename_data : None or list of dict
        i-th element is a renaming map for i-th iter, in the form of
        {'original_name' : 'new_name'}. Should have one entry for each entry
        in iter[i].provide_data
    rename_label : None or list of dict
        Similar to rename_data
    Examples
    --------
    iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})],
                           rename_data=[{'data': 'data1'}, {'data': 'data2'}])
    """
    def __init__(self, iters, rename_data=None, rename_label=None):
        super(PrefetchingIter, self).__init__()
        if not isinstance(iters, list):
            iters = [iters]
        self.n_iter = len(iters)
        assert self.n_iter == 1, "Our prefetching iter only support 1 DataIter"
        self.iters = iters
        self.rename_data = rename_data
        self.rename_label = rename_label
        # provide_data entries look like (name, shape, ...); shape[0] is the
        # per-iter batch size -- assumes batch-major shapes (TODO confirm).
        self.batch_size = len(self.provide_data) * self.provide_data[0][0][1][0]
        # Handshake events between the consumer and each prefetch thread:
        # data_ready[i] -> a batch is waiting; data_taken[i] -> fetch the next.
        self.data_ready = [threading.Event() for i in range(self.n_iter)]
        self.data_taken = [threading.Event() for i in range(self.n_iter)]
        for e in self.data_taken:
            e.set()
        self.started = True
        self.current_batch = [None for _ in range(self.n_iter)]
        self.next_batch = [None for _ in range(self.n_iter)]

        def prefetch_func(self, i):
            """Thread entry: keep fetching batches for iter i until exhausted."""
            while True:
                self.data_taken[i].wait()
                if not self.started:
                    break
                try:
                    self.next_batch[i] = self.iters[i].next()
                except StopIteration:
                    self.next_batch[i] = None
                self.data_taken[i].clear()
                self.data_ready[i].set()

        # BUGFIX: the thread bootstrap must run inside __init__ (it previously
        # sat unreachable inside the worker after an infinite loop), and
        # `prefetch_func` must resolve to the local function defined above.
        self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i])
                                 for i in range(self.n_iter)]
        for thread in self.prefetch_threads:
            thread.daemon = True
            thread.start()

    def __del__(self):
        # Wake the workers so they observe started == False and exit.
        self.started = False
        for e in self.data_taken:
            e.set()
        for thread in self.prefetch_threads:
            thread.join()

    @property
    def provide_data(self):
        """The name and shape of data provided by this iterator"""
        if self.rename_data is None:
            return sum([i.provide_data for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_data
            ] for r, i in zip(self.rename_data, self.iters)], [])

    @property
    def provide_label(self):
        """The name and shape of label provided by this iterator"""
        if self.rename_label is None:
            return sum([i.provide_label for i in self.iters], [])
        else:
            return sum([[
                DataDesc(r[x.name], x.shape, x.dtype)
                if isinstance(x, DataDesc) else DataDesc(*x)
                for x in i.provide_label
            ] for r, i in zip(self.rename_label, self.iters)], [])

    def reset(self):
        """Reset the underlying iters once the in-flight prefetch completes."""
        for e in self.data_ready:
            e.wait()
        for i in self.iters:
            i.reset()
        for e in self.data_ready:
            e.clear()
        for e in self.data_taken:
            e.set()

    def iter_next(self):
        """Advance to the prefetched batch; False when the iter is exhausted."""
        for e in self.data_ready:
            e.wait()
        if self.next_batch[0] is None:
            return False
        else:
            self.current_batch = self.next_batch[0]
            for e in self.data_ready:
                e.clear()
            for e in self.data_taken:
                e.set()
            return True

    def next(self):
        if self.iter_next():
            return self.current_batch
        else:
            raise StopIteration

    def getdata(self):
        return self.current_batch.data

    def getlabel(self):
        return self.current_batch.label

    def getindex(self):
        return self.current_batch.index

    def getpad(self):
        return self.current_batch.pad
| StarcoderdataPython |
3323693 | <gh_stars>0
import numpy as np
import pandas as pd
from keras.preprocessing import image
from os.path import join
import math
import matplotlib.pyplot as plt
def read_img(data_dir, img_id, train_or_test, size):
    """Read and resize image.
    # Arguments
        img_id: string
        train_or_test: string 'train' or 'test'.
        size: resize the original image.
    # Returns
        Image as numpy array.
    """
    path = join(data_dir, train_or_test, '%s.jpg' % img_id)
    loaded = image.load_img(path, target_size=size)
    return image.img_to_array(loaded)
# Fixed RNG seed so the train/validation split in init() is reproducible.
SEED = 1987

def init(data_dir, num_classes=None):
    """Load labels.csv, one-hot encode breeds and split into train/validation.

    Args:
        data_dir: directory containing 'labels.csv' (columns: id, breed).
        num_classes: keep only the `num_classes` most frequent breeds;
            None keeps every breed.

    Returns:
        (labels, train_idx, valid_idx, ytr, yv): the filtered label frame,
        boolean train/validation masks, and the one-hot target rows selected
        by each mask.
    """
    labels = pd.read_csv(join(data_dir, 'labels.csv'))
    by_count = labels.groupby('breed').count().sort_values(by='id', ascending=False)
    if num_classes is None:
        selected_breed_list = list(by_count.index)
    else:
        selected_breed_list = list(by_count.head(num_classes).index)
    # .copy() avoids pandas' SettingWithCopyWarning on the assignments below.
    labels = labels[labels['breed'].isin(selected_breed_list)].copy()
    # Build one-hot vectors by pivoting a constant 'target' column.
    labels['target'] = 1
    labels['rank'] = labels.groupby('breed').rank()['id']
    # Keyword arguments are required here: positional DataFrame.pivot was
    # deprecated in pandas 1.1 and removed in pandas 2.0.
    labels_pivot = labels.pivot(index='id', columns='breed', values='target').reset_index().fillna(0)
    # Deterministic ~80/20 split.
    # NOTE(review): labels_pivot is sorted by id while rnd follows the csv row
    # order of `labels`; the masks assume the two orders agree -- verify.
    np.random.seed(seed=SEED)
    rnd = np.random.random(len(labels))
    train_idx = rnd < 0.8
    valid_idx = rnd >= 0.8
    y_train = labels_pivot[selected_breed_list].values
    ytr = y_train[train_idx]
    yv = y_train[valid_idx]
    return labels, train_idx, valid_idx, ytr, yv
def init2(data_dir, num_classes=None):
    """Like init() but without the train/validation split.

    Args:
        data_dir: directory containing 'labels.csv' (columns: id, breed).
        num_classes: keep only the `num_classes` most frequent breeds;
            None keeps every breed.

    Returns:
        (labels, y_train): the filtered label DataFrame and the full one-hot
        target matrix (rows ordered by id, one column per selected breed).
    """
    labels = pd.read_csv(join(data_dir, 'labels.csv'))
    by_count = labels.groupby('breed').count().sort_values(by='id', ascending=False)
    if num_classes is None:
        selected_breed_list = list(by_count.index)
    else:
        selected_breed_list = list(by_count.head(num_classes).index)
    # .copy() avoids pandas' SettingWithCopyWarning on the assignments below.
    labels = labels[labels['breed'].isin(selected_breed_list)].copy()
    # Build one-hot vectors by pivoting a constant 'target' column.
    labels['target'] = 1
    labels['rank'] = labels.groupby('breed').rank()['id']
    # Keyword arguments are required here: positional DataFrame.pivot was
    # deprecated in pandas 1.1 and removed in pandas 2.0.
    labels_pivot = labels.pivot(index='id', columns='breed', values='target').reset_index().fillna(0)
    y_train = labels_pivot[selected_breed_list].values
    return labels, y_train
# test_proportion of 3 means 1/3 so 33% test and 67% train
def shuffle(matrix, target, test_proportion):
    """Split matrix/target row-wise into train and test parts.

    Despite the name, no shuffling happens: the first 1/test_proportion of the
    rows become the test set and the remainder the train set.

    Returns:
        (X_train, X_test, Y_train, Y_test)
    """
    print("Matrix size: " + str(matrix.shape))
    # Integer floor division replaces math.floor(a / b); identical for
    # non-negative row counts and positive proportions.
    ratio = matrix.shape[0] // test_proportion
    X_train = matrix[ratio:, :]
    X_test = matrix[:ratio, :]
    Y_train = target[ratio:, :]
    Y_test = target[:ratio, :]
    return X_train, X_test, Y_train, Y_test
| StarcoderdataPython |
12806912 | <reponame>niraito/rptools
import pytest
from pathlib import Path
from rptools.rpviz.__main__ import (
__build_arg_parser,
__run,
)
REF_IN_DIR = Path(__file__).resolve().parent / 'inputs' / 'as_dir'
REF_IN_TAR = Path(__file__).resolve().parent / 'inputs' / 'as_tar.tgz'
REF_OUT_DIR = Path(__file__).resolve().parent / 'outputs'
def __read_and_sort(
    path: str
) -> list:
    """Return all lines of the file at *path*, sorted.

    BUGFIX: the previous version iterated over the characters of
    ``fh.readline()`` (first line only) and returned ``lines.sort()``, which
    is always None -- so equality comparisons between two results passed
    vacuously (None == None) without comparing any file content.
    """
    with open(path) as fh:
        lines = fh.readlines()
    return sorted(lines)
def test_build_arg_parser(mocker):
    """With no CLI arguments the parser must abort with SystemExit."""
    mocker.patch('sys.argv', ['prog'])
    parser = __build_arg_parser()
    with pytest.raises(SystemExit):
        parser.parse_args()
def test_dir_input(mocker, tmpdir):
    """Running on a directory input must reproduce the reference network.json."""
    out_dir = tmpdir / 'odir'
    mocker.patch('sys.argv', ['prog', str(REF_IN_DIR), str(out_dir)])
    parsed = __build_arg_parser().parse_args()
    __run(parsed)
    assert __read_and_sort(out_dir / 'network.json') \
        == __read_and_sort(REF_OUT_DIR / 'network.json')
def test_tar_input(mocker, tmpdir):
    """Running on a tarball input must reproduce the reference network.json."""
    out_dir = tmpdir / 'odir'
    mocker.patch('sys.argv', ['prog', str(REF_IN_TAR), str(out_dir)])
    parsed = __build_arg_parser().parse_args()
    __run(parsed)
    assert __read_and_sort(out_dir / 'network.json') \
        == __read_and_sort(REF_OUT_DIR / 'network.json')
3237191 | from bpy.types import (
Operator,
Panel,
UIList,
UI_UL_list,
)
from .internals import *
from .operators import (
rto_history,
rename,
phantom_history,
)
class CollectionManager(Operator):
    """Popup operator that lists the view layer's collections and exposes
    per-collection restriction toggles (exclude/select/hide/disable/render).

    State such as `expanded`, `collection_tree`, `layer_collections` and
    `rto_history` comes from the wildcard-imported `internals`/`operators`
    modules.
    """
    bl_label = "Collection Manager"
    bl_idname = "view3d.collection_manager"
    # Name of the view layer drawn last; used to detect view-layer switches.
    last_view_layer = ""
    def draw(self, context):
        """Draw the popup UI.  Layout calls are order-sensitive: title row,
        filter popover, expand/RTO toggle row, the collection list, then the
        add-collection and phantom-mode rows."""
        layout = self.layout
        scn = context.scene
        view_layer = context.view_layer.name
        # Rebuild the cached collection tree when the user switches view layers.
        if view_layer != self.last_view_layer:
            update_collection_tree(context)
            self.last_view_layer = view_layer
        title_row = layout.split(factor=0.5)
        main = title_row.row()
        view = title_row.row(align=True)
        view.alignment = 'RIGHT'
        main.label(text="Collection Manager")
        view.prop(context.view_layer, "use", text="")
        view.separator()
        window = context.window
        scene = window.scene
        view.template_search(
            window, "view_layer",
            scene, "view_layers",
            new="scene.view_layer_add",
            unlink="scene.view_layer_remove")
        layout.row().separator()
        layout.row().separator()
        filter_row = layout.row()
        filter_row.alignment = 'RIGHT'
        filter_row.popover(panel="COLLECTIONMANAGER_PT_restriction_toggles", text="", icon='FILTER')
        toggle_row = layout.split(factor=0.3)
        toggle_row.alignment = 'LEFT'
        sec1 = toggle_row.row()
        sec1.alignment = 'LEFT'
        sec1.enabled = False
        # Button label reflects whether anything is currently expanded.
        if len(expanded) > 0:
            text = "Collapse All Items"
        else:
            text = "Expand All Items"
        sec1.operator("view3d.expand_all_items", text=text)
        # Only enable the expand/collapse button if some collection has children.
        for laycol in collection_tree:
            if laycol["has_children"]:
                sec1.enabled = True
                break
        sec2 = toggle_row.row()
        sec2.alignment = 'RIGHT'
        # Global "un-set all" buttons; each is drawn depressed while its
        # undo history is non-empty.
        if scn.show_exclude:
            exclude_all_history = rto_history["exclude_all"].get(view_layer, [])
            depress = True if len(exclude_all_history) else False
            sec2.operator("view3d.un_exclude_all_collections", text="", icon='CHECKBOX_HLT', depress=depress)
        if scn.show_selectable:
            select_all_history = rto_history["select_all"].get(view_layer, [])
            depress = True if len(select_all_history) else False
            sec2.operator("view3d.un_restrict_select_all_collections", text="", icon='RESTRICT_SELECT_OFF', depress=depress)
        if scn.show_hideviewport:
            hide_all_history = rto_history["hide_all"].get(view_layer, [])
            depress = True if len(hide_all_history) else False
            sec2.operator("view3d.un_hide_all_collections", text="", icon='HIDE_OFF', depress=depress)
        if scn.show_disableviewport:
            disable_all_history = rto_history["disable_all"].get(view_layer, [])
            depress = True if len(disable_all_history) else False
            sec2.operator("view3d.un_disable_viewport_all_collections", text="", icon='RESTRICT_VIEW_OFF', depress=depress)
        if scn.show_render:
            render_all_history = rto_history["render_all"].get(view_layer, [])
            depress = True if len(render_all_history) else False
            sec2.operator("view3d.un_disable_render_all_collections", text="", icon='RESTRICT_RENDER_OFF', depress=depress)
        layout.row().template_list("CM_UL_items", "", context.scene, "CMListCollection", context.scene, "CMListIndex", rows=15, sort_lock=True)
        addcollec_row = layout.row()
        addcollec_row.operator("view3d.add_collection", text="Add Collection", icon='COLLECTION_NEW').child = False
        addcollec_row.operator("view3d.add_collection", text="Add SubCollection", icon='COLLECTION_NEW').child = True
        phantom_row = layout.row()
        toggle_text = "Disable " if scn.CM_Phantom_Mode else "Enable "
        phantom_row.operator("view3d.toggle_phantom_mode", text=toggle_text+"Phantom Mode")
        # Phantom mode is view-only: lock out structural edits.
        if scn.CM_Phantom_Mode:
            view.enabled = False
            addcollec_row.enabled = False
    def execute(self, context):
        """Prepare state and open the popup; width grows with tree depth."""
        wm = context.window_manager
        lvl = 0
        #expanded.clear()
        #excludeall_history.clear()
        #restrictselectall_history.clear()
        #hideall_history.clear()
        #disableviewall_history.clear()
        #disablerenderall_history.clear()
        update_property_group(context)
        lvl = get_max_lvl()
        # Cap the extra indentation-based width at 25 levels.
        if lvl > 25:
            lvl = 25
        self.view_layer = context.view_layer.name
        # sync selection in ui list with active layer collection
        # NOTE(review): bare except silently falls back to index 0 -- it also
        # hides unrelated errors; a narrower except would be safer.
        try:
            active_laycol_name = context.view_layer.active_layer_collection.name
            active_laycol_row_index = layer_collections[active_laycol_name]["row_index"]
            context.scene.CMListIndex = active_laycol_row_index
        except:
            context.scene.CMListIndex = 0
        # Leave phantom mode if the collection set or view layer changed
        # since the snapshot was taken.
        if context.scene.CM_Phantom_Mode:
            if set(layer_collections.keys()) != set(phantom_history["initial_state"].keys()):
                context.scene.CM_Phantom_Mode = False
            if context.view_layer.name != phantom_history["view_layer"]:
                context.scene.CM_Phantom_Mode = False
        return wm.invoke_popup(self, width=(400+(lvl*20)))
def update_selection(self, context):
    """Make the collection highlighted in the UI list the active layer collection."""
    item = context.scene.CMListCollection[context.scene.CMListIndex]
    context.view_layer.active_layer_collection = layer_collections[item.name]["ptr"]
def filter_items_by_name_insensitive(pattern, bitflag, items, propname="name", flags=None, reverse=False):
    """
    Set FILTER_ITEM for items which name matches filter_name one (case-insensitive).
    pattern is the filtering pattern.
    propname is the name of the string property to use for filtering.
    flags must be a list of integers the same length as items, or None!
    return a list of flags (based on given flags if not None),
    or an empty list if no flags were given and no filtering has been done.
    """
    import fnmatch

    if not pattern or not items:  # Empty pattern or list = no filtering!
        return flags or []

    if flags is None:
        flags = [0] * len(items)

    # Make the pattern case-insensitive and implicitly add
    # heading/trailing wildcards.
    pattern = "*" + pattern.lower() + "*"

    for i, item in enumerate(items):
        name = getattr(item, propname, None)
        # Robustness fix: items missing the property (name is None) used to
        # crash on .lower(); treat them as non-matching instead.
        name = name.lower() if name else ""
        # This is similar to a logical xor
        if bool(name and fnmatch.fnmatch(name, pattern)) is not bool(reverse):
            flags[i] |= bitflag
    return flags
class CM_UL_items(UIList):
    """UIList drawing one row per collection: expander, rename field,
    set-collection button and the per-collection restriction toggles."""
    # Last value typed into the filter box (currently unused here).
    last_filter_value = ""
    def draw_item(self, context, layout, data, item, icon, active_data,active_propname, index):
        """Draw a single collection row.  Widget order matters: indent,
        expander, icon, name, separator, set-collection, RTO toggles, remove."""
        self.use_filter_show = True
        scn = context.scene
        laycol = layer_collections[item.name]
        collection = laycol["ptr"].collection
        split = layout.split(factor=0.96)
        row = split.row(align=True)
        row.alignment = 'LEFT'
        # indent child items
        if laycol["lvl"] > 0:
            for x in range(laycol["lvl"]):
                row.label(icon='BLANK1')
        # add expander if collection has children to make UIList act like tree view
        if laycol["has_children"]:
            if laycol["expanded"]:
                prop = row.operator("view3d.expand_sublevel", text="", icon='DISCLOSURE_TRI_DOWN', emboss=False)
                prop.expand = False
                prop.name = item.name
                prop.index = index
            else:
                prop = row.operator("view3d.expand_sublevel", text="", icon='DISCLOSURE_TRI_RIGHT', emboss=False)
                prop.expand = True
                prop.name = item.name
                prop.index = index
        else:
            row.label(icon='BLANK1')
        row.label(icon='GROUP')
        name_row = row.row()
        #if rename[0] and index == scn.CMListIndex:
            #name_row.activate_init = True
            #rename[0] = False
        name_row.prop(item, "name", text="", expand=True)
        # used as a separator (actual separator not wide enough)
        row.label()
        # add set_collection op
        row_setcol = row.row()
        row_setcol.operator_context = 'INVOKE_DEFAULT'
        # Cube icon by default; volume icon when the active object is already
        # in this collection; disabled when nothing is selected.
        icon = 'MESH_CUBE'
        if len(context.selected_objects) > 0 and context.active_object:
            if context.active_object.name in collection.objects:
                icon = 'SNAP_VOLUME'
        else:
            row_setcol.enabled = False
        prop = row_setcol.operator("view3d.set_collection", text="", icon=icon, emboss=False)
        prop.collection_index = laycol["id"]
        prop.collection_name = item.name
        # Per-collection restriction toggles; each icon reflects current state.
        if scn.show_exclude:
            icon = 'CHECKBOX_DEHLT' if laycol["ptr"].exclude else 'CHECKBOX_HLT'
            row.operator("view3d.exclude_collection", text="", icon=icon, emboss=False).name = item.name
        if scn.show_selectable:
            icon = 'RESTRICT_SELECT_ON' if laycol["ptr"].collection.hide_select else 'RESTRICT_SELECT_OFF'
            row.operator("view3d.restrict_select_collection", text="", icon=icon, emboss=False).name = item.name
        if scn.show_hideviewport:
            icon = 'HIDE_ON' if laycol["ptr"].hide_viewport else 'HIDE_OFF'
            row.operator("view3d.hide_collection", text="", icon=icon, emboss=False).name = item.name
        if scn.show_disableviewport:
            icon = 'RESTRICT_VIEW_ON' if laycol["ptr"].collection.hide_viewport else 'RESTRICT_VIEW_OFF'
            row.operator("view3d.disable_viewport_collection", text="", icon=icon, emboss=False).name = item.name
        if scn.show_render:
            icon = 'RESTRICT_RENDER_ON' if laycol["ptr"].collection.hide_render else 'RESTRICT_RENDER_OFF'
            row.operator("view3d.disable_render_collection", text="", icon=icon, emboss=False).name = item.name
        rm_op = split.row()
        rm_op.alignment = 'RIGHT'
        rm_op.operator("view3d.remove_collection", text="", icon='X', emboss=False).collection_name = item.name
        # Phantom mode is view-only: lock rename/assign/remove widgets.
        if scn.CM_Phantom_Mode:
            name_row.enabled = False
            row_setcol.enabled = False
            rm_op.enabled = False
    def filter_items(self, context, data, propname):
        """Filter by the typed name (case-insensitive), then hide rows whose
        collection is collapsed out of view."""
        flt_flags = []
        flt_neworder = []
        list_items = getattr(data, propname)
        if self.filter_name:
            flt_flags = filter_items_by_name_insensitive(self.filter_name, self.bitflag_filter_item, list_items)
        else:
            flt_flags = [self.bitflag_filter_item] * len(list_items)
        for idx, item in enumerate(list_items):
            if not layer_collections[item.name]["visible"]:
                flt_flags[idx] = 0
        return flt_flags, flt_neworder
    def invoke(self, context, event):
        # Intentionally a no-op: clicks are handled by the row operators.
        pass
class CMRestrictionTogglesPanel(Panel):
    """Popover panel holding the five restriction-toggle visibility switches."""
    bl_label = "Restriction Toggles"
    bl_idname = "COLLECTIONMANAGER_PT_restriction_toggles"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'HEADER'

    def draw(self, context):
        """Draw one icon-only checkbox per toggle, in a single row."""
        row = self.layout.row()
        toggles = (
            ("show_exclude", 'CHECKBOX_HLT'),
            ("show_selectable", 'RESTRICT_SELECT_OFF'),
            ("show_hideviewport", 'HIDE_OFF'),
            ("show_disableviewport", 'RESTRICT_VIEW_OFF'),
            ("show_render", 'RESTRICT_RENDER_OFF'),
        )
        for prop_name, icon_name in toggles:
            row.prop(context.scene, prop_name, icon=icon_name, icon_only=True)
| StarcoderdataPython |
1874769 | <reponame>JohannesVerherstraeten/pypipeline<gh_stars>1-10
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar, Generic, TYPE_CHECKING, Dict, Any, Optional, Callable
from pypipeline.cell.icellobserver import ParameterUpdateEvent
from pypipeline.cellio.standardio import Input
from pypipeline.exceptions import InvalidInputException, NoInputProvidedException, \
InvalidStateException
if TYPE_CHECKING:
from pypipeline.cell import ICell
T = TypeVar('T')
class RuntimeParameter(Input[T], Generic[T]):
    """
    Runtime parameter class.
    A runtime parameter is a type of input that accepts 1 incoming connection and no outgoing connections.
    Every time a runtime parameter is pulled, it will pull the incoming connection, if available.
    If no incoming connection is present, it returns the default value.
    An IO is owned by its cell.
    An IO is the controlling class in the IO-ICell relation, as IO of the cell.
    An IConnectionEntryPoint is the controlled class in the IConnection-IConnectionEntryPoint relation, as the
    target of the connection.
    """
    # Key under which the default value is stored in the sync-state dict
    # exchanged by _get_sync_state/_set_sync_state.
    __DEFAULT_VALUE_KEY: str = "default_value"
    def __init__(self,
                 cell: "ICell",
                 name: str,
                 validation_fn: Optional[Callable[[T], bool]] = None):
        """
        Args:
            cell: the cell of which this IO will be part.
            name: the name of this IO. Should be unique within the cell.
            validation_fn: An optional validation function that will be used to validate every value that passes
                through this IO.
        """
        super(RuntimeParameter, self).__init__(cell, name, validation_fn)
        # Default value storage; only meaningful while __default_value_is_set
        # is True (None is a legal default, hence the separate flag).
        self.__default_value: T = None      # type: ignore
        self.__default_value_is_set: bool = False
    def set_default_value(self, value: T) -> None:
        """
        Args:
            value: the new default value for this runtime parameter. This value will be used when pulling the runtime
                parameter if no incoming connection is available.

        Raises:
            InvalidInputException: if the value fails this IO's validation.
        """
        # Validate before taking the state lock; notification of observers
        # happens while the lock is still held.
        if not self.can_have_as_value(value):
            raise InvalidInputException(f"{self}: Invalid value: {value}")
        with self._get_state_lock():
            self.logger.debug(f"{self}.set_default_value( {value} ) @ RuntimeParameter")
            self.__default_value = value
            self.__default_value_is_set = True
            event = ParameterUpdateEvent(self.get_cell())       # TODO avoid indirection of cell
            self.get_cell().notify_observers(event)
    def get_default_value(self) -> T:
        """
        Returns:
            The default value of this runtime parameter. This value will be used when pulling the runtime
            parameter if no incoming connection is available.

        Raises:
            NoInputProvidedException: if no default value has been set yet.
        """
        with self._get_state_lock():
            if not self.__default_value_is_set:
                raise NoInputProvidedException(f"{self}.get_default_value() called, but default value has not yet "
                                               f"been set.")
            return self.__default_value
    def default_value_is_set(self) -> bool:
        """
        Returns:
            True if a default value is provided for this runtime parameter, False otherwise.
        """
        with self._get_state_lock():
            return self.__default_value_is_set
    def _clear_default_value(self) -> None:
        """
        Clears the currently configured default value.
        """
        with self._get_state_lock():
            self.__default_value_is_set = False
    def assert_has_proper_default_value(self) -> None:
        """
        Raises:
            InvalidStateException: if the configured default value is invalid.
        """
        if self.default_value_is_set() and not self.can_have_as_value(self.get_default_value()):
            raise InvalidStateException(f"{self} has an invalid default value: {self.get_default_value()}")
    def pull(self) -> T:
        """Pull the incoming connection if present; fall back to the default
        value when there is no connection or the connection yields None."""
        if self.get_nb_incoming_connections() == 0:
            return self.get_default_value()
        result = super(RuntimeParameter, self).pull()
        if result is None:
            return self.get_default_value()
        return result
    def set_value(self, value: T) -> None:
        """
        Same as self.set_default_value().
        Note: this method is not related to self._set_value(value) which is used by incoming connections to set the
        (not-default) value of this RuntimeParameter.
        """
        self.set_default_value(value)
    def is_provided(self) -> bool:
        # A runtime parameter counts as provided if either a connection
        # provides it or a default value is configured.
        return super(RuntimeParameter, self).is_provided() or self.default_value_is_set()
    def _is_optional_even_when_typing_says_otherwise(self) -> bool:
        return True     # A RuntimeParameter can handle None values being set: it will return the default value instead
    def _get_sync_state(self) -> Dict[str, Any]:
        """Serialize this IO's state (including the default value, or None if
        no default is set) for synchronization."""
        with self._get_state_lock():
            state: Dict[str, Any] = super(RuntimeParameter, self)._get_sync_state()
            state[self.__DEFAULT_VALUE_KEY] = self.get_default_value() if self.default_value_is_set() else None
            return state
    def _set_sync_state(self, state: Dict) -> None:
        """Restore this IO's state from a sync-state dict; a None entry leaves
        the default value unset."""
        with self._get_state_lock():
            super(RuntimeParameter, self)._set_sync_state(state)
            if state[self.__DEFAULT_VALUE_KEY] is not None:
                self.set_default_value(state[self.__DEFAULT_VALUE_KEY])
    def get_nb_available_pulls(self) -> Optional[int]:
        """Number of pulls available; None (unlimited) when the parameter is
        served purely from its default value."""
        if self.get_nb_incoming_connections() == 0:
            return None
        return super(RuntimeParameter, self).get_nb_available_pulls()
    def assert_is_valid(self) -> None:
        """Validate the base IO state and the configured default value."""
        super(RuntimeParameter, self).assert_is_valid()
        self.assert_has_proper_default_value()
| StarcoderdataPython |
1877730 | <reponame>netor27/codefights-arcade-solutions
'''
In chess, queens can move any number of squares vertically, horizontally, or diagonally. The n-queens puzzle is the problem of placing n queens on an n × n chessboard so that no two queens can attack each other.
Given an integer n, print all possible distinct solutions to the n-queens puzzle. Each solution contains distinct board configurations of the placement of the n queens, where the solutions are arrays that contain permutations of [1, 2, 3, .. n]. The number in the ith position of the results array indicates that the ith column queen is placed in the row with that number. In your solution, the board configurations should be returned in lexicographical order.
Example
For n = 1, the output should be
nQueens(n) = [[1]].
For n = 4, the output should be
nQueens(n) = [[2, 4, 1, 3],
              [3, 1, 4, 2]]
This diagram of the second permutation, [3, 1, 4, 2], will help you visualize its configuration:
The element in the 1st position of the array, 3, indicates that the queen for column 1 is placed in row 3. Since the element in the 2nd position of the array is 1, the queen for column 2 is placed in row 1. The element in the 3rd position of the array is 4, meaning that the queen for column 3 is placed in row 4, and the element in the 4th position of the array is 2, meaning that the queen for column 4 is placed in row 2.
Input/Output
[execution time limit] 4 seconds (py3)
[input] integer n
The size of the board.
Guaranteed constraints:
1 ≤ n ≤ 10.
[output] array.array.integer
All possible distinct board configurations of the placement of the n queens, ordered lexicographically.
'''
# Module-level working state shared by the three functions below:
# queensPos collects finished solutions, auxPos holds the partial placement
# (auxPos[col] == row + 1 for each placed column).
queensPos = []
auxPos = []

def nQueens(n):
    """Return all n-queens solutions in lexicographical order.

    Each solution is a list whose i-th entry is the (1-based) row of the
    queen in column i.
    """
    # BUGFIX: these module-level lists need `global`, not `nonlocal` (there is
    # no enclosing function scope, so `nonlocal` was a SyntaxError).  Rebinding
    # queensPos also makes repeated calls independent instead of accumulating
    # into (and aliasing) previously returned results.
    global queensPos, auxPos
    queensPos = []
    auxPos = [0] * n
    processQueens(n, 0)
    return queensPos

def processQueens(n, x):
    """Recursively try every row for the queen of column x (backtracking)."""
    for y in range(n):
        if canPlaceQueen(x, y):
            auxPos[x] = y + 1
            if x == n - 1:
                # All columns placed: record a copy of the solution.
                queensPos.append(list(auxPos))
            else:
                processQueens(n, x + 1)

def canPlaceQueen(x, y):
    """True if a queen at column x, row y conflicts with no earlier column."""
    for i in range(x):
        # Same row, or same diagonal (|row delta| == |column delta|).
        if auxPos[i] - 1 == y or abs(auxPos[i] - 1 - y) == abs(i - x):
            return False
    return True
| StarcoderdataPython |
306588 | <reponame>trainsn/CSE_5543<filename>lab6/Qmesh.py
## \file decimate_mesh.py
# Some simple mesh decimation routines.
# Use data structure HALF_EDGE_MESH_DCMT_BASE (DCMT = decimate).
import math
from math import sqrt
from math import acos
import sys
import numpy as np
import half_edge_mesh
import half_edge_mesh_DCMT
import half_edge_mesh_IO
from half_edge_mesh_DCMT import HALF_EDGE_MESH_DCMT_BASE
from half_edge_mesh_DCMT\
import VERTEX_DCMT_BASE, HALF_EDGE_DCMT_BASE, CELL_DCMT_BASE
def main(argv):
    """Entry point: read an OFF mesh, iteratively improve its triangulation
    (join/split/collapse passes), then write the result to an OFF file."""
    global input_filename, output_filename
    global flag_allow_non_manifold, flag_fail_on_non_manifold
    # Initialize
    input_filename = None
    output_filename = None
    InitFlags()
    parse_command_line(sys.argv)
    mesh = HALF_EDGE_MESH_DCMT_BASE(VERTEX_DCMT_BASE, HALF_EDGE_DCMT_BASE, CELL_DCMT_BASE)
    half_edge_mesh_IO.open_and_read_off_file(input_filename, mesh)
    try:
        # Triangulate first, then alternate join/split/collapse passes for at
        # most 10 iterations.
        split_all_cells(mesh)
        old_triangles = mesh.NumCells()
        for iteration in range(10):
            join_triangle_with_large_angle(mesh)
            split_all_cells(mesh)
            collapse_shortest_edge_for_small_angle_cell(mesh)
            # NOTE(review): old_triangles is never updated inside the loop, so
            # this only stops early if the count returns to its initial value
            # -- confirm whether it should track the previous iteration.
            if old_triangles == mesh.NumCells():
                break
            print("finish iter {:d}".format(iteration))
        passed_check = check_mesh(mesh)
        print("Mesh data structure passed check.")
        print_mesh_info(mesh)
    except Exception as e:
        print(e)
        sys.stderr.write("Exiting.")
        exit(-1)
    # Choose an output name that never overwrites the input.
    if (output_filename is None):
        output_filename = "out.off"
    if (output_filename == input_filename):
        output_filename = "out2.off"
    print()
    print("Writing file: " + output_filename + ".")
    half_edge_mesh_IO.open_and_write_file(output_filename, mesh)
# *** Collapse edge routines ***

## Collapse edge
def collapse_edge(mesh, half_edge):
    """Collapse half_edge if legal (or if non-manifold collapses are allowed)."""
    global flag_allow_non_manifold

    flag = check_edge_collapse(mesh, half_edge)
    if mesh.IsIllegalEdgeCollapseH(half_edge):
        return

    if not (flag or flag_allow_non_manifold):
        print("Skipped collapse of edge (" + half_edge.EndpointsStr(",") + ").")
        return

    print("Collapsing edge (" + half_edge.EndpointsStr(",") + ").")
    vnew = mesh.CollapseEdge(half_edge.Index())
    if vnew is None:
        print("Skipped illegal collapse of edge (" + half_edge.EndpointsStr(",") + ").")
## Collapse shortest cell edge.
def collapse_shortest_cell_edge(mesh, icell):
    """Collapse the shortest edge of cell icell (no-op if the cell is gone)."""
    cell = mesh.Cell(icell)
    if cell is None:
        return
    _, _, ihalf_edge_min, _ = cell.ComputeMinMaxEdgeLengthSquared()
    collapse_edge(mesh, mesh.HalfEdge(ihalf_edge_min))
## Collapse shortest edge in each cell.
def collapse_shortest_edge_for_small_angle_cell(mesh):
    """Collapse the shortest edge of every cell whose minimum angle is below
    30 degrees (while the maximum angle stays below 150 degrees)."""
    # Hoist the angle thresholds out of the loop (cosines compare inverted:
    # larger cosine means smaller angle).
    cos_min_threshold = np.cos(30. * math.pi / 180.)
    cos_max_threshold = np.cos(150. * math.pi / 180.)
    # Iterate over a sorted snapshot of the cell indices.
    # DO NOT iterate over CellIndices() directly, since collapse/split/join
    # may delete cells.
    cell_indices_list = sorted(mesh.CellIndices())
    for icell in cell_indices_list:
        # cell may be None if it has been deleted from the cell dictionary.
        cell = mesh.Cell(icell)
        if cell is None:
            continue
        cos_minA, cos_maxA, ihalf_edge_min, ihalf_edge_max = cell.ComputeCosMinMaxAngle()
        if cos_minA > cos_min_threshold and cos_maxA > cos_max_threshold:
            collapse_shortest_cell_edge(mesh, icell)
# *** Split edge routines. ***

## Split edge.
def split_edge(mesh, half_edge):
    """Split half_edge, reporting the attempt and any failure."""
    print("Splitting edge (" + half_edge.EndpointsStr(",") + ").")
    if mesh.SplitEdge(half_edge.Index()) is None:
        print("Split of edge (" + half_edge.EndpointsStr(",") + ") failed.")
## Split longest cell edge.
def split_longest_cell_edge(mesh, icell):
    """Split the longest edge of cell icell (no-op if the cell is gone)."""
    cell = mesh.Cell(icell)
    if cell is None:
        return
    _, _, _, ihalf_edge_max = cell.ComputeMinMaxEdgeLengthSquared()
    split_edge(mesh, mesh.HalfEdge(ihalf_edge_max))
## Split longest edge in each cell.
def split_longest_edge_for_large_angle_cell(mesh):
    """Split the longest edge of every cell whose maximum angle exceeds
    120 degrees."""
    cos_threshold = np.cos(120. * math.pi / 180.)
    # Iterate over a sorted snapshot: collapse/split/join may delete cells.
    for icell in sorted(mesh.CellIndices()):
        # cell may be None if it has been deleted from the cell dictionary.
        cell = mesh.Cell(icell)
        if cell is None:
            continue
        _, cos_maxA, _, _ = cell.ComputeCosMinMaxAngle()
        # cos_maxA below the threshold means the max angle exceeds 120 deg.
        if cos_maxA < cos_threshold:
            split_longest_cell_edge(mesh, icell)
# *** Split cell routines. ***

## Split cell with diagonal (half_edgeA.FromVertex(), half_edgeB.FromVertex())
#  - Returns split edge.
#  - Returns None if split fails.
def split_cell(mesh, half_edgeA, half_edgeB):
    """Split one cell along the diagonal between the from-vertices of
    half_edgeA and half_edgeB; return the new split edge or None."""
    ihalf_edgeA = half_edgeA.Index()
    ihalf_edgeB = half_edgeB.Index()
    ivA = half_edgeA.FromVertex().Index()
    ivB = half_edgeB.FromVertex().Index()
    icell = half_edgeA.CellIndex()

    flag = check_split_cell(mesh, half_edgeA, half_edgeB)
    if mesh.IsIllegalSplitCell(half_edgeA, half_edgeB):
        return None

    if not (flag or flag_allow_non_manifold):
        print("Skipping split of cell {:d} at diagonal ({:d},{:d}).".format(icell, ivA, ivB))
        return None

    print("Splitting cell {:d} at diagonal ({:d},{:d}).".format(icell, ivA, ivB))
    split_edge = mesh.SplitCell(ihalf_edgeA, ihalf_edgeB)
    if split_edge is None:
        print("Split of cell {:d} at diagonal ({:d},{:d}) failed.".format(icell, ivA, ivB))
    return split_edge
## Split cell at largest angle.
#  - Split cell at vertex forming the largest angle.
#  - Split as many times as necessary to triangulate.
def split_cell_at_largest_angle(mesh, cell):
    """Triangulate *cell* by repeatedly splitting at the largest-angle vertex."""
    cos_minA, cos_maxA, ihalf_edge_min, ihalf_edge_max = cell.ComputeCosMinMaxAngle()
    half_edgeA = mesh.HalfEdge(ihalf_edge_max)
    while not half_edgeA.Cell().IsTriangle():
        # Diagonal partner: two half edges back in the same cell.
        half_edgeB = half_edgeA.PrevHalfEdgeInCell().PrevHalfEdgeInCell()
        split_edge = split_cell(mesh, half_edgeA, half_edgeB)
        if split_edge is None:
            # Cannot split cell at largest angle.
            return

        # Get largest angle in the remaining cell.
        # BUGFIX: recompute the angles on the cell produced by the split
        # (cellA) -- the old code queried the stale pre-split `cell` and left
        # cellA unused.
        cellA = split_edge.Cell()
        cos_minA, cos_maxA, ihalf_edge_min, ihalf_edge_max = cellA.ComputeCosMinMaxAngle()
        half_edgeA = mesh.HalfEdge(ihalf_edge_max)
## Split all cells.
def split_all_cells(mesh):
    """Triangulate every cell in the mesh.

    Removed unused locals (`n`, `flag_check`, `kount`) from the original.
    """
    # Iterate over a sorted snapshot of the cell indices.
    # DO NOT iterate over CellIndices() directly, since collapse/split/join
    # may delete cells.
    cell_indices_list = sorted(mesh.CellIndices())
    for icell in cell_indices_list:
        # cell may be None if it has been deleted from the cell dictionary.
        cell = mesh.Cell(icell)
        if cell is None:
            continue
        split_cell_at_largest_angle(mesh, cell)
# *** Join cell routines ***

def join_two_cells(mesh, half_edge):
    """Merge the two cells sharing half_edge by deleting that edge."""
    half_edgeX = half_edge.NextHalfEdgeAroundEdge()
    icell = half_edge.CellIndex()
    icellX = half_edgeX.CellIndex()

    flag = check_join_cell(mesh, half_edge)
    if mesh.IsIllegalJoinCells(half_edge):
        return

    if not flag:
        print("Skipping join of cell {:d} with cell {:d}.".format(icell, icellX))
        return

    print("Joining cell {:d} to cell {:d} by deleting edge (".format(icell, icellX) +
          half_edge.EndpointsStr(",") + ").")
    half_edgeB = mesh.JoinTwoCells(half_edge.Index())
    if half_edgeB is None:
        print("Join of cell {:d} to cell {:d} failed.".format(icell, icellX))
## Attempt to join each cell by deleting longest edge.
def join_triangle_with_large_angle(mesh):
    """For every triangle with an angle over 120 degrees, join it with the
    neighbor across the half edge following its largest-angle half edge."""
    cos_threshold = np.cos(120. * math.pi / 180)
    # Iterate over a sorted snapshot: join may delete cells.
    for icell in sorted(mesh.CellIndices()):
        # cell may be None if it has been deleted from the cell dictionary;
        # only triangles are considered.
        cell = mesh.Cell(icell)
        if cell is None or cell.NumVertices() > 3:
            continue
        _, cos_maxA, _, ihalf_edge_max = cell.ComputeCosMinMaxAngle()
        # cos_maxA below the threshold means the max angle exceeds 120 deg.
        if cos_maxA < cos_threshold:
            join_two_cells(mesh, mesh.HalfEdge(ihalf_edge_max).NextHalfEdgeInCell())
# *** Check routines ***
## Check that the mesh is an oriented manifold.
# - Return True if the mesh is an oriented manifold, False otherwise.
# - Writes a warning to stderr describing the first problem found.
def check_oriented_manifold(mesh):
    flag_non_manifold_vertex, flag_non_manifold_edge, iv, ihalf_edgeA = mesh.CheckManifold()
    if flag_non_manifold_edge:
        half_edgeA = mesh.HalfEdge(ihalf_edgeA)
        sys.stderr.write("Warning: Non-manifold edge (" + half_edgeA.EndpointsStr(",") + ").\n")
        # Non-manifold edge automatically implies inconsistent orientations.
        return False
    flag_orientation, ihalf_edgeB = mesh.CheckOrientation()
    if flag_orientation:
        # Orientations are consistent; only a non-manifold vertex can remain.
        if flag_non_manifold_vertex:
            # BUG FIX: the original string lacked the f-prefix (printed the
            # literal "{iv}") and had no trailing newline.
            sys.stderr.write(f"Warning: Non-manifold vertex {iv}.\n")
            return False
    else:
        if flag_non_manifold_vertex:
            sys.stderr.write\
                ("Warning: Non-manifold vertex or inconsistent orientations in cells incident on vertex {:d}.\n".format(iv))
        else:
            # BUG FIX: the original referenced undefined name half_edgeB
            # (only the index ihalf_edgeB existed), raising NameError on
            # this path; it also lacked the trailing newline.
            half_edgeB = mesh.HalfEdge(ihalf_edgeB)
            sys.stderr.write\
                ("Warning: Inconsistent orientation of cells incident on edge (" + half_edgeB.EndpointsStr(",") + ").\n")
        return False
    return True
## Check the mesh data structure and, when flag_fail_on_non_manifold is
# set, that the mesh is an oriented manifold.
# - Exits the program when a check fails.
# - Returns True when the mesh passes all enabled checks.
def check_mesh(mesh):
    global flag_fail_on_non_manifold
    flag, error_msg = mesh.CheckAll()
    if not flag:
        sys.stderr.write("Error detected in mesh data structure.\n")
        if error_msg is not None:
            sys.stderr.write(error_msg + "\n")
        exit(-1)
    if not flag_fail_on_non_manifold:
        # Non-manifold meshes are tolerated in this mode.
        return True
    flag_oriented_manifold = check_oriented_manifold(mesh)
    if not flag_oriented_manifold:
        # BUG FIX: both messages previously lacked newlines and ran
        # together with subsequent output.
        sys.stderr.write("Detected non-manifold or inconsistent orientations\n")
        sys.stderr.write("Exiting.\n")
        exit(-1)
    return flag_oriented_manifold
## Print a warning message if collapsing half_edge is illegal or
# will change mesh topology.
# - Return True if collapse is not illegal and does not change
#   mesh topology.
def check_edge_collapse(mesh, half_edge):
    icell = half_edge.CellIndex()
    is_ok = True
    # Illegal when some cell contains both endpoints but not the edge.
    if mesh.IsIllegalEdgeCollapseH(half_edge):
        print("Collapse of edge (" + half_edge.EndpointsStr(",") +") is illegal.")
        print("  Some cell contains vertices " +
              half_edge.EndpointsStr(" and ") + " but not edge (" +
              half_edge.EndpointsStr(",") + ").")
        is_ok = False
    # Collapsing across a triangle hole changes the topology.
    found_hole, ivC = mesh.FindTriangleHole(half_edge)
    if found_hole:
        print("Collapsing edge (" + half_edge.EndpointsStr(",") +") will change the mesh topology.")
        print("  Vertices (" + half_edge.EndpointsStr(", ") +", " + str(ivC) + ") form a triangle hole.")
        is_ok = False
    # An interior edge with both endpoints on the boundary would merge
    # two non-adjacent boundary vertices.
    if (not half_edge.IsBoundary()) and \
            half_edge.FromVertex().IsBoundary() and half_edge.ToVertex().IsBoundary():
        print("Collapsing edge (" + half_edge.EndpointsStr(",") + ") merges two non-adjacent boundary vertices.")
        is_ok = False
    if mesh.IsIsolatedTriangle(icell):
        print("Collapsing edge(" + half_edge.EndpointsStr(",") +") will delete isolated cell {:d}.".format(icell))
        is_ok = False
    if mesh.IsInTetrahedron(icell):
        print("Collapsing edge (" + half_edge.EndpointsStr(",") +") will collapse a tetrahedron.")
        is_ok = False
    return is_ok
## Print a warning message if splitting cell at diagonal
# (half_edgeA.FromVertex(), half_edgeB.FromVertex())
# will change the mesh topology.
# - Return True if split does not change manifold topology.
def check_split_cell(mesh, half_edgeA, half_edgeB):
    vA = half_edgeA.FromVertex()
    vB = half_edgeB.FromVertex()
    ivA = vA.Index()
    ivB = vB.Index()
    icell = half_edgeA.CellIndex()
    # Existing mesh edge (vA,vB), if any.
    half_edgeC = mesh.FindEdge(vA, vB)
    flag_cell_edge = False
    return_flag = True
    if mesh.IsIllegalSplitCell(half_edgeA, half_edgeB):
        # (vA,vB) is a cell edge when the two vertices are adjacent.
        # BUG FIX: the second test compared vB against half_edgeA.FromVertex()
        # (i.e. against vA itself) instead of half_edgeA.ToVertex().
        if (vA is half_edgeB.ToVertex()) or (vB is half_edgeA.ToVertex()):
            flag_cell_edge = True
        if flag_cell_edge:
            print("({:d},{:d}) is a cell edge, not a cell diagonal.".format(ivA, ivB))
        else:
            # BUG FIX: format string was missing the comma: "({:d}{:d})".
            print("Illegal split of cell {:d} with diagonal ({:d},{:d}).".format(icell, ivA, ivB))
        return_flag = False
    if (half_edgeC is not None) and not flag_cell_edge:
        sys.stdout.write("Splitting cell {:d} with diagonal ({:d},{:d})".format(icell, ivA, ivB))
        # BUG FIX: typo "nmore" -> "more".
        sys.stdout.write(" creates an edge incident on three or more cells.\n")
        return_flag = False
    return return_flag
## Print a warning if joining cells separated by half_edge is illegal.
# - Return True if join is legal.
def check_join_cell(mesh, half_edge):
    TWO = 2
    return_flag = True
    if mesh.IsIllegalJoinCells(half_edge):
        half_edgeX = half_edge.NextHalfEdgeAroundEdge()
        if half_edge.IsBoundary():
            print("Only one cell contains edge (" + \
                  half_edge.EndpointsStr(",") + ").")
        elif not half_edge.FromVertex().IsIncidentOnMoreThanTwoEdges():
            # BUG FIX: missing f-prefix printed the placeholder literally.
            print(f"Half edge endpoint {half_edge.FromVertexIndex()} is incident on only two edges.")
        elif not half_edge.ToVertex().IsIncidentOnMoreThanTwoEdges():
            # BUG FIX: missing f-prefix printed the placeholder literally.
            print(f"Half edge endpoint {half_edge.ToVertexIndex()} is incident on only two edges.")
        elif not (half_edge is half_edgeX.NextHalfEdgeAroundEdge()):
            print("More than two cells are incident on edge (" + \
                  half_edge.EndpointsStr(",") + ").")
        else:
            cell = half_edge.Cell()
            cellX = half_edgeX.Cell()
            num_shared_vertices = \
                mesh.CountNumVerticesSharedByTwoCells(cell, cellX)
            if num_shared_vertices > TWO:
                # BUG FIX: missing f-prefix printed the placeholders literally.
                print(f"Cells {cell.Index()} and {cellX.Index()} share {num_shared_vertices} vertices.")
            else:
                print("Join of two cells incident on edge (" + \
                      half_edge.EndpointsStr(",") + ") is illegal.")
        return_flag = False
    return return_flag
# *** Init/parse/print/prompt functions. ***
## Initialize global flags.
def InitFlags():
    """Reset all module-level options to their default values."""
    global input_filename, output_filename
    global flag_allow_non_manifold, flag_fail_on_non_manifold
    input_filename = output_filename = None
    flag_allow_non_manifold = flag_fail_on_non_manifold = False
## Parse command line options and the input/output filenames.
# - argv: full argument vector (argv[0] is the program name).
# - Calls usage_error() (which exits) on any unrecognized option or
#   wrong number of positional arguments.
def parse_command_line(argv):
    global input_filename, output_filename
    global flag_allow_non_manifold, flag_fail_on_non_manifold
    pos = 1
    # Consume leading "-option" arguments.
    while pos < len(argv) and argv[pos][0] == '-':
        option = argv[pos]
        if option == "-allow_non_manifold":
            flag_allow_non_manifold = True
        elif option == "-fail_on_non_manifold":
            flag_fail_on_non_manifold = True
        else:
            sys.stderr.write("Usage error. Option " + option + " is undefined.\n")
            usage_error()
        pos = pos + 1
    # Exactly one or two positional arguments must remain.
    num_remaining = len(argv) - pos
    if num_remaining < 1 or num_remaining > 2:
        usage_error()
    input_filename = argv[pos]
    if num_remaining == 2:
        output_filename = argv[pos + 1]
## Print mesh information, such as number of vertices, edges, cells
# min and max edge lengths and cell angles.
def print_mesh_info(mesh):
    LARGE_CELL_SIZE = 5
    # Element counts.
    vertex_count = mesh.NumVertices()
    edge_count = mesh.CountNumEdges()
    boundary_edge_count = mesh.CountNumBoundaryEdges()
    cell_count = mesh.NumCells()
    triangle_count = mesh.CountNumTriangles()
    quad_count = mesh.CountNumQuads()
    large_cell_count = mesh.CountNumCellsOfSizeGE(LARGE_CELL_SIZE)
    # Geometric measurements (kept squared until printed).
    min_len_sq, max_len_sq, ihmin, ihmax =\
        mesh.ComputeMinMaxEdgeLengthSquared()
    min_ratio_sq, icmin, Lmin, Lmax, ihmin, ihmax =\
        mesh.ComputeMinCellEdgeLengthRatioSquared()
    cos_min_angle, cos_max_angle, ihmin, ihmax = mesh.ComputeCosMinMaxAngle()
    # Topology checks.
    has_non_manifold_vertex, has_non_manifold_edge, iv, ie = mesh.CheckManifold()
    is_oriented, iv = mesh.CheckOrientation()
    print("Number of vertices: ", vertex_count)
    print("Number of mesh edges: ", edge_count)
    print("Number of boundary edges: ", boundary_edge_count)
    print("Number of mesh cells: ", cell_count)
    print("  Number of mesh triangles: ", triangle_count)
    print("  Number of mesh quadrilaterals: ", quad_count)
    print("  Number of mesh cells with > 4 vertices: ", large_cell_count)
    print("Min edge length: {:.4f}".format(sqrt(min_len_sq)))
    print("Max edge length: {:.4f}".format(sqrt(max_len_sq)))
    print("Min cell edge length ratio: {:.4f}".format(sqrt(min_ratio_sq)))
    print("Minimum cell angle: {:.4f}".format(math.degrees(acos(cos_min_angle))))
    print("Maximum cell angle: {:.4f}".format(math.degrees(acos(cos_max_angle))))
    if has_non_manifold_vertex or has_non_manifold_edge or not is_oriented:
        print("Mesh is non-manifold or has inconsistent cell orientations.")
    else:
        print("Mesh is an oriented manifold.")
## Report a command-line usage error and terminate the program.
# - Flushes stderr so any pending warnings are emitted before exiting.
def usage_error():
    sys.stderr.flush()
    exit(-1)
# Script entry point.  NOTE(review): main is presumably defined earlier
# in this file (not visible in this chunk) — confirm.
if __name__ == '__main__':
    main(sys.argv)
# (Removed trailing non-code dataset/scraping artifact that made the file
# syntactically invalid Python.)