hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f721134f2cf6dd7f8af453cc2143cd6f38f7cc03 | 1,204 | py | Python | lagom/envs/record_episode_statistics.py | zuoxingdong/lagom | 3b6710804dbc79c6dffb369ac87c68f4055ab6cd | [
"MIT"
] | 383 | 2018-07-11T17:43:10.000Z | 2022-01-24T08:46:23.000Z | lagom/envs/record_episode_statistics.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 90 | 2018-07-11T23:51:45.000Z | 2021-12-16T08:56:42.000Z | lagom/envs/record_episode_statistics.py | LorinChen/lagom | 273bb7f5babb1f250f6dba0b5f62c6614f301719 | [
"MIT"
] | 32 | 2018-07-12T18:21:03.000Z | 2021-09-15T05:47:48.000Z | import time
from collections import deque
import gym
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super().__init__(env)
self.t0 = time.perf_counter()
self.episode_return = 0.0
self.episode_horizon = 0
self.return_queue = deque(maxlen=deque_size)
self.horizon_queue = deque(maxlen=deque_size)
def reset(self, **kwargs):
observation = super().reset(**kwargs)
self.episode_return = 0.0
self.episode_horizon = 0
return observation
def step(self, action):
observation, reward, done, info = super().step(action)
self.episode_return += reward
self.episode_horizon += 1
if done:
info['episode'] = {'return': self.episode_return,
'horizon': self.episode_horizon,
'time': round(time.perf_counter() - self.t0, 4)}
self.return_queue.append(self.episode_return)
self.horizon_queue.append(self.episode_horizon)
self.episode_return = 0.0
self.episode_horizon = 0
return observation, reward, done, info
| 34.4 | 79 | 0.599668 | import time
from collections import deque
import gym
class RecordEpisodeStatistics(gym.Wrapper):
def __init__(self, env, deque_size=100):
super().__init__(env)
self.t0 = time.perf_counter()
self.episode_return = 0.0
self.episode_horizon = 0
self.return_queue = deque(maxlen=deque_size)
self.horizon_queue = deque(maxlen=deque_size)
def reset(self, **kwargs):
observation = super().reset(**kwargs)
self.episode_return = 0.0
self.episode_horizon = 0
return observation
def step(self, action):
observation, reward, done, info = super().step(action)
self.episode_return += reward
self.episode_horizon += 1
if done:
info['episode'] = {'return': self.episode_return,
'horizon': self.episode_horizon,
'time': round(time.perf_counter() - self.t0, 4)}
self.return_queue.append(self.episode_return)
self.horizon_queue.append(self.episode_horizon)
self.episode_return = 0.0
self.episode_horizon = 0
return observation, reward, done, info
| true | true |
f721149609f8936e76f673d4273205ed140bf7b3 | 1,608 | py | Python | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 5 | 2019-08-12T17:56:25.000Z | 2021-08-31T04:36:42.000Z | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 12 | 2020-02-12T00:38:11.000Z | 2022-03-11T23:50:12.000Z | blog_auth/migrations/0001_initial.py | MicroPyramid/ngo-cms | 5f0baf69ce646ab6b895d3ae2f49b782630c9959 | [
"MIT"
] | 8 | 2019-06-19T18:54:02.000Z | 2021-01-05T19:31:30.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('email', models.EmailField(unique=True, max_length=75)),
('rpwd', models.CharField(max_length=20)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('gender', models.CharField(default=b'Unknown', max_length=10, verbose_name=b'Gender', choices=[(b'Male', b'Male'), (b'Female', b'Female')])),
('join_date', models.DateTimeField(auto_now_add=True)),
('mobile', models.CharField(max_length=15)),
('user_type', models.CharField(default=b'user', max_length=10, verbose_name=b'UserType', choices=[(b'user', b'user'), (b'Admin', b'Admin')])),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| 43.459459 | 158 | 0.584577 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('email', models.EmailField(unique=True, max_length=75)),
('rpwd', models.CharField(max_length=20)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('gender', models.CharField(default=b'Unknown', max_length=10, verbose_name=b'Gender', choices=[(b'Male', b'Male'), (b'Female', b'Female')])),
('join_date', models.DateTimeField(auto_now_add=True)),
('mobile', models.CharField(max_length=15)),
('user_type', models.CharField(default=b'user', max_length=10, verbose_name=b'UserType', choices=[(b'user', b'user'), (b'Admin', b'Admin')])),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| true | true |
f721152db9db3827adab40e0750d01f58df5decf | 15,018 | py | Python | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2015-04-02T21:44:36.000Z | 2016-04-29T21:19:04.000Z | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 3 | 2016-04-29T21:45:26.000Z | 2016-05-04T19:41:23.000Z | cinder/tests/unit/zonemanager/test_brcd_fc_zone_client_cli.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 4 | 2016-01-27T00:25:52.000Z | 2021-03-25T19:54:08.000Z | # (c) Copyright 2016 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for brcd fc zone client cli."""
from unittest import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli
as client_cli)
from cinder.zonemanager.drivers.brocade import exception as b_exception
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\
20:1a:00:05:1e:e8:e3:29;na',
' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29']
cfgactvshow = ['Effective configuration:\n',
' cfg:\tOpenStack_Cfg\t\n',
' zone:\topenstack50060b0000c26604201900051ee8e329\t\n',
'\t\t50:06:0b:00:00:c2:66:04\n',
'\t\t20:19:00:05:1e:e8:e3:29\n']
active_zoneset = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']},
'active_zone_config': 'OpenStack_Cfg'}
active_zoneset_multiple_zones = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
'openstack50060b0000c26602201900051ee8e327':
['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']},
'active_zone_config': 'OpenStack_Cfg'}
new_zone_memb_same = {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}
new_zone_memb_not_same = {
'openstack50060b0000c26604201900051ee8e330':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']}
new_zone = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']}
new_zones = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'],
'openstack10000011111111112001001111111111':
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']}
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1']
unsupported_firmware = ['Fabric OS: v6.2.1']
class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
# override some of the functions
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG]
get_switch_info_mock.return_value = cfgactvshow
active_zoneset_returned = self.get_active_zone_set()
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictEqual(active_zoneset, active_zoneset_returned)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(b_exception.BrocadeZoningCliException,
self.get_active_zone_set)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
apply_zone_change_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.add_zones(new_zones, False, None)
self.assertEqual(1, get_active_zs_mock.call_count)
self.assertEqual(3, apply_zone_change_mock.call_count)
cfg_save_mock.assert_called_once_with()
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
apply_zone_change_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.add_zones(new_zone, True, active_zoneset)
self.assertEqual(2, apply_zone_change_mock.call_count)
activate_zoneset_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_update_zone_exists_memb_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD,
active_zoneset)
self.assertEqual(1, apply_zone_change_mock.call_count)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.update_zones(new_zone_memb_not_same, True,
zone_constant.ZONE_ADD, active_zoneset)
self.assertEqual(1, apply_zone_change_mock.call_count)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
self.add_zones(new_zone_memb_not_same, True, active_zoneset)
call_args = apply_zone_change_mock.call_args[0][0]
self.assertEqual(0, get_active_zs_mock.call_count)
self.assertEqual(2, apply_zone_change_mock.call_count)
self.assertIn(zone_constant.CFG_ADD.strip(), call_args)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_activate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.activate_zoneset('zoneset1')
self.assertTrue(return_value)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_deactivate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.deactivate_zoneset()
self.assertTrue(return_value)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_delete_zones_activate_false(self, cfg_save_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
self.delete_zones(zone_names_to_delete, False,
active_zoneset_multiple_zones)
self.assertEqual(1, apply_zone_change_mock.call_count)
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
cfg_save_mock.assert_called_once_with()
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_delete_zones_activate_true(self, activate_zs_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') \
as zone_delete_mock:
self.delete_zones(zone_names_to_delete, True,
active_zoneset_multiple_zones)
self.assertEqual(1, apply_zone_change_mock.call_count)
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
activate_zs_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
get_switch_info_mock.return_value = (switch_data)
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list_expected, ns_info_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(b_exception.BrocadeZoningCliException,
self.get_nameserver_info)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test__cfg_save(self, ssh_execute_mock):
cmd_list = [zone_constant.CFG_SAVE]
self._cfg_save()
ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__zone_delete(self, apply_zone_change_mock):
zone_name = 'testzone'
cmd_list = ['zonedelete', '"testzone"']
self._zone_delete(zone_name)
apply_zone_change_mock.assert_called_once_with(cmd_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__cfg_trans_abort(self, apply_zone_change_mock):
cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT]
with mock.patch.object(self, '_is_trans_abortable') \
as is_trans_abortable_mock:
is_trans_abortable_mock.return_value = True
self._cfg_trans_abort()
is_trans_abortable_mock.assert_called_once_with()
apply_zone_change_mock.assert_called_once_with(cmd_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_true(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SHOW_TRANS]
run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE),
None)
data = self._is_trans_abortable()
self.assertTrue(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
run_ssh_mock.return_value = (Stream(), Stream())
self.assertRaises(b_exception.BrocadeZoningCliException,
self._is_trans_abortable)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_false(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SHOW_TRANS]
cfgtransshow = 'There is no outstanding zoning transaction'
run_ssh_mock.return_value = (Stream(cfgtransshow), None)
data = self._is_trans_abortable()
self.assertFalse(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_apply_zone_change(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SAVE]
run_ssh_mock.return_value = (None, None)
self.apply_zone_change(cmd_list)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [zone_constant.NS_SHOW]
nsshow_list = [nsshow]
run_ssh_mock.return_value = (Stream(nsshow), Stream())
switch_data = self._get_switch_info(cmd_list)
self.assertEqual(nsshow_list, switch_data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
def test__parse_ns_output(self):
invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29']
expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
return_wwn_list = self._parse_ns_output(switch_data)
self.assertEqual(expected_wwn_list, return_wwn_list)
self.assertRaises(exception.InvalidParameterValue,
self._parse_ns_output, invalid_switch_data)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (supported_firmware, None)
self.assertTrue(self.is_supported_firmware())
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
self.assertFalse(self.is_supported_firmware())
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (None, Stream())
self.assertFalse(self.is_supported_firmware())
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(b_exception.BrocadeZoningCliException,
self.is_supported_firmware)
class Channel(object):
def recv_exit_status(self):
return 0
class Stream(object):
def __init__(self, buffer=''):
self.buffer = buffer
self.channel = Channel()
def readlines(self):
return self.buffer
def splitlines(self):
return self.buffer.splitlines()
def close(self):
pass
def flush(self):
self.buffer = ''
| 48.289389 | 78 | 0.699827 |
from unittest import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
from cinder.zonemanager.drivers.brocade import (brcd_fc_zone_client_cli
as client_cli)
from cinder.zonemanager.drivers.brocade import exception as b_exception
import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = [' N 011a00;2,3;20:1a:00:05:1e:e8:e3:29;\
20:1a:00:05:1e:e8:e3:29;na',
' Fabric Port Name: 20:1a:00:05:1e:e8:e3:29']
cfgactvshow = ['Effective configuration:\n',
' cfg:\tOpenStack_Cfg\t\n',
' zone:\topenstack50060b0000c26604201900051ee8e329\t\n',
'\t\t50:06:0b:00:00:c2:66:04\n',
'\t\t20:19:00:05:1e:e8:e3:29\n']
active_zoneset = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']},
'active_zone_config': 'OpenStack_Cfg'}
active_zoneset_multiple_zones = {
'zones': {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29'],
'openstack50060b0000c26602201900051ee8e327':
['50:06:0b:00:00:c2:66:02', '20:19:00:05:1e:e8:e3:27']},
'active_zone_config': 'OpenStack_Cfg'}
new_zone_memb_same = {
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']}
new_zone_memb_not_same = {
'openstack50060b0000c26604201900051ee8e330':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:30']}
new_zone = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10']}
new_zones = {'openstack10000012345678902001009876543210':
['10:00:00:12:34:56:78:90', '20:01:00:98:76:54:32:10'],
'openstack10000011111111112001001111111111':
['10:00:00:11:11:11:11:11', '20:01:00:11:11:11:11:11']}
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
supported_firmware = ['Kernel: 2.6', 'Fabric OS: v7.0.1']
unsupported_firmware = ['Fabric OS: v6.2.1']
class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [zone_constant.GET_ACTIVE_ZONE_CFG]
get_switch_info_mock.return_value = cfgactvshow
active_zoneset_returned = self.get_active_zone_set()
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictEqual(active_zoneset, active_zoneset_returned)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(b_exception.BrocadeZoningCliException,
self.get_active_zone_set)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
apply_zone_change_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.add_zones(new_zones, False, None)
self.assertEqual(1, get_active_zs_mock.call_count)
self.assertEqual(3, apply_zone_change_mock.call_count)
cfg_save_mock.assert_called_once_with()
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
apply_zone_change_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.add_zones(new_zone, True, active_zoneset)
self.assertEqual(2, apply_zone_change_mock.call_count)
activate_zoneset_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_update_zone_exists_memb_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.update_zones(new_zone_memb_same, True, zone_constant.ZONE_ADD,
active_zoneset)
self.assertEqual(1, apply_zone_change_mock.call_count)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_update_zone_exists_memb_not_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
get_active_zs_mock.return_value = active_zoneset
self.update_zones(new_zone_memb_not_same, True,
zone_constant.ZONE_ADD, active_zoneset)
self.assertEqual(1, apply_zone_change_mock.call_count)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test_add_zone_all_exists_memb_not_same(self, apply_zone_change_mock,
activate_zoneset_mock,
get_active_zs_mock):
self.add_zones(new_zone_memb_not_same, True, active_zoneset)
call_args = apply_zone_change_mock.call_args[0][0]
self.assertEqual(0, get_active_zs_mock.call_count)
self.assertEqual(2, apply_zone_change_mock.call_count)
self.assertIn(zone_constant.CFG_ADD.strip(), call_args)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_activate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.activate_zoneset('zoneset1')
self.assertTrue(return_value)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_deactivate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.deactivate_zoneset()
self.assertTrue(return_value)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_delete_zones_activate_false(self, cfg_save_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
self.delete_zones(zone_names_to_delete, False,
active_zoneset_multiple_zones)
self.assertEqual(1, apply_zone_change_mock.call_count)
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
cfg_save_mock.assert_called_once_with()
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_delete_zones_activate_true(self, activate_zs_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') \
as zone_delete_mock:
self.delete_zones(zone_names_to_delete, True,
active_zoneset_multiple_zones)
self.assertEqual(1, apply_zone_change_mock.call_count)
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
activate_zs_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
get_switch_info_mock.return_value = (switch_data)
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list_expected, ns_info_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(b_exception.BrocadeZoningCliException,
self.get_nameserver_info)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test__cfg_save(self, ssh_execute_mock):
cmd_list = [zone_constant.CFG_SAVE]
self._cfg_save()
ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__zone_delete(self, apply_zone_change_mock):
zone_name = 'testzone'
cmd_list = ['zonedelete', '"testzone"']
self._zone_delete(zone_name)
apply_zone_change_mock.assert_called_once_with(cmd_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__cfg_trans_abort(self, apply_zone_change_mock):
cmd_list = [zone_constant.CFG_ZONE_TRANS_ABORT]
with mock.patch.object(self, '_is_trans_abortable') \
as is_trans_abortable_mock:
is_trans_abortable_mock.return_value = True
self._cfg_trans_abort()
is_trans_abortable_mock.assert_called_once_with()
apply_zone_change_mock.assert_called_once_with(cmd_list)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_true(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SHOW_TRANS]
run_ssh_mock.return_value = (Stream(zone_constant.TRANS_ABORTABLE),
None)
data = self._is_trans_abortable()
self.assertTrue(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
run_ssh_mock.return_value = (Stream(), Stream())
self.assertRaises(b_exception.BrocadeZoningCliException,
self._is_trans_abortable)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_false(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SHOW_TRANS]
cfgtransshow = 'There is no outstanding zoning transaction'
run_ssh_mock.return_value = (Stream(cfgtransshow), None)
data = self._is_trans_abortable()
self.assertFalse(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_apply_zone_change(self, run_ssh_mock):
cmd_list = [zone_constant.CFG_SAVE]
run_ssh_mock.return_value = (None, None)
self.apply_zone_change(cmd_list)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [zone_constant.NS_SHOW]
nsshow_list = [nsshow]
run_ssh_mock.return_value = (Stream(nsshow), Stream())
switch_data = self._get_switch_info(cmd_list)
self.assertEqual(nsshow_list, switch_data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
def test__parse_ns_output(self):
invalid_switch_data = [' N 011a00;20:1a:00:05:1e:e8:e3:29']
expected_wwn_list = ['20:1a:00:05:1e:e8:e3:29']
return_wwn_list = self._parse_ns_output(switch_data)
self.assertEqual(expected_wwn_list, return_wwn_list)
self.assertRaises(exception.InvalidParameterValue,
self._parse_ns_output, invalid_switch_data)
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (supported_firmware, None)
self.assertTrue(self.is_supported_firmware())
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
self.assertFalse(self.is_supported_firmware())
@mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (None, Stream())
self.assertFalse(self.is_supported_firmware())
    @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
    def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
        """An SSH execution failure is wrapped in BrocadeZoningCliException."""
        exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
        self.assertRaises(b_exception.BrocadeZoningCliException,
                          self.is_supported_firmware)
class Channel(object):
    """Minimal fake of a paramiko channel: always reports success."""

    def recv_exit_status(self):
        # 0 == command exited successfully.
        return 0
class Stream(object):
    """Fake paramiko stdout/stderr stream backed by an in-memory string.

    Mimics just the attributes the client code touches: `buffer`,
    `channel`, `readlines`, `splitlines`, `close` and `flush`.
    """

    def __init__(self, buffer=''):
        # Raw text the fake stream will hand back.
        self.buffer = buffer
        self.channel = Channel()

    def readlines(self):
        # NOTE(review): returns the raw string, not a list of lines,
        # matching how the code under test consumes it.
        return self.buffer

    def splitlines(self):
        return self.buffer.splitlines()

    def close(self):
        # Nothing to release for an in-memory stream.
        pass

    def flush(self):
        # Discard buffered content, like flushing a real stream.
        self.buffer = ''
| true | true |
f72115d189ce1aea3fd459147ab92b50d1a8393a | 807 | py | Python | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/bluebottle_drf2/renderers.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | from rest_framework_json_api.renderers import JSONRenderer
from django.contrib.auth.models import AnonymousUser
class BluebottleJSONAPIRenderer(JSONRenderer):
    """JSON:API renderer that pretty-prints and handles anonymous users."""

    def get_indent(self, *args, **kwargs):
        # Always render API responses with a 4-space indent.
        return 4

    @classmethod
    def build_json_resource_obj(
        cls,
        fields,
        resource,
        resource_instance,
        resource_name,
        *args,
        **kwargs
    ):
        # An AnonymousUser has no serializable model fields; emit a
        # minimal resource object flagged `is-anonymous` instead of
        # delegating to the default JSON:API serialization.
        if isinstance(resource_instance, AnonymousUser):
            return {
                'id': resource['id'],
                'type': resource_name,
                'attributes': {
                    'is-anonymous': True
                }
            }
        return super().build_json_resource_obj(
            fields, resource, resource_instance, resource_name, *args, **kwargs
        )
| 26.032258 | 79 | 0.570012 | from rest_framework_json_api.renderers import JSONRenderer
from django.contrib.auth.models import AnonymousUser
class BluebottleJSONAPIRenderer(JSONRenderer):
    """JSON:API renderer that pretty-prints and handles anonymous users."""

    def get_indent(self, *args, **kwargs):
        # Always render API responses with a 4-space indent.
        return 4

    @classmethod
    def build_json_resource_obj(
        cls,
        fields,
        resource,
        resource_instance,
        resource_name,
        *args,
        **kwargs
    ):
        # An AnonymousUser has no serializable model fields; emit a
        # minimal resource object flagged `is-anonymous` instead of
        # delegating to the default JSON:API serialization.
        if isinstance(resource_instance, AnonymousUser):
            return {
                'id': resource['id'],
                'type': resource_name,
                'attributes': {
                    'is-anonymous': True
                }
            }
        return super().build_json_resource_obj(
            fields, resource, resource_instance, resource_name, *args, **kwargs
        )
| true | true |
f72116597d007b731f68d9cb1a6c637348e7d55b | 4,912 | py | Python | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | 1 | 2021-01-11T06:28:59.000Z | 2021-01-11T06:28:59.000Z | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | 1 | 2020-06-28T10:40:59.000Z | 2020-06-28T10:40:59.000Z | rclpy/rclpy/context.py | bastinat0r/rclpy | 510b243b2efe9e6b4b20837b7dea8092069cd2d3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
from typing import Callable
from typing import List
from typing import Optional
import weakref
g_logging_configure_lock = threading.Lock()
g_logging_ref_count = 0
class Context:
    """
    Encapsulates the lifecycle of init and shutdown.

    Context objects should not be reused, and are finalized in their destructor.

    Wraps the `rcl_context_t` type.
    """

    def __init__(self):
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        from .handle import Handle
        self._handle = Handle(rclpy_implementation.rclpy_create_context())
        self._lock = threading.Lock()
        # Weak references to user callbacks invoked once at shutdown.
        self._callbacks = []
        self._callbacks_lock = threading.Lock()
        self._logging_initialized = False

    @property
    def handle(self):
        return self._handle

    def init(self, args: Optional[List[str]] = None, *, initialize_logging: bool = True):
        """
        Initialize ROS communications for a given context.

        :param args: List of command line arguments.
        :param initialize_logging: Whether to set up the global logging
            subsystem (reference counted across contexts).
        """
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_init(args if args is not None else sys.argv, capsule)
            if initialize_logging and not self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count += 1
                    # Only the first context actually configures logging.
                    if g_logging_ref_count == 1:
                        rclpy_implementation.rclpy_logging_configure(capsule)
                self._logging_initialized = True

    def ok(self):
        """Check if context hasn't been shut down."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            return rclpy_implementation.rclpy_ok(capsule)

    def _call_on_shutdown_callbacks(self):
        # Fire every still-alive weak callback exactly once, then clear.
        with self._callbacks_lock:
            for weak_method in self._callbacks:
                callback = weak_method()
                callback()
            self._callbacks = []

    def shutdown(self):
        """Shutdown this context."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_shutdown(capsule)
        self._call_on_shutdown_callbacks()
        self._logging_fini()

    def try_shutdown(self):
        """Shutdown this context, if not already shutdown."""
        # imported locally to avoid loading extensions on module import
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            if rclpy_implementation.rclpy_ok(capsule):
                rclpy_implementation.rclpy_shutdown(capsule)
                self._call_on_shutdown_callbacks()

    def _remove_callback(self, weak_method):
        # Invoked by weakref.WeakMethod when the bound method's object dies.
        self._callbacks.remove(weak_method)

    def on_shutdown(self, callback: Callable[[], None]):
        """Add a callback to be called on shutdown.

        :param callback: Bound method to invoke; called immediately if the
            context is already shut down.
        :raises TypeError: If ``callback`` is not callable.
        """
        if not callable(callback):
            # Bug fix: the message was never formatted (TypeError received
            # the template and the type as two separate arguments).
            raise TypeError(
                'callback should be a callable, got {}'.format(type(callback)))
        with self._callbacks_lock:
            if not self.ok():
                callback()
            else:
                self._callbacks.append(weakref.WeakMethod(callback, self._remove_callback))

    def _logging_fini(self):
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._lock:
            if self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count -= 1
                    # Last context out tears logging down.
                    if g_logging_ref_count == 0:
                        rclpy_implementation.rclpy_logging_fini()
                    if g_logging_ref_count < 0:
                        raise RuntimeError(
                            'Unexpected error: logger ref count should never be lower that zero')
                self._logging_initialized = False
| 39.296 | 97 | 0.667142 |
import sys
import threading
from typing import Callable
from typing import List
from typing import Optional
import weakref
g_logging_configure_lock = threading.Lock()
g_logging_ref_count = 0
class Context:
    """Encapsulates the lifecycle of init and shutdown.

    Context objects should not be reused, and are finalized in their
    destructor.  Wraps the `rcl_context_t` type.
    """

    def __init__(self):
        # Imported locally to avoid loading extensions on module import.
        from rclpy.impl.implementation_singleton import rclpy_implementation
        from .handle import Handle
        self._handle = Handle(rclpy_implementation.rclpy_create_context())
        self._lock = threading.Lock()
        # Weak references to user callbacks invoked once at shutdown.
        self._callbacks = []
        self._callbacks_lock = threading.Lock()
        self._logging_initialized = False

    @property
    def handle(self):
        return self._handle

    def init(self, args: Optional[List[str]] = None, *, initialize_logging: bool = True):
        """Initialize ROS communications for a given context.

        :param args: List of command line arguments (defaults to sys.argv).
        :param initialize_logging: Configure the ref-counted global logging
            subsystem on first use.
        """
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_init(args if args is not None else sys.argv, capsule)
            if initialize_logging and not self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count += 1
                    # Only the first context actually configures logging.
                    if g_logging_ref_count == 1:
                        rclpy_implementation.rclpy_logging_configure(capsule)
                self._logging_initialized = True

    def ok(self):
        """Check if context hasn't been shut down."""
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            return rclpy_implementation.rclpy_ok(capsule)

    def _call_on_shutdown_callbacks(self):
        # Fire every still-alive weak callback exactly once, then clear.
        with self._callbacks_lock:
            for weak_method in self._callbacks:
                callback = weak_method()
                callback()
            self._callbacks = []

    def shutdown(self):
        """Shutdown this context and finalize logging."""
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            rclpy_implementation.rclpy_shutdown(capsule)
        self._call_on_shutdown_callbacks()
        self._logging_fini()

    def try_shutdown(self):
        """Shutdown this context, if not already shutdown."""
        from rclpy.impl.implementation_singleton import rclpy_implementation
        with self._handle as capsule, self._lock:
            if rclpy_implementation.rclpy_ok(capsule):
                rclpy_implementation.rclpy_shutdown(capsule)
                self._call_on_shutdown_callbacks()

    def _remove_callback(self, weak_method):
        # Invoked by weakref.WeakMethod when the bound method's object dies.
        self._callbacks.remove(weak_method)

    def on_shutdown(self, callback: Callable[[], None]):
        """Add a callback to be called on shutdown.

        :raises TypeError: If ``callback`` is not callable.
        """
        if not callable(callback):
            # NOTE(review): the message is never formatted -- TypeError is
            # given the template and the type as two separate arguments.
            raise TypeError('callback should be a callable, got {}', type(callback))
        with self._callbacks_lock:
            if not self.ok():
                callback()
            else:
                self._callbacks.append(weakref.WeakMethod(callback, self._remove_callback))

    def _logging_fini(self):
        # Drop one reference to the global logging subsystem; the last
        # context out tears logging down.
        from rclpy.impl.implementation_singleton import rclpy_implementation
        global g_logging_ref_count
        with self._lock:
            if self._logging_initialized:
                with g_logging_configure_lock:
                    g_logging_ref_count -= 1
                    if g_logging_ref_count == 0:
                        rclpy_implementation.rclpy_logging_fini()
                    if g_logging_ref_count < 0:
                        raise RuntimeError(
                            'Unexpected error: logger ref count should never be lower that zero')
                self._logging_initialized = False
| true | true |
f72116774894f97836e29f765583285f9e3b5acf | 2,226 | py | Python | .modules/.Infoga/lib/output.py | termux-one/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 1,103 | 2018-04-20T14:08:11.000Z | 2022-03-29T06:22:43.000Z | .modules/.Infoga/lib/output.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 29 | 2019-04-03T14:52:38.000Z | 2022-03-24T12:33:05.000Z | .modules/.Infoga/lib/output.py | sshourya948/EasY_HaCk | 0a8d09ca4b126b027b6842e02fa0c29d8250e090 | [
"Apache-2.0"
] | 161 | 2018-04-20T15:57:12.000Z | 2022-03-15T19:16:16.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# @name : Infoga - Email Information Gathering
# @url : http://github.com/m4ll0k
# @author : Momo Outaadi (m4ll0k)
from lib.colors import *
# Coloured console output helpers; colour escape codes (G, R, B, Y, W, E)
# come from lib.colors.
def plus(string):print("%s[+]%s %s"%(G%0,E,string))  # green: success/result
def warn(string):print("%s[!]%s %s"%(R%0,E,string))  # red: warning
def test(string):print("%s[*]%s %s"%(B%0,E,string))  # blue: progress
def info(string):print("%s[i]%s %s"%(Y%0,E,string))  # yellow: information
def more(string):print(" %s|%s %s"%(W%0,E,string))   # white: detail line
# pwned data
def ppwned(data, ver):
    """Print leak-database results for the queried email address.

    Verbosity levels 2 and 3 additionally print one detail block per
    individual leak entry.
    """
    if 'found' not in data['status']:
        # Nothing was leaked; stay silent.
        return
    warn('This email was leaked... found %s results..'%(data['results']))
    if ver in (2, 3):
        for entry in data['data']:
            more('Leaked in: %s'%entry['title'])
            more('Data Leaked: %s'%entry['date_leaked'])
            more('Details: %s'%entry['details'])
            more('Source Network: %s'%entry['source_network'])
            print("")
# print shodan return data
def data(ip, data, email, ver):
    """Print shodan host information for `email` resolved to `ip`.

    :param ip: IP address the email's domain resolved to.
    :param data: Shodan host record (dict); fields may be absent.
    :param email: The email address being reported on.
    :param ver: Verbosity: 1 = email/IP only, 2 = + host/location,
        3 = + ASN, ISP, map link, org, ports and vulns.
    """
    if ver == 1:
        plus('Email: %s (%s)'%(email,ip))
    elif ver == 2:
        try:
            plus('Email: %s (%s)'%(email,ip))
            if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
            if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
            if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
        except KeyError:
            # Shodan omits unknown fields entirely; skip what is missing.
            pass
    elif ver == 3:
        try:
            plus('Email: %s (%s)'%(email,ip))
            if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
            if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
            if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
            if data['asn']:more('ASN: %s'%(data['asn']))
            if data['isp']:more('ISP: %s'%(data['isp']))
            # Bug fix: the label used to read "Map: Map: ..." (duplicated prefix).
            if data['latitude'] and data['longitude']:more('Map: https://www.google.com/maps/@%s,%s,10z (%s,%s)'%(
                data['latitude'],data['longitude'],data['latitude'],data['longitude']))
            if data['org']:more('Organization: %s'%(data['org']))
            if data['ports']:more('Ports: %s'%(data['ports']))
            if data['vulns']:more('Vulns: %s'%(data['vulns']))
        except KeyError:
            pass
    print("")
from lib.colors import *
def plus(string):print("%s[+]%s %s"%(G%0,E,string))
def warn(string):print("%s[!]%s %s"%(R%0,E,string))
def test(string):print("%s[*]%s %s"%(B%0,E,string))
def info(string):print("%s[i]%s %s"%(Y%0,E,string))
def more(string):print(" %s|%s %s"%(W%0,E,string))
def ppwned(data,ver):
    # Print leak-database results for the queried email; verbosity 2/3
    # additionally prints one detail block per individual leak entry.
    if 'found' in data['status']:
        warn('This email was leaked... found %s results..'%(data['results']))
        if ver == 2 or ver == 3:
            for i in range(0,len(data['data'])):
                more('Leaked in: %s'%data['data'][i]['title'])
                more('Data Leaked: %s'%data['data'][i]['date_leaked'])
                more('Details: %s'%data['data'][i]['details'])
                more('Source Network: %s'%data['data'][i]['source_network'])
                print("")
def data(ip,data,email,ver):
    # Print shodan host information for `email` resolved to `ip`.
    # ver selects verbosity: 1 = email/IP only, 2 = + host/location,
    # 3 = + ASN, ISP, map link, org, ports and vulns.
    if ver == 1:plus('Email: %s (%s)'%(email,ip))
    elif ver == 2:
        try:
            plus('Email: %s (%s)'%(email,ip))
            if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
            if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
            if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
        except KeyError as e:
            # Shodan omits unknown fields entirely; skip what is missing.
            pass
    elif ver == 3:
        try:
            plus('Email: %s (%s)'%(email,ip))
            if data['hostnames']:more('Hostname: %s'%(data['hostnames'][0]))
            if data['country_code'] and data['country_name']:more('Country: %s (%s)'%(data['country_code'],data['country_name']))
            if data['city'] and data['region_code']:more('City: %s (%s)'%(data['city'],data['region_code']))
            if data['asn']:more('ASN: %s'%(data['asn']))
            if data['isp']:more('ISP: %s'%(data['isp']))
            # NOTE(review): label prints a duplicated "Map: Map:" prefix.
            if data['latitude'] and data['longitude']:more('Map: Map: https://www.google.com/maps/@%s,%s,10z (%s,%s)'%(
                data['latitude'],data['longitude'],data['latitude'],data['longitude']))
            if data['org']:more('Organization: %s'%(data['org']))
            if data['ports']:more('Ports: %s'%(data['ports']))
            if data['vulns']:more('Vulns: %s'%(data['vulns']))
        except KeyError as e:
            pass
    print("")
f7211689b5c3abfbb49932d88e4323e9e99aec1e | 19,600 | py | Python | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_3/models/volume_performance_by_array.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class VolumePerformanceByArray(object):
    """
    Per-array performance metrics for a volume (FlashArray REST 2.3 model).

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'name': 'str',
        'bytes_per_mirrored_write': 'int',
        'bytes_per_op': 'int',
        'bytes_per_read': 'int',
        'bytes_per_write': 'int',
        'mirrored_write_bytes_per_sec': 'int',
        'mirrored_writes_per_sec': 'int',
        'qos_rate_limit_usec_per_mirrored_write_op': 'int',
        'qos_rate_limit_usec_per_read_op': 'int',
        'qos_rate_limit_usec_per_write_op': 'int',
        'queue_usec_per_mirrored_write_op': 'int',
        'queue_usec_per_read_op': 'int',
        'queue_usec_per_write_op': 'int',
        'read_bytes_per_sec': 'int',
        'reads_per_sec': 'int',
        'san_usec_per_mirrored_write_op': 'int',
        'san_usec_per_read_op': 'int',
        'san_usec_per_write_op': 'int',
        'service_usec_per_mirrored_write_op': 'int',
        'service_usec_per_read_op': 'int',
        'service_usec_per_write_op': 'int',
        'time': 'int',
        'usec_per_mirrored_write_op': 'int',
        'usec_per_read_op': 'int',
        'usec_per_write_op': 'int',
        'write_bytes_per_sec': 'int',
        'writes_per_sec': 'int',
        'array': 'Resource'
    }

    attribute_map = {
        'id': 'id',
        'name': 'name',
        'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
        'bytes_per_op': 'bytes_per_op',
        'bytes_per_read': 'bytes_per_read',
        'bytes_per_write': 'bytes_per_write',
        'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
        'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
        'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
        'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
        'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
        'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
        'queue_usec_per_read_op': 'queue_usec_per_read_op',
        'queue_usec_per_write_op': 'queue_usec_per_write_op',
        'read_bytes_per_sec': 'read_bytes_per_sec',
        'reads_per_sec': 'reads_per_sec',
        'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
        'san_usec_per_read_op': 'san_usec_per_read_op',
        'san_usec_per_write_op': 'san_usec_per_write_op',
        'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
        'service_usec_per_read_op': 'service_usec_per_read_op',
        'service_usec_per_write_op': 'service_usec_per_write_op',
        'time': 'time',
        'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
        'usec_per_read_op': 'usec_per_read_op',
        'usec_per_write_op': 'usec_per_write_op',
        'write_bytes_per_sec': 'write_bytes_per_sec',
        'writes_per_sec': 'writes_per_sec',
        'array': 'array'
    }

    required_args = {
    }

    # Keys whose values must be >= 0 when set: every numeric performance
    # metric.  `time` (an epoch timestamp), the identifiers and the nested
    # `array` resource were never range-checked, matching prior behavior.
    _non_negative_keys = frozenset(swagger_types) - {'id', 'name', 'time', 'array'}

    def __init__(
        self,
        id=None,  # type: str
        name=None,  # type: str
        bytes_per_mirrored_write=None,  # type: int
        bytes_per_op=None,  # type: int
        bytes_per_read=None,  # type: int
        bytes_per_write=None,  # type: int
        mirrored_write_bytes_per_sec=None,  # type: int
        mirrored_writes_per_sec=None,  # type: int
        qos_rate_limit_usec_per_mirrored_write_op=None,  # type: int
        qos_rate_limit_usec_per_read_op=None,  # type: int
        qos_rate_limit_usec_per_write_op=None,  # type: int
        queue_usec_per_mirrored_write_op=None,  # type: int
        queue_usec_per_read_op=None,  # type: int
        queue_usec_per_write_op=None,  # type: int
        read_bytes_per_sec=None,  # type: int
        reads_per_sec=None,  # type: int
        san_usec_per_mirrored_write_op=None,  # type: int
        san_usec_per_read_op=None,  # type: int
        san_usec_per_write_op=None,  # type: int
        service_usec_per_mirrored_write_op=None,  # type: int
        service_usec_per_read_op=None,  # type: int
        service_usec_per_write_op=None,  # type: int
        time=None,  # type: int
        usec_per_mirrored_write_op=None,  # type: int
        usec_per_read_op=None,  # type: int
        usec_per_write_op=None,  # type: int
        write_bytes_per_sec=None,  # type: int
        writes_per_sec=None,  # type: int
        array=None,  # type: models.Resource
    ):
        """
        Keyword args:
            id (str): A globally unique, system-generated ID. The ID cannot be
                modified and cannot refer to another resource.
            name (str): A user-specified name. The name must be locally unique
                and can be changed.
            time (int): The time when the sample performance data was taken.
                Measured in milliseconds since the UNIX epoch.
            array (Resource): The array on which the performance metrics were
                recorded.
            All remaining keyword arguments are non-negative integer
            performance counters: average I/O sizes (`bytes_per_*`),
            throughput (`*_bytes_per_sec`, `*s_per_sec`) and latency
            components in microseconds (`*_usec_per_*_op`: QoS rate limit,
            queue, SAN transfer and service time per read/write/mirrored
            write operation).
        """
        # Only assign attributes that were explicitly provided, so that
        # __getattribute__ can tell "unset" apart from "set to a value".
        provided = locals()
        for attr in self.swagger_types:
            value = provided[attr]
            if value is not None:
                setattr(self, attr, value)

    def __setattr__(self, key, value):
        # Reject unknown attributes and negative values for the metric
        # counters; messages match the previous per-key checks exactly.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `VolumePerformanceByArray`".format(key))
        if key in self._non_negative_keys and value is not None and value < 0:
            raise ValueError(
                "Invalid value for `{}`, must be a value greater than or equal to `0`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        # Unset swagger properties surface as AttributeError rather than
        # leaking the class-level Property placeholder objects.
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(VolumePerformanceByArray, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VolumePerformanceByArray):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 55.211268 | 228 | 0.659847 |
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_3 import models
class VolumePerformanceByArray(object):
swagger_types = {
'id': 'str',
'name': 'str',
'bytes_per_mirrored_write': 'int',
'bytes_per_op': 'int',
'bytes_per_read': 'int',
'bytes_per_write': 'int',
'mirrored_write_bytes_per_sec': 'int',
'mirrored_writes_per_sec': 'int',
'qos_rate_limit_usec_per_mirrored_write_op': 'int',
'qos_rate_limit_usec_per_read_op': 'int',
'qos_rate_limit_usec_per_write_op': 'int',
'queue_usec_per_mirrored_write_op': 'int',
'queue_usec_per_read_op': 'int',
'queue_usec_per_write_op': 'int',
'read_bytes_per_sec': 'int',
'reads_per_sec': 'int',
'san_usec_per_mirrored_write_op': 'int',
'san_usec_per_read_op': 'int',
'san_usec_per_write_op': 'int',
'service_usec_per_mirrored_write_op': 'int',
'service_usec_per_read_op': 'int',
'service_usec_per_write_op': 'int',
'time': 'int',
'usec_per_mirrored_write_op': 'int',
'usec_per_read_op': 'int',
'usec_per_write_op': 'int',
'write_bytes_per_sec': 'int',
'writes_per_sec': 'int',
'array': 'Resource'
}
attribute_map = {
'id': 'id',
'name': 'name',
'bytes_per_mirrored_write': 'bytes_per_mirrored_write',
'bytes_per_op': 'bytes_per_op',
'bytes_per_read': 'bytes_per_read',
'bytes_per_write': 'bytes_per_write',
'mirrored_write_bytes_per_sec': 'mirrored_write_bytes_per_sec',
'mirrored_writes_per_sec': 'mirrored_writes_per_sec',
'qos_rate_limit_usec_per_mirrored_write_op': 'qos_rate_limit_usec_per_mirrored_write_op',
'qos_rate_limit_usec_per_read_op': 'qos_rate_limit_usec_per_read_op',
'qos_rate_limit_usec_per_write_op': 'qos_rate_limit_usec_per_write_op',
'queue_usec_per_mirrored_write_op': 'queue_usec_per_mirrored_write_op',
'queue_usec_per_read_op': 'queue_usec_per_read_op',
'queue_usec_per_write_op': 'queue_usec_per_write_op',
'read_bytes_per_sec': 'read_bytes_per_sec',
'reads_per_sec': 'reads_per_sec',
'san_usec_per_mirrored_write_op': 'san_usec_per_mirrored_write_op',
'san_usec_per_read_op': 'san_usec_per_read_op',
'san_usec_per_write_op': 'san_usec_per_write_op',
'service_usec_per_mirrored_write_op': 'service_usec_per_mirrored_write_op',
'service_usec_per_read_op': 'service_usec_per_read_op',
'service_usec_per_write_op': 'service_usec_per_write_op',
'time': 'time',
'usec_per_mirrored_write_op': 'usec_per_mirrored_write_op',
'usec_per_read_op': 'usec_per_read_op',
'usec_per_write_op': 'usec_per_write_op',
'write_bytes_per_sec': 'write_bytes_per_sec',
'writes_per_sec': 'writes_per_sec',
'array': 'array'
}
required_args = {
}
def __init__(
self,
id=None,
name=None,
bytes_per_mirrored_write=None,
bytes_per_op=None,
bytes_per_read=None,
bytes_per_write=None,
mirrored_write_bytes_per_sec=None,
mirrored_writes_per_sec=None,
qos_rate_limit_usec_per_mirrored_write_op=None,
qos_rate_limit_usec_per_read_op=None,
qos_rate_limit_usec_per_write_op=None,
queue_usec_per_mirrored_write_op=None,
queue_usec_per_read_op=None,
queue_usec_per_write_op=None,
read_bytes_per_sec=None,
reads_per_sec=None,
san_usec_per_mirrored_write_op=None,
san_usec_per_read_op=None,
san_usec_per_write_op=None,
service_usec_per_mirrored_write_op=None,
service_usec_per_read_op=None,
service_usec_per_write_op=None,
time=None,
usec_per_mirrored_write_op=None,
usec_per_read_op=None,
usec_per_write_op=None,
write_bytes_per_sec=None,
writes_per_sec=None,
array=None,
):
if id is not None:
self.id = id
if name is not None:
self.name = name
if bytes_per_mirrored_write is not None:
self.bytes_per_mirrored_write = bytes_per_mirrored_write
if bytes_per_op is not None:
self.bytes_per_op = bytes_per_op
if bytes_per_read is not None:
self.bytes_per_read = bytes_per_read
if bytes_per_write is not None:
self.bytes_per_write = bytes_per_write
if mirrored_write_bytes_per_sec is not None:
self.mirrored_write_bytes_per_sec = mirrored_write_bytes_per_sec
if mirrored_writes_per_sec is not None:
self.mirrored_writes_per_sec = mirrored_writes_per_sec
if qos_rate_limit_usec_per_mirrored_write_op is not None:
self.qos_rate_limit_usec_per_mirrored_write_op = qos_rate_limit_usec_per_mirrored_write_op
if qos_rate_limit_usec_per_read_op is not None:
self.qos_rate_limit_usec_per_read_op = qos_rate_limit_usec_per_read_op
if qos_rate_limit_usec_per_write_op is not None:
self.qos_rate_limit_usec_per_write_op = qos_rate_limit_usec_per_write_op
if queue_usec_per_mirrored_write_op is not None:
self.queue_usec_per_mirrored_write_op = queue_usec_per_mirrored_write_op
if queue_usec_per_read_op is not None:
self.queue_usec_per_read_op = queue_usec_per_read_op
if queue_usec_per_write_op is not None:
self.queue_usec_per_write_op = queue_usec_per_write_op
if read_bytes_per_sec is not None:
self.read_bytes_per_sec = read_bytes_per_sec
if reads_per_sec is not None:
self.reads_per_sec = reads_per_sec
if san_usec_per_mirrored_write_op is not None:
self.san_usec_per_mirrored_write_op = san_usec_per_mirrored_write_op
if san_usec_per_read_op is not None:
self.san_usec_per_read_op = san_usec_per_read_op
if san_usec_per_write_op is not None:
self.san_usec_per_write_op = san_usec_per_write_op
if service_usec_per_mirrored_write_op is not None:
self.service_usec_per_mirrored_write_op = service_usec_per_mirrored_write_op
if service_usec_per_read_op is not None:
self.service_usec_per_read_op = service_usec_per_read_op
if service_usec_per_write_op is not None:
self.service_usec_per_write_op = service_usec_per_write_op
if time is not None:
self.time = time
if usec_per_mirrored_write_op is not None:
self.usec_per_mirrored_write_op = usec_per_mirrored_write_op
if usec_per_read_op is not None:
self.usec_per_read_op = usec_per_read_op
if usec_per_write_op is not None:
self.usec_per_write_op = usec_per_write_op
if write_bytes_per_sec is not None:
self.write_bytes_per_sec = write_bytes_per_sec
if writes_per_sec is not None:
self.writes_per_sec = writes_per_sec
if array is not None:
self.array = array
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `VolumePerformanceByArray`".format(key))
if key == "bytes_per_mirrored_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_mirrored_write`, must be a value greater than or equal to `0`")
if key == "bytes_per_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_op`, must be a value greater than or equal to `0`")
if key == "bytes_per_read" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_read`, must be a value greater than or equal to `0`")
if key == "bytes_per_write" and value is not None:
if value < 0:
raise ValueError("Invalid value for `bytes_per_write`, must be a value greater than or equal to `0`")
if key == "mirrored_write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "mirrored_writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `mirrored_writes_per_sec`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "qos_rate_limit_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `qos_rate_limit_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "queue_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `queue_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "read_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `read_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "reads_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `reads_per_sec`, must be a value greater than or equal to `0`")
if key == "san_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "san_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `san_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "service_usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `service_usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_mirrored_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_mirrored_write_op`, must be a value greater than or equal to `0`")
if key == "usec_per_read_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_read_op`, must be a value greater than or equal to `0`")
if key == "usec_per_write_op" and value is not None:
if value < 0:
raise ValueError("Invalid value for `usec_per_write_op`, must be a value greater than or equal to `0`")
if key == "write_bytes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `write_bytes_per_sec`, must be a value greater than or equal to `0`")
if key == "writes_per_sec" and value is not None:
if value < 0:
raise ValueError("Invalid value for `writes_per_sec`, must be a value greater than or equal to `0`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VolumePerformanceByArray, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, VolumePerformanceByArray):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72116c79003469c2b0e2b7eb8a18e69c2918151 | 3,600 | py | Python | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | ikben/aws-deployment-framework | 9a32492209d35660b9ece66211eb200b64dc0ef9 | [
"Apache-2.0"
] | 1 | 2022-03-24T10:43:53.000Z | 2022-03-24T10:43:53.000Z | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | thomasmcgannon/aws-deployment-framework | 0723ddf4eaf55888ae780dc48873f0ec4766cfbd | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/python/stepfunctions.py | thomasmcgannon/aws-deployment-framework | 0723ddf4eaf55888ae780dc48873f0ec4766cfbd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
Step Functions module used throughout the ADF
"""
import json
from time import sleep
from logger import configure_logger
from partition import get_partition
LOGGER = configure_logger(__name__)
class StepFunctions:
"""
Class used for modeling Step Functions
"""
def __init__(
self,
role,
deployment_account_id,
deployment_account_region,
regions,
account_ids=None,
full_path=None,
update_pipelines_only=0,
error=0
):
self.deployment_account_region = deployment_account_region
self.client = role.client(
'stepfunctions',
region_name=self.deployment_account_region
)
self.regions = regions
self.deployment_account_id = deployment_account_id
self.update_pipelines_only = update_pipelines_only
self.account_ids = account_ids
self.execution_arn = None
self.full_path = full_path
self.execution_status = None
self.error = error
def execute_statemachine(self):
"""
Main entry to executed state machine in Deployment Account
"""
self._start_statemachine()
self._wait_state_machine_execution()
def _start_statemachine(self):
"""
Executes the Update Cross Account IAM Step Function in the Deployment Account
"""
partition = get_partition(self.deployment_account_region)
self.execution_arn = self.client.start_execution(
stateMachineArn=(
f"arn:{partition}:states:{self.deployment_account_region}:"
f"{self.deployment_account_id}:stateMachine:EnableCrossAccountAccess"
),
input=json.dumps({
"deployment_account_region": self.deployment_account_region,
"deployment_account_id": self.deployment_account_id,
"account_ids": self.account_ids,
"regions": self.regions,
"full_path": self.full_path,
"update_only": self.update_pipelines_only,
"error": self.error
})
).get('executionArn')
self._fetch_statemachine_status()
@property
def execution_status(self):
"""
Returns the status of the state machine
"""
return self._execution_status
@execution_status.setter
def execution_status(self, execution_status):
"""
Set the status of the state machine
"""
self._execution_status = execution_status
def _fetch_statemachine_status(self):
"""
Get the current status of the state machine
"""
execution = self.client.describe_execution(
executionArn=self.execution_arn
)
self._execution_status = execution.get('status', None)
# Is there a legit waiter for this?
def _wait_state_machine_execution(self):
"""
Waits until the state machine is complete
"""
while self.execution_status == 'RUNNING':
self._fetch_statemachine_status()
sleep(10) # Wait for 10 seconds and check the status again
if self.execution_status in ('FAILED', 'ABORTED', 'TIMED_OUT'):
raise Exception(
f'State Machine on Deployment account {self.deployment_account_id} '
f'has status: {self.execution_status}, see logs'
)
| 31.578947 | 85 | 0.621944 |
import json
from time import sleep
from logger import configure_logger
from partition import get_partition
LOGGER = configure_logger(__name__)
class StepFunctions:
def __init__(
self,
role,
deployment_account_id,
deployment_account_region,
regions,
account_ids=None,
full_path=None,
update_pipelines_only=0,
error=0
):
self.deployment_account_region = deployment_account_region
self.client = role.client(
'stepfunctions',
region_name=self.deployment_account_region
)
self.regions = regions
self.deployment_account_id = deployment_account_id
self.update_pipelines_only = update_pipelines_only
self.account_ids = account_ids
self.execution_arn = None
self.full_path = full_path
self.execution_status = None
self.error = error
def execute_statemachine(self):
self._start_statemachine()
self._wait_state_machine_execution()
def _start_statemachine(self):
partition = get_partition(self.deployment_account_region)
self.execution_arn = self.client.start_execution(
stateMachineArn=(
f"arn:{partition}:states:{self.deployment_account_region}:"
f"{self.deployment_account_id}:stateMachine:EnableCrossAccountAccess"
),
input=json.dumps({
"deployment_account_region": self.deployment_account_region,
"deployment_account_id": self.deployment_account_id,
"account_ids": self.account_ids,
"regions": self.regions,
"full_path": self.full_path,
"update_only": self.update_pipelines_only,
"error": self.error
})
).get('executionArn')
self._fetch_statemachine_status()
@property
def execution_status(self):
return self._execution_status
@execution_status.setter
def execution_status(self, execution_status):
self._execution_status = execution_status
def _fetch_statemachine_status(self):
execution = self.client.describe_execution(
executionArn=self.execution_arn
)
self._execution_status = execution.get('status', None)
def _wait_state_machine_execution(self):
while self.execution_status == 'RUNNING':
self._fetch_statemachine_status()
sleep(10)
if self.execution_status in ('FAILED', 'ABORTED', 'TIMED_OUT'):
raise Exception(
f'State Machine on Deployment account {self.deployment_account_id} '
f'has status: {self.execution_status}, see logs'
)
| true | true |
f721174bebba042d3b37612296998e084c86fde8 | 918 | py | Python | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | 9 | 2018-03-15T19:10:27.000Z | 2021-03-15T21:01:24.000Z | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | null | null | null | apps/cli/utils/merge_yaml_sources.py | derekmerck/DIANA | 5553265b8fc822b35848d0966b25b93b99d503fb | [
"MIT"
] | 2 | 2018-03-15T19:13:22.000Z | 2018-04-18T16:33:33.000Z | import os, logging
from glob import glob
from pprint import pformat
import yaml
"""
Env var expansion and merge data from:
- input in yaml/json format
- input file or dir of files in yaml/json format
"""
def merge_yaml_sources(data=None, path=None):
result = {}
if data:
data_exp = os.path.expandvars(data)
result = yaml.safe_load(data_exp)
if os.path.isfile(path):
with open(path) as f:
finput_exp = os.path.expandvars(f.read())
result.update(yaml.safe_load(finput_exp))
elif os.path.isdir(path):
fps = glob(os.path.join(path, "*.yml"))
for fp in fps:
with open(fp) as f:
finput_exp = os.path.expandvars(f.read())
result.update(yaml.safe_load(finput_exp))
logging.debug("Merged yaml maps")
logging.debug("===================")
logging.debug(pformat(result))
return result | 27 | 57 | 0.615468 | import os, logging
from glob import glob
from pprint import pformat
import yaml
def merge_yaml_sources(data=None, path=None):
result = {}
if data:
data_exp = os.path.expandvars(data)
result = yaml.safe_load(data_exp)
if os.path.isfile(path):
with open(path) as f:
finput_exp = os.path.expandvars(f.read())
result.update(yaml.safe_load(finput_exp))
elif os.path.isdir(path):
fps = glob(os.path.join(path, "*.yml"))
for fp in fps:
with open(fp) as f:
finput_exp = os.path.expandvars(f.read())
result.update(yaml.safe_load(finput_exp))
logging.debug("Merged yaml maps")
logging.debug("===================")
logging.debug(pformat(result))
return result | true | true |
f7211765b08d783a5f129616815fe2035703ff38 | 25,215 | py | Python | neutron/agent/l3/router_info.py | markmcclain/neutron | 3108d2dece0501dbb661e2f5a4bb530a199f9fde | [
"Apache-2.0"
] | 3 | 2016-08-07T01:25:54.000Z | 2021-03-01T10:19:14.000Z | neutron/agent/l3/router_info.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/router_info.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | 2 | 2016-09-10T13:21:10.000Z | 2016-12-23T01:44:53.000Z | # Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
class RouterInfo(object):
    def __init__(self,
                 router_id,
                 router,
                 agent_conf,
                 interface_driver,
                 use_ipv6=False):
        """Build the in-memory representation of a router.

        No system state is touched here; see initialize() for that.

        :param router_id: UUID of the router
        :param router: router dict as supplied by the plugin
        :param agent_conf: the L3 agent configuration object
        :param interface_driver: driver used to plug/unplug interfaces
        :param use_ipv6: whether IPv6 support is enabled
        """
        self.router_id = router_id
        # ex_gw_port must be set before the router setter below runs,
        # since the setter consults it to derive the SNAT action.
        self.ex_gw_port = None
        self._snat_enabled = None
        self._snat_action = None
        self.internal_ports = []
        self.floating_ips = set()
        # Invoke the setter for establishing initial SNAT action
        self.router = router
        self.use_ipv6 = use_ipv6
        self.ns_name = None
        self.router_namespace = None
        if agent_conf.use_namespaces:
            ns = namespaces.RouterNamespace(
                router_id, agent_conf, interface_driver, use_ipv6)
            self.router_namespace = ns
            self.ns_name = ns.name
        self.iptables_manager = iptables_manager.IptablesManager(
            use_ipv6=use_ipv6,
            namespace=self.ns_name)
        self.routes = []
        self.agent_conf = agent_conf
        self.driver = interface_driver
        # radvd is a neutron.agent.linux.ra.DaemonMonitor; created lazily
        # in initialize() because it needs the process monitor.
        self.radvd = None
    def initialize(self, process_monitor):
        """Initialize the router on the system.

        This differs from __init__ in that this method actually affects the
        system creating namespaces, starting processes, etc. The other merely
        initializes the python object.  This separates in-memory object
        initialization from methods that actually go do stuff to the system.

        :param process_monitor: The agent's process monitor instance.
        """
        self.process_monitor = process_monitor
        # radvd provides IPv6 router advertisements on internal ports.
        self.radvd = ra.DaemonMonitor(self.router_id,
                                      self.ns_name,
                                      process_monitor,
                                      self.get_internal_device_name)

        if self.router_namespace:
            self.router_namespace.create()
    @property
    def router(self):
        """Return the router dict last pushed via the setter (may be None)."""
        return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
    @property
    def is_ha(self):
        """Whether this router is an HA router; always False in this class."""
        # TODO(Carl) Refactoring should render this obsolete.  Remove it.
        return False
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self._router.get('gw_port'),
*args,
action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
#replace success even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
    def get_ex_gw_port(self):
        """Return the router's external gateway port dict, or None."""
        return self.router.get('gw_port')
    def get_floating_ips(self):
        """Filter Floating IPs to be hosted on this agent.

        :returns: list of floating IP dicts (empty if none are present)
        """
        return self.router.get(l3_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
def process_floating_ip_nat_rules(self):
"""Configure NAT rules for the router's floating IPs.
Configures iptables rules for the floating ips of the given router
"""
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
    def process_snat_dnat_for_fip(self):
        """Install floating IP NAT rules, wrapping any failure.

        Errors are re-raised as FloatingIpSetupException so the caller
        can put this router's floating IPs into an error state.
        """
        try:
            self.process_floating_ip_nat_rules()
        except Exception:
            # TODO(salv-orlando): Less broad catching
            raise n_exc.FloatingIpSetupException(
                'L3 agent failure to setup NAT for floating IPs')
    def _add_fip_addr_to_device(self, fip, device):
        """Configures the floating ip address on the device.

        :param fip: floating IP dict with 'floating_ip_address' and 'id'
        :param device: IPDevice on which to configure the address
        :returns: True on success, None on failure (callers treat a
            non-True value as an error condition)
        """
        try:
            ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
            device.addr.add(ip_cidr)
            return True
        except RuntimeError:
            # any exception occurred here should cause the floating IP
            # to be set in error state
            LOG.warn(_LW("Unable to configure IP address for "
                         "floating IP: %s"), fip['id'])
    def add_floating_ip(self, fip, interface_name, device):
        """Configure one floating IP; must be implemented by subclasses.

        :returns: a floating IP status value (see callers in
            process_floating_ip_addresses).
        """
        raise NotImplementedError()
    def remove_floating_ip(self, device, ip_cidr):
        """Remove a floating IP address and its conntrack state."""
        device.addr.delete(ip_cidr)
        # Clear conntrack entries so established flows stop using the FIP.
        self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
    def process_floating_ip_addresses(self, interface_name):
        """Configure IP addresses on router's external gateway interface.

        Ensures addresses for existing floating IPs and cleans up
        those that should not longer be configured.

        :param interface_name: name of the external gateway device
        :returns: dict mapping floating IP id to its status value
        """
        fip_statuses = {}
        if interface_name is None:
            LOG.debug('No Interface for floating IPs router: %s',
                      self.router['id'])
            return fip_statuses

        device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
        existing_cidrs = self.get_router_cidrs(device)
        new_cidrs = set()

        floating_ips = self.get_floating_ips()
        # Loop once to ensure that floating ips are configured.
        for fip in floating_ips:
            fip_ip = fip['floating_ip_address']
            ip_cidr = common_utils.ip_to_cidr(fip_ip)
            new_cidrs.add(ip_cidr)
            fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
            if ip_cidr not in existing_cidrs:
                # add_floating_ip may return a non-ACTIVE status on failure.
                fip_statuses[fip['id']] = self.add_floating_ip(
                    fip, interface_name, device)
                LOG.debug('Floating ip %(id)s added, status %(status)s',
                          {'id': fip['id'],
                           'status': fip_statuses.get(fip['id'])})

        # Only remove host (/32) addresses no longer backed by a FIP,
        # leaving the interface's own subnet addresses untouched.
        fips_to_remove = (
            ip_cidr for ip_cidr in existing_cidrs - new_cidrs
            if common_utils.is_cidr_host(ip_cidr))
        for ip_cidr in fips_to_remove:
            self.remove_floating_ip(device, ip_cidr)

        return fip_statuses
    def configure_fip_addresses(self, interface_name):
        """Configure floating IP addresses, wrapping any failure.

        Errors are re-raised as FloatingIpSetupException so the caller
        can put this router's floating IPs into an error state.

        :returns: dict mapping floating IP id to its status value
        """
        try:
            return self.process_floating_ip_addresses(interface_name)
        except Exception:
            # TODO(salv-orlando): Less broad catching
            raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
                                                 'floating IPs')
def put_fips_in_error_state(self):
fip_statuses = {}
for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
return fip_statuses
    def delete(self, agent):
        """Tear down the router completely.

        The router dict is emptied first so that process() unconfigures
        all ports and floating IPs; only afterwards are radvd stopped
        and the namespace removed.
        """
        self.router['gw_port'] = None
        self.router[l3_constants.INTERFACE_KEY] = []
        self.router[l3_constants.FLOATINGIP_KEY] = []
        self.process(agent)
        self.radvd.disable()
        if self.router_namespace:
            self.router_namespace.delete()
    def _internal_network_added(self, ns_name, network_id, port_id,
                                fixed_ips, mac_address,
                                interface_name, prefix):
        """Plug a port into a namespace and configure its L3 addresses.

        Plugging is idempotent: the device is only plugged when it does
        not already exist in the namespace; addresses are (re)applied
        unconditionally.
        """
        if not ip_lib.device_exists(interface_name,
                                    namespace=ns_name):
            self.driver.plug(network_id, port_id, interface_name, mac_address,
                             namespace=ns_name,
                             prefix=prefix)

        ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
        self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
        for fixed_ip in fixed_ips:
            # Advertise the addresses so neighbors refresh their ARP caches.
            ip_lib.send_gratuitous_arp(ns_name,
                                       interface_name,
                                       fixed_ip['ip_address'],
                                       self.agent_conf.send_arp_for_ha)
def internal_network_added(self, port):
network_id = port['network_id']
port_id = port['id']
fixed_ips = port['fixed_ips']
mac_address = port['mac_address']
interface_name = self.get_internal_device_name(port_id)
self._internal_network_added(self.ns_name,
network_id,
port_id,
fixed_ips,
mac_address,
interface_name,
INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
interface_name = self.get_internal_device_name(port['id'])
if ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.unplug(interface_name, namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _get_existing_devices(self):
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
    def _process_internal_ports(self):
        """Reconcile plugged devices with the router's internal ports.

        Adds newly attached admin-up ports, removes detached ones,
        refreshes radvd whenever the set of IPv6 subnets changes and
        finally unplugs stale devices left over in the namespace.
        """
        existing_port_ids = set(p['id'] for p in self.internal_ports)

        internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
        # Only admin-up ports are considered current.
        current_port_ids = set(p['id'] for p in internal_ports
                               if p['admin_state_up'])

        new_port_ids = current_port_ids - existing_port_ids
        new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
        old_ports = [p for p in self.internal_ports
                     if p['id'] not in current_port_ids]

        new_ipv6_port = False
        old_ipv6_port = False
        for p in new_ports:
            self.internal_network_added(p)
            self.internal_ports.append(p)
            # Track whether any added port carries an IPv6 subnet so
            # radvd can be (re)configured below.
            if not new_ipv6_port:
                for subnet in p['subnets']:
                    if netaddr.IPNetwork(subnet['cidr']).version == 6:
                        new_ipv6_port = True
                        break

        for p in old_ports:
            self.internal_network_removed(p)
            self.internal_ports.remove(p)
            if not old_ipv6_port:
                for subnet in p['subnets']:
                    if netaddr.IPNetwork(subnet['cidr']).version == 6:
                        old_ipv6_port = True
                        break

        # Enable RA when the IPv6 port set changed in either direction.
        if new_ipv6_port or old_ipv6_port:
            self.radvd.enable(internal_ports)

        # Unplug namespace devices that no longer correspond to any
        # known internal port (e.g. left over after an agent restart).
        existing_devices = self._get_existing_devices()
        current_internal_devs = set(n for n in existing_devices
                                    if n.startswith(INTERNAL_DEV_PREFIX))
        current_port_devs = set(self.get_internal_device_name(port_id)
                                for port_id in current_port_ids)
        stale_devs = current_internal_devs - current_port_devs
        for stale_dev in stale_devs:
            LOG.debug('Deleting stale internal router device: %s',
                      stale_dev)
            self.driver.unplug(stale_dev,
                               namespace=self.ns_name,
                               prefix=INTERNAL_DEV_PREFIX)
def _list_floating_ip_cidrs(self):
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
# causing a momentarily network outage.
floating_ips = self.get_floating_ips()
return [common_utils.ip_to_cidr(ip['floating_ip_address'])
for ip in floating_ips]
def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
if not ip_lib.device_exists(interface_name, namespace=ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
bridge=self.agent_conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
    def _external_gateway_added(self, ex_gw_port, interface_name,
                                ns_name, preserve_ips):
        """Plug and configure the external gateway port in ns_name.

        :param ex_gw_port: gateway port dict ('fixed_ips', optionally
            'subnets' and 'extra_subnets')
        :param interface_name: device name for the gateway interface
        :param ns_name: namespace to configure the device in
        :param preserve_ips: CIDRs (e.g. floating IPs) that init_l3
            must leave on the device rather than removing
        """
        self._plug_external_gateway(ex_gw_port, interface_name, ns_name)

        # Build up the interface and gateway IP addresses that
        # will be added to the interface.
        ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])

        gateway_ips = []
        enable_ra_on_gw = False
        if 'subnets' in ex_gw_port:
            # Collect the gateway address of every subnet that has one.
            gateway_ips = [subnet['gateway_ip']
                           for subnet in ex_gw_port['subnets']
                           if subnet['gateway_ip']]
        if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
            # No IPv6 gateway is available, but IPv6 is enabled.
            if self.agent_conf.ipv6_gateway:
                # ipv6_gateway configured, use address for default route.
                gateway_ips.append(self.agent_conf.ipv6_gateway)
            else:
                # ipv6_gateway is also not configured.
                # Use RA for default route.
                enable_ra_on_gw = True
        self.driver.init_l3(interface_name,
                            ip_cidrs,
                            namespace=ns_name,
                            gateway_ips=gateway_ips,
                            extra_subnets=ex_gw_port.get('extra_subnets', []),
                            preserve_ips=preserve_ips,
                            enable_ra_on_gw=enable_ra_on_gw)

        # Announce the gateway addresses so neighbors refresh their
        # ARP caches promptly.
        for fixed_ip in ex_gw_port['fixed_ips']:
            ip_lib.send_gratuitous_arp(ns_name,
                                       interface_name,
                                       fixed_ip['ip_address'],
                                       self.agent_conf.send_arp_for_ha)
def is_v6_gateway_set(self, gateway_ips):
"""Check to see if list of gateway_ips has an IPv6 gateway.
"""
# Note - don't require a try-except here as all
# gateway_ips elements are valid addresses, if they exist.
return any(netaddr.IPAddress(gw_ip).version == 6
for gw_ip in gateway_ips)
def external_gateway_added(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_updated(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_removed(self, ex_gw_port, interface_name):
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _process_external_gateway(self, ex_gw_port):
# TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in d.iteritems()
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
# Process SNAT rules for external gateway
self.perform_snat_action(self._handle_router_snat_rules,
interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]
return rules
def external_gateway_mangle_rules(self, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('mark', '-i %s -j MARK --set-xmark %s/%s' %
(interface_name, mark, EXTERNAL_INGRESS_MARK_MASK))]
return rules
def _empty_snat_chains(self, iptables_manager):
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
iptables_manager.ipv4['mangle'].empty_chain('mark')
def _add_snat_rules(self, ex_gw_port, iptables_manager,
interface_name, action):
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
rules = self.external_gateway_mangle_rules(interface_name)
for rule in rules:
iptables_manager.ipv4['mangle'].add_rule(*rule)
break
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
self._empty_snat_chains(self.iptables_manager)
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
self._add_snat_rules(ex_gw_port,
self.iptables_manager,
interface_name,
action)
    def process_external(self, agent):
        """Configure the external gateway and floating IPs of the router.

        Floating IP setup failures do not abort processing: every
        floating IP is reported to the server in ERROR state instead.

        :param agent: the L3 agent; receives floating IP status updates.
        """
        existing_floating_ips = self.floating_ips
        try:
            # Batch all iptables changes so they are applied atomically
            # when the deferred context exits.
            with self.iptables_manager.defer_apply():
                ex_gw_port = self.get_ex_gw_port()
                self._process_external_gateway(ex_gw_port)
                # TODO(Carl) Return after setting existing_floating_ips and
                # still call update_fip_statuses?
                if not ex_gw_port:
                    return

                # Process SNAT/DNAT rules and addresses for floating IPs
                self.process_snat_dnat_for_fip()

            # Once NAT rules for floating IPs are safely in place
            # configure their addresses on the external gateway port
            interface_name = self.get_external_device_interface_name(
                ex_gw_port)
            fip_statuses = self.configure_fip_addresses(interface_name)
        except (n_exc.FloatingIpSetupException,
                n_exc.IpTablesApplyException) as e:
            # All floating IPs must be put in error state
            LOG.exception(e)
            fip_statuses = self.put_fips_in_error_state()

        agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
    @common_utils.exception_logger()
    def process(self, agent):
        """Process updates to this router

        This method is the point where the agent requests that updates be
        applied to this router.

        :param agent: Passes the agent in order to send RPC messages.
        """
        self._process_internal_ports()
        self.process_external(agent)
        # Process static routes for router
        self.routes_updated()

        # Update ex_gw_port and enable_snat on the router info cache
        # NOTE(review): cache fields are updated only after processing
        # completes, presumably so a failed pass is retried against the
        # pre-update state -- confirm against exception_logger semantics.
        self.ex_gw_port = self.get_ex_gw_port()
        self.snat_ports = self.router.get(
            l3_constants.SNAT_ROUTER_INTF_KEY, [])
        self.enable_snat = self.router.get('enable_snat')
| 42.592905 | 79 | 0.599683 |
import netaddr
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
class RouterInfo(object):
def __init__(self,
router_id,
router,
agent_conf,
interface_driver,
use_ipv6=False):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
self.router = router
self.use_ipv6 = use_ipv6
self.ns_name = None
self.router_namespace = None
if agent_conf.use_namespaces:
ns = namespaces.RouterNamespace(
router_id, agent_conf, interface_driver, use_ipv6)
self.router_namespace = ns
self.ns_name = ns.name
self.iptables_manager = iptables_manager.IptablesManager(
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.routes = []
self.agent_conf = agent_conf
self.driver = interface_driver
self.radvd = None
def initialize(self, process_monitor):
self.process_monitor = process_monitor
self.radvd = ra.DaemonMonitor(self.router_id,
self.ns_name,
process_monitor,
self.get_internal_device_name)
if self.router_namespace:
self.router_namespace.create()
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
@property
def is_ha(self):
# TODO(Carl) Refactoring should render this obsolete. Remove it.
return False
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self._router.get('gw_port'),
*args,
action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
#replace success even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
def get_ex_gw_port(self):
return self.router.get('gw_port')
def get_floating_ips(self):
return self.router.get(l3_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
def process_floating_ip_nat_rules(self):
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
def process_snat_dnat_for_fip(self):
try:
self.process_floating_ip_nat_rules()
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException(
'L3 agent failure to setup NAT for floating IPs')
def _add_fip_addr_to_device(self, fip, device):
try:
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
device.addr.add(ip_cidr)
return True
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warn(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
def remove_floating_ip(self, device, ip_cidr):
device.addr.delete(ip_cidr)
self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
def process_floating_ip_addresses(self, interface_name):
fip_statuses = {}
if interface_name is None:
LOG.debug('No Interface for floating IPs router: %s',
self.router['id'])
return fip_statuses
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
existing_cidrs = self.get_router_cidrs(device)
new_cidrs = set()
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self.add_floating_ip(
fip, interface_name, device)
LOG.debug('Floating ip %(id)s added, status %(status)s',
{'id': fip['id'],
'status': fip_statuses.get(fip['id'])})
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
def configure_fip_addresses(self, interface_name):
try:
return self.process_floating_ip_addresses(interface_name)
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
'floating IPs')
def put_fips_in_error_state(self):
fip_statuses = {}
for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
return fip_statuses
def delete(self, agent):
self.router['gw_port'] = None
self.router[l3_constants.INTERFACE_KEY] = []
self.router[l3_constants.FLOATINGIP_KEY] = []
self.process(agent)
self.radvd.disable()
if self.router_namespace:
self.router_namespace.delete()
def _internal_network_added(self, ns_name, network_id, port_id,
fixed_ips, mac_address,
interface_name, prefix):
if not ip_lib.device_exists(interface_name,
namespace=ns_name):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ns_name,
prefix=prefix)
ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
for fixed_ip in fixed_ips:
ip_lib.send_gratuitous_arp(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf.send_arp_for_ha)
def internal_network_added(self, port):
network_id = port['network_id']
port_id = port['id']
fixed_ips = port['fixed_ips']
mac_address = port['mac_address']
interface_name = self.get_internal_device_name(port_id)
self._internal_network_added(self.ns_name,
network_id,
port_id,
fixed_ips,
mac_address,
interface_name,
INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
interface_name = self.get_internal_device_name(port['id'])
if ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.unplug(interface_name, namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _get_existing_devices(self):
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
def _process_internal_ports(self):
existing_port_ids = set(p['id'] for p in self.internal_ports)
internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
current_port_ids = set(p['id'] for p in internal_ports
if p['admin_state_up'])
new_port_ids = current_port_ids - existing_port_ids
new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
old_ports = [p for p in self.internal_ports
if p['id'] not in current_port_ids]
new_ipv6_port = False
old_ipv6_port = False
for p in new_ports:
self.internal_network_added(p)
self.internal_ports.append(p)
if not new_ipv6_port:
for subnet in p['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
new_ipv6_port = True
break
for p in old_ports:
self.internal_network_removed(p)
self.internal_ports.remove(p)
if not old_ipv6_port:
for subnet in p['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
old_ipv6_port = True
break
# Enable RA
if new_ipv6_port or old_ipv6_port:
self.radvd.enable(internal_ports)
existing_devices = self._get_existing_devices()
current_internal_devs = set(n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX))
current_port_devs = set(self.get_internal_device_name(port_id)
for port_id in current_port_ids)
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
stale_dev)
self.driver.unplug(stale_dev,
namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _list_floating_ip_cidrs(self):
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
# causing a momentarily network outage.
floating_ips = self.get_floating_ips()
return [common_utils.ip_to_cidr(ip['floating_ip_address'])
for ip in floating_ips]
def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
if not ip_lib.device_exists(interface_name, namespace=ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
bridge=self.agent_conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
# Build up the interface and gateway IP addresses that
# will be added to the interface.
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
gateway_ips = []
enable_ra_on_gw = False
if 'subnets' in ex_gw_port:
gateway_ips = [subnet['gateway_ip']
for subnet in ex_gw_port['subnets']
if subnet['gateway_ip']]
if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
# No IPv6 gateway is available, but IPv6 is enabled.
if self.agent_conf.ipv6_gateway:
# ipv6_gateway configured, use address for default route.
gateway_ips.append(self.agent_conf.ipv6_gateway)
else:
# ipv6_gateway is also not configured.
# Use RA for default route.
enable_ra_on_gw = True
self.driver.init_l3(interface_name,
ip_cidrs,
namespace=ns_name,
gateway_ips=gateway_ips,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips,
enable_ra_on_gw=enable_ra_on_gw)
for fixed_ip in ex_gw_port['fixed_ips']:
ip_lib.send_gratuitous_arp(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf.send_arp_for_ha)
def is_v6_gateway_set(self, gateway_ips):
# Note - don't require a try-except here as all
return any(netaddr.IPAddress(gw_ip).version == 6
for gw_ip in gateway_ips)
def external_gateway_added(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_updated(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_removed(self, ex_gw_port, interface_name):
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _process_external_gateway(self, ex_gw_port):
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in d.iteritems()
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
self.perform_snat_action(self._handle_router_snat_rules,
interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]
return rules
def external_gateway_mangle_rules(self, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('mark', '-i %s -j MARK --set-xmark %s/%s' %
(interface_name, mark, EXTERNAL_INGRESS_MARK_MASK))]
return rules
def _empty_snat_chains(self, iptables_manager):
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
iptables_manager.ipv4['mangle'].empty_chain('mark')
def _add_snat_rules(self, ex_gw_port, iptables_manager,
interface_name, action):
if action == 'add_rules' and ex_gw_port:
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
rules = self.external_gateway_mangle_rules(interface_name)
for rule in rules:
iptables_manager.ipv4['mangle'].add_rule(*rule)
break
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
self._empty_snat_chains(self.iptables_manager)
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
self._add_snat_rules(ex_gw_port,
self.iptables_manager,
interface_name,
action)
def process_external(self, agent):
existing_floating_ips = self.floating_ips
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
self._process_external_gateway(ex_gw_port)
if not ex_gw_port:
return
self.process_snat_dnat_for_fip()
interface_name = self.get_external_device_interface_name(
ex_gw_port)
fip_statuses = self.configure_fip_addresses(interface_name)
except (n_exc.FloatingIpSetupException,
n_exc.IpTablesApplyException) as e:
LOG.exception(e)
fip_statuses = self.put_fips_in_error_state()
agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
@common_utils.exception_logger()
def process(self, agent):
self._process_internal_ports()
self.process_external(agent)
self.routes_updated()
self.ex_gw_port = self.get_ex_gw_port()
self.snat_ports = self.router.get(
l3_constants.SNAT_ROUTER_INTF_KEY, [])
self.enable_snat = self.router.get('enable_snat')
| true | true |
f72117735968d8ce8ce83ea74bae1b18b3eb310b | 284 | py | Python | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | app/user/urls.py | redoCehT/recipe-app-api | c529f641adf1a7d5af39bf9dc832b68af3348176 | [
"MIT"
] | null | null | null | from django.urls import path
from user import views
# URL namespace for this app; routes reverse as "user:<name>",
# e.g. reverse("user:create").
app_name = "user"

urlpatterns = [
    # Route handled by views.CreateUserView.
    path("create", views.CreateUserView.as_view(), name="create"),
    # Route handled by views.CreateTokenView.
    path("token", views.CreateTokenView.as_view(), name="token"),
    # Route handled by views.ManageUserView.
    path("me", views.ManageUserView.as_view(), name="me"),
    # NOTE(review): these paths have no trailing slash; Django's usual
    # convention is "create/" etc. -- confirm clients expect slash-less
    # URLs before changing.
]
| 21.846154 | 66 | 0.68662 | from django.urls import path
from user import views
app_name = "user"
urlpatterns = [
path("create", views.CreateUserView.as_view(), name="create"),
path("token", views.CreateTokenView.as_view(), name="token"),
path("me", views.ManageUserView.as_view(), name="me"),
]
| true | true |
f721193ca67842d2930d048034dca9a2d38b368b | 7,370 | py | Python | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | null | null | null | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | 1 | 2017-01-07T06:30:43.000Z | 2017-01-19T12:47:07.000Z | elodie/media/media.py | phifogg/elodie | 6ca24c10b2b3fa28169976e04a9fd2f524250a44 | [
"Apache-2.0"
] | null | null | null | """
The media module provides a base :class:`Media` class for media objects that
are tracked by Elodie. The Media class provides some base functionality used
by all the media types, but isn't itself used to represent anything. Its
sub-classes (:class:`~elodie.media.audio.Audio`,
:class:`~elodie.media.photo.Photo`, and :class:`~elodie.media.video.Video`)
are used to represent the actual files.
.. moduleauthor:: Jaisen Mathai <jaisen@jmathai.com>
"""
from __future__ import print_function
# load modules
from elodie import constants
from elodie.dependencies import get_exiftool
from elodie.external.pyexiftool import ExifTool
from elodie.media.base import Base
class Media(Base):

    """The base class for all media objects.

    Provides the EXIF/XMP read and write helpers (via exiftool) that
    are shared by the concrete media types.

    :param str source: The fully qualified path to the media file.
    """

    __name__ = 'Media'

    d_coordinates = {
        'latitude': 'latitude_ref',
        'longitude': 'longitude_ref'
    }

    def __init__(self, source=None):
        super(Media, self).__init__(source)
        # Tag lists are ordered by precedence: the first present tag wins
        # when reading, and the first key is the one written.
        self.exif_map = {
            'date_taken': [
                'EXIF:DateTimeOriginal',
                'EXIF:CreateDate',
                'EXIF:ModifyDate'
            ]
        }
        self.album_keys = ['XMP-xmpDM:Album', 'XMP:Album']
        self.title_key = 'XMP:Title'
        self.latitude_keys = ['EXIF:GPSLatitude']
        self.longitude_keys = ['EXIF:GPSLongitude']
        self.latitude_ref_key = 'EXIF:GPSLatitudeRef'
        self.longitude_ref_key = 'EXIF:GPSLongitudeRef'
        # EXIF GPS tags store absolute values plus an N/S (E/W)
        # reference tag that must be written alongside them.
        self.set_gps_ref = True
        self.exiftool_addedargs = [
            '-overwrite_original',
            u'-config',
            u'"{}"'.format(constants.exiftool_config)
        ]

    def get_album(self):
        """Get album from EXIF

        :returns: None or string
        """
        if(not self.is_valid()):
            return None

        exiftool_attributes = self.get_exiftool_attributes()
        # get_exiftool_attributes() returns False (not None) when
        # exiftool is unavailable or returned no metadata; a falsy check
        # handles both and avoids a TypeError from `key in False`.
        if not exiftool_attributes:
            return None

        for album_key in self.album_keys:
            if album_key in exiftool_attributes:
                return exiftool_attributes[album_key]

        return None

    def get_coordinate(self, type='latitude'):
        """Get latitude or longitude of media from EXIF

        :param str type: Type of coordinate to get. Either "latitude" or
            "longitude".
        :returns: float or None if not present in EXIF or a non-photo file
        """
        exif = self.get_exiftool_attributes()
        if not exif:
            return None

        # The lat/lon _keys array has an order of precedence.
        # The first key is writable and we will give the writable
        # key precence when reading.
        direction_multiplier = 1.0
        for key in self.latitude_keys + self.longitude_keys:
            if key not in exif:
                continue
            # Cast coordinate to a float due to a bug in exiftool's
            # -json output format.
            # https://github.com/jmathai/elodie/issues/171
            # http://u88.n24.queensu.ca/exiftool/forum/index.php/topic,7952.0.html #noqa
            this_coordinate = float(exif[key])

            # TODO: verify that we need to check ref key
            #   when self.set_gps_ref != True
            if type == 'latitude' and key in self.latitude_keys:
                if self.latitude_ref_key in exif and \
                        exif[self.latitude_ref_key] == 'S':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier
            elif type == 'longitude' and key in self.longitude_keys:
                if self.longitude_ref_key in exif and \
                        exif[self.longitude_ref_key] == 'W':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier

        return None

    def get_exiftool_attributes(self):
        """Get attributes for the media object from exiftool.

        :returns: dict, or False if exiftool was not available or
            returned no metadata.
        """
        source = self.source
        exiftool = get_exiftool()
        if(exiftool is None):
            return False

        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            metadata = et.get_metadata(source)
            if not metadata:
                return False

        return metadata

    def get_title(self):
        """Get the title for a photo or video

        :returns: str or None if no title is set or not a valid media type
        """
        if(not self.is_valid()):
            return None

        exiftool_attributes = self.get_exiftool_attributes()
        # See get_album(): a False return must be treated as "no data".
        if not exiftool_attributes:
            return None

        if(self.title_key not in exiftool_attributes):
            return None

        return exiftool_attributes[self.title_key]

    def reset_cache(self):
        """Resets any internal cache
        """
        self.exiftool_attributes = None
        super(Media, self).reset_cache()

    def set_album(self, album):
        """Set album for a photo

        :param str album: Name of album
        :returns: bool, or None for an invalid media file
        """
        if(not self.is_valid()):
            return None

        tags = {self.album_keys[0]: album}
        status = self.__set_tags(tags)
        self.reset_cache()

        return status

    def set_date_taken(self, time):
        """Set the date/time a photo was taken.

        :param datetime time: datetime object of when the photo was taken
        :returns: bool
        """
        if(time is None):
            return False

        tags = {}
        formatted_time = time.strftime('%Y:%m:%d %H:%M:%S')
        # All date tags are written so readers honoring any of them
        # agree on the same timestamp.
        for key in self.exif_map['date_taken']:
            tags[key] = formatted_time

        status = self.__set_tags(tags)
        self.reset_cache()
        return status

    def set_location(self, latitude, longitude):
        """Set the GPS location tags of the media file.

        :param float latitude: Signed latitude; negative means south.
        :param float longitude: Signed longitude; negative means west.
        :returns: bool, or None for an invalid media file
        """
        if(not self.is_valid()):
            return None

        # The lat/lon _keys array has an order of precedence.
        # The first key is writable and we will give the writable
        # key precence when reading.
        tags = {
            self.latitude_keys[0]: latitude,
            self.longitude_keys[0]: longitude,
        }

        # If self.set_gps_ref == True then it means we are writing an EXIF
        # GPS tag which requires us to set the reference key.
        # That's because the lat/lon are absolute values.
        if self.set_gps_ref:
            if latitude < 0:
                tags[self.latitude_ref_key] = 'S'

            if longitude < 0:
                tags[self.longitude_ref_key] = 'W'

        status = self.__set_tags(tags)
        self.reset_cache()

        return status

    def set_title(self, title):
        """Set title for a photo.

        :param str title: Title of the photo.
        :returns: bool, or None when invalid or no title was given
        """
        if(not self.is_valid()):
            return None

        if(title is None):
            return None

        tags = {self.title_key: title}
        status = self.__set_tags(tags)
        self.reset_cache()

        return status

    def __set_tags(self, tags):
        """Write tags to the source file via exiftool.

        :param dict tags: mapping of exiftool tag name to value
        :returns: bool (True when exiftool reported a result), or None
            for an invalid media file
        """
        if(not self.is_valid()):
            return None

        source = self.source

        status = ''
        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            status = et.set_tags(tags, source)

        return status != ''
| 30.081633 | 88 | 0.589281 | from __future__ import print_function
from elodie import constants
from elodie.dependencies import get_exiftool
from elodie.external.pyexiftool import ExifTool
from elodie.media.base import Base
class Media(Base):
    """Media object whose metadata is read and written through ExifTool.

    Extends Base with accessors for EXIF/XMP attributes (album, title,
    GPS coordinates, date taken) and the corresponding setters.
    """

    __name__ = 'Media'

    # Maps a coordinate type to the name of its hemisphere-reference field.
    d_coordinates = {
        'latitude': 'latitude_ref',
        'longitude': 'longitude_ref'
    }

    def __init__(self, source=None):
        super(Media, self).__init__(source)
        # Keys are tried in order; the first one present wins.
        self.exif_map = {
            'date_taken': [
                'EXIF:DateTimeOriginal',
                'EXIF:CreateDate',
                'EXIF:ModifyDate'
            ]
        }
        self.album_keys = ['XMP-xmpDM:Album', 'XMP:Album']
        self.title_key = 'XMP:Title'
        # The first key of each *_keys list is the writable one and is
        # given precedence when reading.
        self.latitude_keys = ['EXIF:GPSLatitude']
        self.longitude_keys = ['EXIF:GPSLongitude']
        self.latitude_ref_key = 'EXIF:GPSLatitudeRef'
        self.longitude_ref_key = 'EXIF:GPSLongitudeRef'
        # EXIF GPS tags store absolute values plus an N/S / E/W reference.
        self.set_gps_ref = True
        self.exiftool_addedargs = [
            '-overwrite_original',
            u'-config',
            u'"{}"'.format(constants.exiftool_config)
        ]

    def get_album(self):
        """Return the album of the media object, or None if unavailable."""
        if not self.is_valid():
            return None
        exiftool_attributes = self.get_exiftool_attributes()
        # BUG FIX: get_exiftool_attributes() returns False (not None) on
        # failure, so the previous `is None` test never matched and the
        # loop below raised TypeError on a bool. Test falsiness instead.
        if not exiftool_attributes:
            return None
        for album_key in self.album_keys:
            if album_key in exiftool_attributes:
                return exiftool_attributes[album_key]
        return None

    def get_coordinate(self, type='latitude'):
        """Return latitude or longitude in signed decimal degrees, or
        None when no GPS information is present."""
        exif = self.get_exiftool_attributes()
        if not exif:
            return None

        direction_multiplier = 1.0
        for key in self.latitude_keys + self.longitude_keys:
            if key not in exif:
                continue
            # -json output format.
            # https://github.com/jmathai/elodie/issues/171
            # http://u88.n24.queensu.ca/exiftool/forum/index.php/topic,7952.0.html #noqa
            this_coordinate = float(exif[key])

            # TODO: verify that we need to check ref key
            # when self.set_gps_ref != True
            if type == 'latitude' and key in self.latitude_keys:
                if self.latitude_ref_key in exif and \
                        exif[self.latitude_ref_key] == 'S':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier
            elif type == 'longitude' and key in self.longitude_keys:
                if self.longitude_ref_key in exif and \
                        exif[self.longitude_ref_key] == 'W':
                    direction_multiplier = -1.0
                return this_coordinate * direction_multiplier

        return None

    def get_exiftool_attributes(self):
        """Return all metadata of the source file as a dict.

        NOTE(review): returns False (not None) when exiftool is missing
        or yields no metadata; callers must test falsiness, not identity
        with None.
        """
        source = self.source
        exiftool = get_exiftool()
        if exiftool is None:
            return False

        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            metadata = et.get_metadata(source)
            if not metadata:
                return False

        return metadata

    def get_title(self):
        """Return the title of the media object, or None if not set."""
        if not self.is_valid():
            return None
        exiftool_attributes = self.get_exiftool_attributes()
        # BUG FIX: same falsiness issue as in get_album() -- the previous
        # `is None` check never matched the False failure value.
        if not exiftool_attributes:
            return None
        if self.title_key not in exiftool_attributes:
            return None
        return exiftool_attributes[self.title_key]

    def reset_cache(self):
        """Clear cached exiftool attributes and delegate to Base."""
        self.exiftool_attributes = None
        super(Media, self).reset_cache()

    def set_album(self, album):
        """Write the album tag; returns write status, or None if invalid."""
        if not self.is_valid():
            return None
        tags = {self.album_keys[0]: album}
        status = self.__set_tags(tags)
        self.reset_cache()
        return status

    def set_date_taken(self, time):
        """Write all date-taken tags from a datetime.

        Returns the write status, or False when *time* is None.
        """
        if time is None:
            return False
        tags = {}
        formatted_time = time.strftime('%Y:%m:%d %H:%M:%S')
        for key in self.exif_map['date_taken']:
            tags[key] = formatted_time
        status = self.__set_tags(tags)
        self.reset_cache()
        return status

    def set_location(self, latitude, longitude):
        """Write GPS tags from signed decimal degrees."""
        if not self.is_valid():
            return None

        # The lat/lon _keys array has an order of precedence.
        # The first key is writable and we will give the writable
        # key precence when reading.
        tags = {
            self.latitude_keys[0]: latitude,
            self.longitude_keys[0]: longitude,
        }

        # If self.set_gps_ref == True then it means we are writing an EXIF
        # GPS tag which requires us to set the reference key.
        # That's because the lat/lon are absolute values.
        if self.set_gps_ref:
            if latitude < 0:
                tags[self.latitude_ref_key] = 'S'
            if longitude < 0:
                tags[self.longitude_ref_key] = 'W'

        status = self.__set_tags(tags)
        self.reset_cache()
        return status

    def set_title(self, title):
        """Write the title tag; returns write status, or None if skipped."""
        if not self.is_valid():
            return None
        if title is None:
            return None
        tags = {self.title_key: title}
        status = self.__set_tags(tags)
        self.reset_cache()
        return status

    def __set_tags(self, tags):
        """Write *tags* to the source file via exiftool.

        Returns True when exiftool reported a non-empty status.
        """
        if not self.is_valid():
            return None
        source = self.source
        status = ''
        with ExifTool(addedargs=self.exiftool_addedargs) as et:
            status = et.set_tags(tags, source)
        return status != ''
| true | true |
f72119fc9448d568049ba81365d0643f5fc6eaa0 | 7,330 | py | Python | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | src/satlas2/models/hfsModel.py | woutergins/satlas2 | 51afdc445c8c603372bb26abe19d1eb7bd3f3f24 | [
"MIT"
] | null | null | null | from satlas2.core import Model, Parameter
import numpy as np
from scipy.special import wofz
from sympy.physics.wigner import wigner_6j, wigner_3j
__all__ = ['HFS']
sqrt2 = 2 ** 0.5
sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))
log2 = np.log(2)
class HFS(Model):
    """Hyperfine-structure spectrum model.

    Generates all dipole-allowed F -> F' transitions between the
    hyperfine levels of a lower (J[0]) and upper (J[1]) fine-structure
    state for nuclear spin I, and evaluates the spectrum as a sum of
    Voigt profiles on a constant background. Relative line intensities
    default to Racah intensities (fitted via a single ``scale``); with
    ``racah=False`` the individual amplitudes are fitted instead.
    When ``N`` is given, the Poisson-weighted sidepeak model
    (:meth:`fShifted`) is used.
    """

    def __init__(self, I, J, A=(0, 0), B=(0, 0), C=(0, 0), df=0, fwhm=50,
                 bkg=1, name=None, N=None, offset=0, poisson=0, scale=1.0,
                 racah=True, prefunc=None):
        # NOTE: the hyperfine-constant defaults used to be mutable lists
        # ([0, 0]); tuples are equivalent here (only indexed, never
        # mutated) and avoid the shared-mutable-default pitfall.
        super().__init__(name=name, prefunc=prefunc)
        J1, J2 = J
        lower_F = np.arange(abs(I - J1), I + J1 + 1, 1)
        upper_F = np.arange(abs(I - J2), I + J2 + 1, 1)
        self.lines = []
        self.intensities = {}
        # Per-line scaling factors of the hyperfine constants A, B, C for
        # the lower (l) and upper (u) level.
        self.scaling_Al = {}
        self.scaling_Bl = {}
        self.scaling_Cl = {}
        self.scaling_Au = {}
        self.scaling_Bu = {}
        self.scaling_Cu = {}
        for i, F1 in enumerate(lower_F):
            for j, F2 in enumerate(upper_F):
                # Dipole selection rule: |dF| <= 1 and F=0 -> F=0 forbidden.
                if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:
                    # Half-integer F is encoded as '<2F>_2' in the line name.
                    if F1 % 1 == 0:
                        F1_str = '{:.0f}'.format(F1)
                    else:
                        F1_str = '{:.0f}_2'.format(2 * F1)
                    if F2 % 1 == 0:
                        F2_str = '{:.0f}'.format(F2)
                    else:
                        F2_str = '{:.0f}_2'.format(2 * F2)
                    line = '{}to{}'.format(F1_str, F2_str)
                    self.lines.append(line)
                    C1, D1, E1 = self.calcShift(I, J1, F1)
                    C2, D2, E2 = self.calcShift(I, J2, F2)
                    self.scaling_Al[line] = C1
                    self.scaling_Bl[line] = D1
                    self.scaling_Cl[line] = E1
                    self.scaling_Au[line] = C2
                    self.scaling_Bu[line] = D2
                    self.scaling_Cu[line] = E2
                    intens = float((2 * F1 + 1) * (2 * F2 + 1) *
                                   wigner_6j(J2, F2, I, F1, J1, 1.0) ** 2)  # DO NOT REMOVE CAST TO FLOAT!!!
                    self.intensities['Amp' + line] = Parameter(value=intens, min=0, vary=not racah)
        # Normalise so the strongest line has amplitude 1.
        norm = max([p.value for p in self.intensities.values()])
        for n, v in self.intensities.items():
            v.value /= norm
        pars = {'centroid': Parameter(value=df),
                'Al': Parameter(value=A[0]),
                'Au': Parameter(value=A[1]),
                'Bl': Parameter(value=B[0]),
                'Bu': Parameter(value=B[1]),
                'Cl': Parameter(value=C[0], vary=False),
                'Cu': Parameter(value=C[1], vary=False),
                'bkg': Parameter(value=bkg),
                'FWHMG': Parameter(value=fwhm, min=0.01),
                'FWHML': Parameter(value=fwhm, min=0.01),
                'scale': Parameter(value=scale, min=0, vary=racah)}
        if N is not None:
            # Sidepeak model: N Poisson-weighted copies shifted by Offset.
            pars['N'] = Parameter(value=N, vary=False)
            pars['Offset'] = Parameter(value=offset)
            pars['Poisson'] = Parameter(value=poisson, min=0, max=1)
            self.f = self.fShifted
        else:
            self.f = self.fUnshifted
        pars = {**pars, **self.intensities}
        self.params = pars
        # Hyperfine constants that cannot contribute for the given spins
        # are fixed at their starting values.
        if I < 2 or J1 < 2:
            self.params['Cl'].vary = False
        if I < 2 or J2 < 2:
            self.params['Cu'].vary = False
        if I < 1 or J1 < 1:
            self.params['Bl'].vary = False
        if I < 1 or J2 < 1:
            self.params['Bu'].vary = False
        if I == 0 or J1 == 0:
            self.params['Al'].vary = False
        if I == 0 or J2 == 0:
            self.params['Au'].vary = False
        self.xtransformed = None
        self.xhashed = None

    def fUnshifted(self, x):
        """Evaluate the spectrum (no sidepeaks) at frequencies *x*."""
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        FWHMG = self.params['FWHMG'].value
        FWHML = self.params['FWHML'].value
        scale = self.params['scale'].value
        bkg = self.params['bkg'].value
        result = np.zeros(len(x))
        x = self.transform(x)
        for line in self.lines:
            # Line position = centroid + upper-level shift - lower-level shift.
            pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
            result += self.params['Amp' + line].value * self.peak(x - pos, FWHMG, FWHML)
        return scale * result + bkg

    def fShifted(self, x):
        """Evaluate the spectrum with N Poisson-weighted sidepeaks."""
        from math import factorial  # np.math alias was removed in NumPy >= 1.25
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        # BUG FIX: Cl and Cu are used in the position formula below but
        # were never read in this method, causing a NameError when the
        # sidepeak model was evaluated (fUnshifted reads them correctly).
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        FWHMG = self.params['FWHMG'].value
        FWHML = self.params['FWHML'].value
        scale = self.params['scale'].value
        N = self.params['N'].value
        offset = self.params['Offset'].value
        poisson = self.params['Poisson'].value
        bkg = self.params['bkg'].value
        result = np.zeros(len(x))
        for line in self.lines:
            pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
            for i in range(N + 1):
                # i-th sidepeak, weighted by poisson**i / i!.
                if self.prefunc:
                    result += self.params['Amp' + line].value * self.peak(self.prefunc(x - i * offset) - pos, FWHMG, FWHML) * (poisson ** i) / factorial(i)
                else:
                    result += self.params['Amp' + line].value * self.peak(x - pos - i * offset, FWHMG, FWHML) * (poisson ** i) / factorial(i)
        return scale * result + bkg

    def peak(self, x, FWHMG, FWHML):
        """Normalised Voigt profile centred at 0, evaluated at *x*."""
        z = self.preparePeak(x, FWHMG, FWHML)
        n = self.norm(FWHML, FWHMG)
        ret = wofz(z).real
        return ret / n

    def norm(self, FWHML, FWHMG):
        """Peak value of the unnormalised Voigt profile (at x=0)."""
        return wofz(1j * FWHML / (FWHMG * sqrt2)).real

    def preparePeak(self, x, FWHMG, FWHML):
        """Convert x and the FWHMs to the complex argument of wofz."""
        sigma, gamma = FWHMG / sqrt2log2t2, FWHML / 2
        z = (x + 1j * gamma) / (sigma * sqrt2)
        return z

    def calcShift(self, I, J, F):
        """Return the A, B and C scaling factors of hyperfine level F."""
        phase = (-1) ** (I + J + F)
        contrib = []
        for k in range(1, 4):
            n = float(wigner_6j(I, J, F, J, I, k))
            d = float(wigner_3j(I, k, I, -I, 0, I) * wigner_3j(J, k, J, -J, 0, J))
            try:
                shift = phase * n / d
            except ZeroDivisionError:
                # Pure-Python float division by exact 0.0 raises instead of
                # producing inf/nan, so the isfinite() check below would
                # never be reached; treat it as "no k-th order term".
                contrib.append(0)
                continue
            if not np.isfinite(shift):
                contrib.append(0)
            else:
                if k == 1:
                    shift = shift * (I * J)
                elif k == 2:
                    shift = shift / 4
                contrib.append(shift)
        return contrib

    def pos(self):
        """Return the positions of all hyperfine transitions."""
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        pos = []
        for line in self.lines:
            pos.append(centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line])
        return pos
| 38.783069 | 206 | 0.502456 | from satlas2.core import Model, Parameter
import numpy as np
from scipy.special import wofz
from sympy.physics.wigner import wigner_6j, wigner_3j
__all__ = ['HFS']
sqrt2 = 2 ** 0.5
sqrt2log2t2 = 2 * np.sqrt(2 * np.log(2))
log2 = np.log(2)
class HFS(Model):
    """Hyperfine-structure spectrum model (duplicate copy in this file).

    Builds all dipole-allowed F -> F' transitions between the hyperfine
    levels of the lower (J[0]) and upper (J[1]) fine-structure state for
    nuclear spin I and sums Voigt profiles on a constant background.
    """

    def __init__(self, I, J, A=(0, 0), B=(0, 0), C=(0, 0), df=0, fwhm=50,
                 bkg=1, name=None, N=None, offset=0, poisson=0, scale=1.0,
                 racah=True, prefunc=None):
        # Defaults changed from mutable lists to tuples (read-only use).
        super().__init__(name=name, prefunc=prefunc)
        J1, J2 = J
        lower_F = np.arange(abs(I - J1), I + J1 + 1, 1)
        upper_F = np.arange(abs(I - J2), I + J2 + 1, 1)
        self.lines = []
        self.intensities = {}
        # Hyperfine-constant scaling factors per line (l=lower, u=upper).
        self.scaling_Al = {}
        self.scaling_Bl = {}
        self.scaling_Cl = {}
        self.scaling_Au = {}
        self.scaling_Bu = {}
        self.scaling_Cu = {}
        for i, F1 in enumerate(lower_F):
            for j, F2 in enumerate(upper_F):
                # Selection rule: |dF| <= 1, F=0 -> F=0 forbidden.
                if abs(F2 - F1) <= 1 and not F2 == F1 == 0.0:
                    if F1 % 1 == 0:
                        F1_str = '{:.0f}'.format(F1)
                    else:
                        F1_str = '{:.0f}_2'.format(2 * F1)
                    if F2 % 1 == 0:
                        F2_str = '{:.0f}'.format(F2)
                    else:
                        F2_str = '{:.0f}_2'.format(2 * F2)
                    line = '{}to{}'.format(F1_str, F2_str)
                    self.lines.append(line)
                    C1, D1, E1 = self.calcShift(I, J1, F1)
                    C2, D2, E2 = self.calcShift(I, J2, F2)
                    self.scaling_Al[line] = C1
                    self.scaling_Bl[line] = D1
                    self.scaling_Cl[line] = E1
                    self.scaling_Au[line] = C2
                    self.scaling_Bu[line] = D2
                    self.scaling_Cu[line] = E2
                    intens = float((2 * F1 + 1) * (2 * F2 + 1) *
                                   wigner_6j(J2, F2, I, F1, J1, 1.0) ** 2)  # DO NOT REMOVE CAST TO FLOAT!!!
                    self.intensities['Amp' + line] = Parameter(value=intens, min=0, vary=not racah)
        # Normalise so the strongest line has amplitude 1.
        norm = max([p.value for p in self.intensities.values()])
        for n, v in self.intensities.items():
            v.value /= norm
        pars = {'centroid': Parameter(value=df),
                'Al': Parameter(value=A[0]),
                'Au': Parameter(value=A[1]),
                'Bl': Parameter(value=B[0]),
                'Bu': Parameter(value=B[1]),
                'Cl': Parameter(value=C[0], vary=False),
                'Cu': Parameter(value=C[1], vary=False),
                'bkg': Parameter(value=bkg),
                'FWHMG': Parameter(value=fwhm, min=0.01),
                'FWHML': Parameter(value=fwhm, min=0.01),
                'scale': Parameter(value=scale, min=0, vary=racah)}
        if N is not None:
            pars['N'] = Parameter(value=N, vary=False)
            pars['Offset'] = Parameter(value=offset)
            pars['Poisson'] = Parameter(value=poisson, min=0, max=1)
            self.f = self.fShifted
        else:
            self.f = self.fUnshifted
        pars = {**pars, **self.intensities}
        self.params = pars
        # Constants that cannot contribute for the given spins stay fixed.
        if I < 2 or J1 < 2:
            self.params['Cl'].vary = False
        if I < 2 or J2 < 2:
            self.params['Cu'].vary = False
        if I < 1 or J1 < 1:
            self.params['Bl'].vary = False
        if I < 1 or J2 < 1:
            self.params['Bu'].vary = False
        if I == 0 or J1 == 0:
            self.params['Al'].vary = False
        if I == 0 or J2 == 0:
            self.params['Au'].vary = False
        self.xtransformed = None
        self.xhashed = None

    def fUnshifted(self, x):
        """Evaluate the spectrum (no sidepeaks) at frequencies *x*."""
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        FWHMG = self.params['FWHMG'].value
        FWHML = self.params['FWHML'].value
        scale = self.params['scale'].value
        bkg = self.params['bkg'].value
        result = np.zeros(len(x))
        x = self.transform(x)
        for line in self.lines:
            pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
            result += self.params['Amp' + line].value * self.peak(x - pos, FWHMG, FWHML)
        return scale * result + bkg

    def fShifted(self, x):
        """Evaluate the spectrum with N Poisson-weighted sidepeaks."""
        from math import factorial  # np.math alias removed in NumPy >= 1.25
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        # BUG FIX: Cl/Cu are used below but were never read here (NameError).
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        FWHMG = self.params['FWHMG'].value
        FWHML = self.params['FWHML'].value
        scale = self.params['scale'].value
        N = self.params['N'].value
        offset = self.params['Offset'].value
        poisson = self.params['Poisson'].value
        bkg = self.params['bkg'].value
        result = np.zeros(len(x))
        for line in self.lines:
            pos = centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line]
            for i in range(N + 1):
                if self.prefunc:
                    result += self.params['Amp' + line].value * self.peak(self.prefunc(x - i * offset) - pos, FWHMG, FWHML) * (poisson ** i) / factorial(i)
                else:
                    result += self.params['Amp' + line].value * self.peak(x - pos - i * offset, FWHMG, FWHML) * (poisson ** i) / factorial(i)
        return scale * result + bkg

    def peak(self, x, FWHMG, FWHML):
        """Normalised Voigt profile centred at 0."""
        z = self.preparePeak(x, FWHMG, FWHML)
        n = self.norm(FWHML, FWHMG)
        ret = wofz(z).real
        return ret / n

    def norm(self, FWHML, FWHMG):
        """Peak value of the unnormalised Voigt profile (at x=0)."""
        return wofz(1j * FWHML / (FWHMG * sqrt2)).real

    def preparePeak(self, x, FWHMG, FWHML):
        """Convert x and the FWHMs to the complex argument of wofz."""
        sigma, gamma = FWHMG / sqrt2log2t2, FWHML / 2
        z = (x + 1j * gamma) / (sigma * sqrt2)
        return z

    def calcShift(self, I, J, F):
        """Return the A, B and C scaling factors of hyperfine level F."""
        phase = (-1) ** (I + J + F)
        contrib = []
        for k in range(1, 4):
            n = float(wigner_6j(I, J, F, J, I, k))
            d = float(wigner_3j(I, k, I, -I, 0, I) * wigner_3j(J, k, J, -J, 0, J))
            try:
                shift = phase * n / d
            except ZeroDivisionError:
                # Float division by exact 0.0 raises instead of giving
                # inf/nan; treat as "no k-th order contribution".
                contrib.append(0)
                continue
            if not np.isfinite(shift):
                contrib.append(0)
            else:
                if k == 1:
                    shift = shift * (I * J)
                elif k == 2:
                    shift = shift / 4
                contrib.append(shift)
        return contrib

    def pos(self):
        """Return the positions of all hyperfine transitions."""
        centroid = self.params['centroid'].value
        Al = self.params['Al'].value
        Au = self.params['Au'].value
        Bl = self.params['Bl'].value
        Bu = self.params['Bu'].value
        Cl = self.params['Cl'].value
        Cu = self.params['Cu'].value
        pos = []
        for line in self.lines:
            pos.append(centroid + Au * self.scaling_Au[line] + Bu * self.scaling_Bu[line] + Cu * self.scaling_Cu[line] - Al * self.scaling_Al[line] - Bl * self.scaling_Bl[line] - Cl * self.scaling_Cl[line])
        return pos
| true | true |
f7211a6c4ae21fd092ed3210d9ed20271e7afe65 | 19,979 | py | Python | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | collect/TwHistory.py | mcuiteallen/stock | 06c56db6c712ab88fabdc67a8812869ad4180f6f | [
"MIT"
] | null | null | null | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
class HistoryType(Enum):
    # Aggregation granularity of a stock-history row. Each value is a
    # tuple indexed by HistoryTypeTo:
    #   [0] code stored in StockHistory.type,
    #   [1] human-readable label,
    #   [2] trading-horizon explanation used in diverge() output.
    DAY = ("0", "日", "短線")
    WEEK = ("1", "週", "中短線")
    MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
    # Index into a HistoryType value tuple (see HistoryType).
    DB = 0       # code persisted in the database
    HUMAN = 1    # human-readable label
    EXPLAIN = 2  # trading-horizon explanation
class TwHistory:
    """Collects Taiwan stock price history at day/week/month granularity,
    computes technical indicators (RSI, Williams %R, MACD, Bollinger
    bands) via TA-Lib, and reports RSI/Williams divergences."""

    dateFormatForTwStock = None  # date format used by twstock ("%Y/%m/%d")
    dateFormat = None            # date format stored in the DB ("%Y-%m-%d")
    rsiDict = None               # indicator caches, keyed by date string
    williamsDict = None
    macdDict = None
    bbandDict = None

    def __init__(self):
        self.dateFormatForTwStock = "%Y/%m/%d"
        self.dateFormat = "%Y-%m-%d"

    def transformStrToDateTimeForTwStock(self, targetStr):
        """Parse a twstock-style date string (YYYY/MM/DD)."""
        return datetime.strptime(targetStr, self.dateFormatForTwStock)

    def transformStrToDateTime(self, targetStr):
        """Parse a DB-style date string (YYYY-MM-DD)."""
        return datetime.strptime(targetStr, self.dateFormat)

    def transformDateTimeToStr(self, date):
        """Format a datetime as a DB-style date string (YYYY-MM-DD)."""
        return date.strftime(self.dateFormat)

    def retIfNaN(self, num):
        """Return num, or None when it is NaN (for nullable DB columns)."""
        if math.isnan(num):
            return None
        else:
            return num

    def createDataFrame(self, history):
        """Build a date-indexed DataFrame from StockHistory rows for TA-Lib."""
        df = pd.DataFrame([h.as_simple_dict() for h in history])
        df['date'] = pd.to_datetime(df['date'])
        df.set_index('date', inplace=True)
        return df

    def deleteHistory(self, code, type, startDate, endDate):
        """Delete history rows for code/type within [startDate, endDate]."""
        session.query(StockHistory).\
            filter(StockHistory.code == code).\
            filter(StockHistory.type == type).\
            filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
            filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
            delete()
        session.commit()

    def calculateRSI(self, df):
        """Compute 5-period RSI and cache it keyed by date string."""
        rsi = abstract.RSI(df, timeperiod=5)
        self.rsiDict = {}
        for index, number in rsi.iteritems():
            self.rsiDict[self.transformDateTimeToStr(index)] = number

    def calculateWilliams(self, df):
        """Compute 5-period Williams %R and cache it keyed by date string."""
        williams = abstract.WILLR(df, timeperiod=5)
        self.williamsDict = {}
        for index, number in williams.iteritems():
            self.williamsDict[self.transformDateTimeToStr(index)] = number

    def calculateMACD(self, df):
        """Compute MACD (default periods) and cache rows keyed by date."""
        macd = abstract.MACD(df)
        self.macdDict = {}
        for index, row in macd.iterrows():
            self.macdDict[self.transformDateTimeToStr(index)] = row

    def calculateBBAND(self, df):
        """Compute 22-period Bollinger bands and cache rows keyed by date."""
        bband = abstract.BBANDS(df, timeperiod=22)
        self.bbandDict = {}
        for index, row in bband.iterrows():
            self.bbandDict[self.transformDateTimeToStr(index)] = row

    def updateHistoryTechnicalIndicator(self, history):
        """Fill missing indicator columns on *history* from the caches and
        merge it into the session when anything was filled in."""
        date = history.date
        updateFlag = False
        if history.rsi is None:
            history.rsi = self.retIfNaN(self.rsiDict[date])
            updateFlag = updateFlag or history.rsi is not None
        if history.williams is None:
            history.williams = self.retIfNaN(self.williamsDict[date])
            updateFlag = updateFlag or history.williams is not None
        if history.macd is None:
            history.macd = self.retIfNaN(self.macdDict[date].macd)
            updateFlag = updateFlag or history.macd is not None
        if history.macdsignal is None:
            history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
            updateFlag = updateFlag or history.macdsignal is not None
        if history.macdhist is None:
            history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
            updateFlag = updateFlag or history.macdhist is not None
        if history.upperband is None:
            history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
            updateFlag = updateFlag or history.upperband is not None
        if history.middleband is None:
            history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
            updateFlag = updateFlag or history.middleband is not None
        if history.lowerband is None:
            history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
            updateFlag = updateFlag or history.lowerband is not None
        if updateFlag:
            session.merge(history)

    def dayHistory(self):
        """Download missing daily history rows from the FinMind API.

        NOTE(review): currently restricted to code '3707' -- this looks
        like a temporary debugging filter; confirm before widening.
        """
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and k == '3707':
                print("dayHistory code: " + k)
                dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB)  # get type value for db
                history = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == dayType).\
                    order_by(StockHistory.date.desc()).\
                    first()
                nowDate = datetime.now()
                endDateStr = self.transformDateTimeToStr(nowDate)
                # Resume from the last stored date, or from the stock's
                # listing date when no rows exist yet.
                startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date
                self.finmindtrade(k, startDateStr, endDateStr, dayType)

    def weekHistory(self):
        """Aggregate daily rows into weekly (Monday-based) rows."""
        today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
        weekStart = today - timedelta(days=today.weekday())
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                print("weekHistory code: " + k)
                latestHistoryWeek = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
                weekStartPast = startdate - timedelta(days=startdate.weekday())
                weekEndPast = weekStartPast + timedelta(days=6)
                while weekStartPast <= weekStart:
                    # Rebuild each week from scratch so a partially
                    # aggregated current week gets refreshed.
                    self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
                    historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
                                               capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                    firstFlag = True
                    for historyDay in session.query(StockHistory).\
                            filter(StockHistory.code == k).\
                            filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                            filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
                            filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
                            order_by(StockHistory.date.asc()).\
                            all():
                        historyWeek.date = self.transformDateTimeToStr(weekStartPast)
                        historyWeek.close = historyDay.close      # last day's close wins
                        historyWeek.capacity += historyDay.capacity
                        historyWeek.turnover += historyDay.turnover
                        if firstFlag:
                            historyWeek.open = historyDay.open    # first day's open
                            firstFlag = False
                        historyWeek.high = max(historyWeek.high, historyDay.high)
                        historyWeek.low = min(historyWeek.low, historyDay.low)
                    if not firstFlag:  # at least one daily row was found
                        session.merge(historyWeek)
                    weekStartPast += timedelta(days=7)
                    weekEndPast += timedelta(days=7)
        session.commit()

    def monthHistory(self):
        """Aggregate daily rows into calendar-month rows."""
        today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
        monthStart = today.replace(day=1)
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                print("monthHistory code: " + k)
                latestHistoryMonth = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
                monthStartPast = startdate.replace(day=1)
                monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
                while monthStartPast <= monthStart:
                    # Rebuild each month from scratch (refreshes the
                    # partially aggregated current month).
                    self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
                    historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
                                                capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                    firstFlag = True
                    for historyDay in session.query(StockHistory).\
                            filter(StockHistory.code == k).\
                            filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                            filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
                            filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
                            order_by(StockHistory.date.asc()).\
                            all():
                        historyMonth.date = self.transformDateTimeToStr(monthStartPast)
                        historyMonth.close = historyDay.close
                        historyMonth.capacity += historyDay.capacity
                        historyMonth.turnover += historyDay.turnover
                        if firstFlag:
                            historyMonth.open = historyDay.open
                            firstFlag = False
                        historyMonth.high = max(historyMonth.high, historyDay.high)
                        historyMonth.low = min(historyMonth.low, historyDay.low)
                    if not firstFlag:
                        session.merge(historyMonth)
                    monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
                    monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
        session.commit()

    def technicalIndicator(self):
        """Compute and persist missing indicator columns for every
        code/aggregation-level combination."""
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                for historyType in HistoryType:
                    print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
                    historyList = session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                        order_by(StockHistory.date.asc()).\
                        all()
                    if len(historyList) == 0:
                        continue
                    df = self.createDataFrame(historyList)
                    self.calculateRSI(df)
                    self.calculateWilliams(df)
                    self.calculateMACD(df)
                    self.calculateBBAND(df)
                    for history in historyList:
                        self.updateHistoryTechnicalIndicator(history)
        session.commit()

    def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
        """Print stocks whose RSI and Williams %R diverge.

        Stocks are ranked by the latest daily turnover; divergences are
        searched on the two most recent points ("相鄰背離") and on the two
        most recent RSI peaks/troughs ("波峰/波谷背離") for each
        aggregation level.
        """
        # Rank all eligible stocks by their latest daily turnover.
        turnoverDict = {}
        nameDict = {}
        for k, v in twstock.codes.items():
            if self.isStockOrETF(v.type) and self.isHistoryExist(k):
                history = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.desc()).\
                    first()
                turnoverDict[k] = history.turnover
                nameDict[k] = v.name
        rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
        print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
        for rankIdx, code in enumerate(rankDict.keys()):
            closePrice = None
            divergeDict = {}
            for historyType in HistoryType:
                historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
                historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
                historyList = session.query(StockHistory).\
                    filter(StockHistory.code == code).\
                    filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                    filter(StockHistory.rsi.isnot(None)).\
                    order_by(StockHistory.date.desc()).\
                    limit(self.recentHistoryLimit(historyType)).\
                    all()
                historyListLength = len(historyList)
                if historyListLength > 0:
                    closePrice = historyList[0].close
                if historyListLength > 1:
                    # Adjacent-point divergence on the two latest rows.
                    if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
                    elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:
                        divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
                if historyListLength > 2:
                    # Collect the two most recent local RSI maxima/minima
                    # (endpoints excluded).
                    highPeak = []
                    lowPeak = []
                    for i, history in enumerate(historyList):
                        if i == 0 or i == historyListLength - 1:
                            continue
                        if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
                            highPeak.append(history)
                        if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
                            lowPeak.append(history)
                        if len(highPeak) == 2 and len(lowPeak) == 2:
                            break
                    if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
                        if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
                        elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
                            for low in lowPeak:
                                if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
                                    divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
                                    break
                    if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
                        if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
                            divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
                        elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
                            for high in highPeak:
                                if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
                                    divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
                                    break
            if len(divergeDict) > 0:
                print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
                for k, v in divergeDict.items():
                    print(k + " => " + v)
                print("")
                print("========================================================================================")

    def isStockOrETF(self, type):
        """True when the twstock type marks a stock or an ETF."""
        return type == "股票" or type == "ETF"

    def isHistoryExist(self, code):
        """True when today's daily row for *code* exists.

        NOTE(review): hard-limited to code '3707', matching dayHistory's
        debug filter; every other code returns False. Confirm before
        removing the guard.
        """
        if code == '3707':
            return session.query(StockHistory).\
                filter(StockHistory.code == code).\
                filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
                first() is not None
        return False

    def isHighRsi(self, highRsi, historyList):
        """True when the two most recent rows both have rsi >= highRsi."""
        for i, history in enumerate(historyList):
            if i < 2 and history.rsi < highRsi:
                return False
            elif i == 2:
                break
        return True

    def isLowRsi(self, lowRsi, historyList):
        """True when the two most recent rows both have rsi <= lowRsi."""
        for i, history in enumerate(historyList):
            if i < 2 and history.rsi > lowRsi:
                return False
            elif i == 2:
                break
        return True

    def recentHistoryLimit(self, historyType):
        """Number of recent rows to inspect per aggregation level."""
        if historyType == HistoryType.DAY:
            return 40
        elif historyType == HistoryType.WEEK:
            return 16
        else:
            return 6

    def translate(self, historyType, historyTypeTo):
        """Project a HistoryType onto one of its tuple fields."""
        return historyType.value[historyTypeTo.value]

    def finmindtrade(self, code, start, end, dayType):
        """Fetch daily rows for *code* in [start, end] from the FinMind
        API, upsert them, then sleep to respect the API rate limit."""
        url = "https://api.finmindtrade.com/api/v4/data"
        parameter = {
            "dataset": "TaiwanStockPrice",
            "data_id": code,
            "start_date": start,
            "end_date": end,
            # SECURITY(review): hard-coded API token committed to source
            # control; move it to an environment variable or config file.
            "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA"
        }
        resp = requests.get(url, params=parameter)
        # BUG FIX: the response was parsed twice (resp.json() was called
        # again in the loop header) and the local shadowed the name
        # `json`; parse once and reuse.
        payload = resp.json()
        if payload is not None:
            for data in payload["data"]:
                history = StockHistory(code=code, type=dayType, date=data["date"],
                                       capacity=data["Trading_Volume"], turnover=data["Trading_money"],
                                       open=data["open"], high=data["max"], low=data["min"], close=data["close"])
                session.merge(history)
            session.commit()
        time.sleep(6.1)  # stay under the FinMind free-tier rate limit
# Script entry: refresh daily/weekly/monthly history tables, recompute
# the technical indicators, then report divergences.
# NOTE(review): consider guarding with `if __name__ == "__main__":` so
# importing this module does not trigger a full data refresh.
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
#twHistory.diverge(90, 10, -20, -80)
#twHistory.diverge(80, 20, -20, -80)
twHistory.diverge(70, 30, -20, -80) | 51.359897 | 207 | 0.569198 | import calendar
import math
import pandas as pd
import time
import twstock
import requests
from datetime import datetime, timedelta
from dateutil import relativedelta
from db.Connection import session
from enum import Enum
from model.StockHistory import StockHistory
from sys import float_info
from talib import abstract
class HistoryType(Enum):
    # Aggregation granularity of a history row (duplicate copy in this
    # file). Tuple fields are indexed via HistoryTypeTo:
    # DB code, human-readable label, trading-horizon explanation.
    DAY = ("0", "日", "短線")
    WEEK = ("1", "週", "中短線")
    MONTH = ("2", "月", "中長線")
class HistoryTypeTo(Enum):
    # Index into a HistoryType value tuple (duplicate copy in this file).
    DB = 0       # code persisted in the database
    HUMAN = 1    # human-readable label
    EXPLAIN = 2  # trading-horizon explanation
class TwHistory:
dateFormatForTwStock = None
dateFormat = None
rsiDict = None
williamsDict = None
macdDict = None
bbandDict = None
def __init__(self):
self.dateFormatForTwStock = "%Y/%m/%d"
self.dateFormat = "%Y-%m-%d"
def transformStrToDateTimeForTwStock(self, targetStr):
return datetime.strptime(targetStr, self.dateFormatForTwStock)
def transformStrToDateTime(self, targetStr):
return datetime.strptime(targetStr, self.dateFormat)
def transformDateTimeToStr(self, date):
return date.strftime(self.dateFormat)
def retIfNaN(self, num):
if math.isnan(num):
return None
else:
return num
def createDataFrame(self, history):
df = pd.DataFrame([h.as_simple_dict() for h in history])
df['date'] = pd.to_datetime(df['date'])
df.set_index('date', inplace=True)
return df
def deleteHistory(self, code, type, startDate, endDate):
session.query(StockHistory).\
filter(StockHistory.code == code).\
filter(StockHistory.type == type).\
filter(StockHistory.date >= self.transformDateTimeToStr(startDate)).\
filter(StockHistory.date <= self.transformDateTimeToStr(endDate)).\
delete()
session.commit()
def calculateRSI(self, df):
rsi = abstract.RSI(df, timeperiod=5)
self.rsiDict = {}
for index, number in rsi.iteritems():
self.rsiDict[self.transformDateTimeToStr(index)] = number
def calculateWilliams(self, df):
williams = abstract.WILLR(df, timeperiod=5)
self.williamsDict = {}
for index, number in williams.iteritems():
self.williamsDict[self.transformDateTimeToStr(index)] = number
def calculateMACD(self, df):
macd = abstract.MACD(df)
self.macdDict = {}
for index, row in macd.iterrows():
self.macdDict[self.transformDateTimeToStr(index)] = row
def calculateBBAND(self, df):
bband = abstract.BBANDS(df, timeperiod=22)
self.bbandDict = {}
for index, row in bband.iterrows():
self.bbandDict[self.transformDateTimeToStr(index)] = row
def updateHistoryTechnicalIndicator(self, history):
date = history.date
updateFlag = False
if history.rsi is None:
history.rsi = self.retIfNaN(self.rsiDict[date])
updateFlag = updateFlag or history.rsi is not None
if history.williams is None:
history.williams = self.retIfNaN(self.williamsDict[date])
updateFlag = updateFlag or history.williams is not None
if history.macd is None:
history.macd = self.retIfNaN(self.macdDict[date].macd)
updateFlag = updateFlag or history.macd is not None
if history.macdsignal is None:
history.macdsignal = self.retIfNaN(self.macdDict[date].macdsignal)
updateFlag = updateFlag or history.macdsignal is not None
if history.macdhist is None:
history.macdhist = self.retIfNaN(self.macdDict[date].macdhist)
updateFlag = updateFlag or history.macdhist is not None
if history.upperband is None:
history.upperband = self.retIfNaN(self.bbandDict[date].upperband)
updateFlag = updateFlag or history.upperband is not None
if history.middleband is None:
history.middleband = self.retIfNaN(self.bbandDict[date].middleband)
updateFlag = updateFlag or history.middleband is not None
if history.lowerband is None:
history.lowerband = self.retIfNaN(self.bbandDict[date].lowerband)
updateFlag = updateFlag or history.lowerband is not None
if updateFlag:
session.merge(history)
def dayHistory(self):
    """Fetch and store daily OHLCV history for tracked codes via the FinMind API."""
    for k, v in twstock.codes.items():
        # NOTE(review): `k == '3707'` restricts the download to a single code —
        # looks like debug scaffolding; confirm before shipping.
        if self.isStockOrETF(v.type) and k == '3707':
            print("dayHistory code: " + k)
            dayType = self.translate(HistoryType.DAY, HistoryTypeTo.DB)
            # Most recent stored DAY row decides where to resume downloading.
            history = session.query(StockHistory).\
                filter(StockHistory.code == k).\
                filter(StockHistory.type == dayType).\
                order_by(StockHistory.date.desc()).\
                first()
            nowDate = datetime.now()
            endDateStr = self.transformDateTimeToStr(nowDate)
            # No stored rows -> start from the listing date recorded by twstock;
            # otherwise resume from the last stored date (finmindtrade upserts).
            startDateStr = self.transformDateTimeToStr(self.transformStrToDateTimeForTwStock(v.start)) if history is None else history.date
            self.finmindtrade(k, startDateStr, endDateStr, dayType)
def weekHistory(self):
    """Aggregate stored DAY rows into WEEK rows, from each code's last stored
    week (or its listing date) up to and including the current week."""
    today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
    # Monday of the current week.
    weekStart = today - timedelta(days=today.weekday())
    for k, v in twstock.codes.items():
        if self.isStockOrETF(v.type) and self.isHistoryExist(k):
            print("weekHistory code: " + k)
            latestHistoryWeek = session.query(StockHistory).\
                filter(StockHistory.code == k).\
                filter(StockHistory.type == self.translate(HistoryType.WEEK, HistoryTypeTo.DB)).\
                order_by(StockHistory.date.desc()).\
                first()
            startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryWeek is None else self.transformStrToDateTime(latestHistoryWeek.date)
            # Monday..Sunday window of the first week to (re)build.
            weekStartPast = startdate - timedelta(days=startdate.weekday())
            weekEndPast = weekStartPast + timedelta(days=6)
            while weekStartPast <= weekStart:
                # Rebuild the week from scratch: drop any existing aggregate first.
                self.deleteHistory(k, self.translate(HistoryType.WEEK, HistoryTypeTo.DB), weekStartPast, weekEndPast)
                historyWeek = StockHistory(code=k, type=self.translate(HistoryType.WEEK, HistoryTypeTo.DB),
                                           capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                firstFlag = True
                for historyDay in session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                        filter(StockHistory.date >= self.transformDateTimeToStr(weekStartPast)).\
                        filter(StockHistory.date <= self.transformDateTimeToStr(weekEndPast)).\
                        order_by(StockHistory.date.asc()).\
                        all():
                    historyWeek.date = self.transformDateTimeToStr(weekStartPast)
                    historyWeek.close = historyDay.close  # ascending order: last assignment wins
                    historyWeek.capacity += historyDay.capacity
                    historyWeek.turnover += historyDay.turnover
                    if firstFlag:
                        historyWeek.open = historyDay.open  # first trading day's open
                        firstFlag = False
                    historyWeek.high = max(historyWeek.high, historyDay.high)
                    historyWeek.low = min(historyWeek.low, historyDay.low)
                if not firstFlag:  # at least one DAY row existed in this window
                    session.merge(historyWeek)
                weekStartPast += timedelta(days=7)
                weekEndPast += timedelta(days=7)
    session.commit()
def monthHistory(self):
    """Aggregate stored DAY rows into MONTH rows, from each code's last stored
    month (or its listing date) up to and including the current month."""
    today = self.transformStrToDateTime(self.transformDateTimeToStr(datetime.now()))
    monthStart = today.replace(day=1)
    for k, v in twstock.codes.items():
        if self.isStockOrETF(v.type) and self.isHistoryExist(k):
            print("monthHistory code: " + k)
            latestHistoryMonth = session.query(StockHistory).\
                filter(StockHistory.code == k).\
                filter(StockHistory.type == self.translate(HistoryType.MONTH, HistoryTypeTo.DB)).\
                order_by(StockHistory.date.desc()).\
                first()
            startdate = self.transformStrToDateTimeForTwStock(v.start) if latestHistoryMonth is None else self.transformStrToDateTime(latestHistoryMonth.date)
            monthStartPast = startdate.replace(day=1)
            # Last calendar day of that month.
            monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
            while monthStartPast <= monthStart:
                # Rebuild the month from scratch: drop any existing aggregate first.
                self.deleteHistory(k, self.translate(HistoryType.MONTH, HistoryTypeTo.DB), monthStartPast, monthEndPast)
                historyMonth = StockHistory(code=k, type=self.translate(HistoryType.MONTH, HistoryTypeTo.DB),
                                            capacity=0, turnover=0, high=0, low=float_info.max, close=0)
                firstFlag = True
                for historyDay in session.query(StockHistory).\
                        filter(StockHistory.code == k).\
                        filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                        filter(StockHistory.date >= self.transformDateTimeToStr(monthStartPast)).\
                        filter(StockHistory.date <= self.transformDateTimeToStr(monthEndPast)).\
                        order_by(StockHistory.date.asc()).\
                        all():
                    historyMonth.date = self.transformDateTimeToStr(monthStartPast)
                    historyMonth.close = historyDay.close  # ascending order: last assignment wins
                    historyMonth.capacity += historyDay.capacity
                    historyMonth.turnover += historyDay.turnover
                    if firstFlag:
                        historyMonth.open = historyDay.open  # first trading day's open
                        firstFlag = False
                    historyMonth.high = max(historyMonth.high, historyDay.high)
                    historyMonth.low = min(historyMonth.low, historyDay.low)
                if not firstFlag:  # at least one DAY row existed in this window
                    session.merge(historyMonth)
                monthStartPast = monthStartPast + relativedelta.relativedelta(months=1)
                monthEndPast = monthStartPast.replace(day=calendar.monthrange(monthStartPast.year, monthStartPast.month)[1])
    session.commit()
def technicalIndicator(self):
    """Compute RSI/Williams/MACD/BBAND for every stored code and granularity,
    then persist any newly filled indicator columns."""
    for k, v in twstock.codes.items():
        if self.isStockOrETF(v.type) and self.isHistoryExist(k):
            for historyType in HistoryType:
                print("technicalIndicator code: " + k + ", type: " + self.translate(historyType, HistoryTypeTo.HUMAN))
                historyList = session.query(StockHistory).\
                    filter(StockHistory.code == k).\
                    filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                    order_by(StockHistory.date.asc()).\
                    all()
                if len(historyList) == 0:
                    continue
                df = self.createDataFrame(historyList)
                # Each calculate* fills a per-date dict consumed by the update below.
                self.calculateRSI(df)
                self.calculateWilliams(df)
                self.calculateMACD(df)
                self.calculateBBAND(df)
                for history in historyList:
                    self.updateHistoryTechnicalIndicator(history)
                session.commit()
def diverge(self, highRsi, lowRsi, highWilliams, lowWilliams):
    """Print RSI/Williams %R divergence signals for every tracked code,
    ranked by the latest daily turnover (descending).

    highRsi/lowRsi gate overbought/oversold zones; highWilliams/lowWilliams
    gate the "failed retest" variants of the peak/trough divergences.
    """
    turnoverDict = {}
    nameDict = {}
    for k, v in twstock.codes.items():
        if self.isStockOrETF(v.type) and self.isHistoryExist(k):
            history = session.query(StockHistory).\
                filter(StockHistory.code == k).\
                filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
                order_by(StockHistory.date.desc()).\
                first()
            turnoverDict[k] = history.turnover
            nameDict[k] = v.name
    # Rank codes by latest daily turnover, largest first.
    rankDict = {k: v for k, v in sorted(turnoverDict.items(), key=lambda item: item[1], reverse=True)}
    print("按當日成交值由大至小排名,背離條件: rsi > " + str(highRsi) + " or rsi < " + str(lowRsi))
    for rankIdx, code in enumerate(rankDict.keys()):
        closePrice = None
        divergeDict = {}
        for historyType in HistoryType:
            historyTypeHuman = self.translate(historyType, HistoryTypeTo.HUMAN)
            historyTypeExplain = self.translate(historyType, HistoryTypeTo.EXPLAIN)
            # Most-recent-first window; size depends on the granularity.
            historyList = session.query(StockHistory).\
                filter(StockHistory.code == code).\
                filter(StockHistory.type == self.translate(historyType, HistoryTypeTo.DB)).\
                filter(StockHistory.rsi.isnot(None)).\
                order_by(StockHistory.date.desc()).\
                limit(self.recentHistoryLimit(historyType)).\
                all()
            historyListLength = len(historyList)
            if historyListLength > 0:
                closePrice = historyList[0].close
            if historyListLength > 1:
                # Adjacent-bar divergence: rsi and williams move in opposite directions.
                if self.isHighRsi(highRsi, historyList) and historyList[0].rsi > historyList[1].rsi and historyList[0].williams < historyList[1].williams:
                    divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看空"] = "rsi up williams down"
                elif self.isLowRsi(lowRsi, historyList) and historyList[0].rsi < historyList[1].rsi and historyList[0].williams > historyList[1].williams:
                    divergeDict[historyTypeHuman + " 相鄰背離 " + historyTypeExplain + "看多"] = "rsi down williams up"
            if historyListLength > 2:
                # Collect up to two most recent local rsi peaks and troughs
                # (endpoints excluded since they have no neighbour on one side).
                highPeak = []
                lowPeak = []
                for i, history in enumerate(historyList):
                    if i == 0 or i == historyListLength - 1:
                        continue
                    if len(highPeak) < 2 and historyList[i-1].rsi < history.rsi and history.rsi > historyList[i+1].rsi:
                        highPeak.append(history)
                    if len(lowPeak) < 2 and historyList[i-1].rsi > history.rsi and history.rsi < historyList[i+1].rsi:
                        lowPeak.append(history)
                    if len(highPeak) == 2 and len(lowPeak) == 2:
                        break
                if len(highPeak) == 2 and self.isHighRsi(highRsi, highPeak):
                    if highPeak[0].rsi > highPeak[1].rsi and highPeak[0].williams < highPeak[1].williams:
                        divergeDict[historyTypeHuman + " 波峰背離 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi up williams down"
                    elif highPeak[0].rsi < highPeak[1].rsi and highPeak[0].williams > highPeak[1].williams and highPeak[0].williams >= highWilliams:
                        # Bounce that fails to exceed the prior high, with an
                        # intervening trough that reached the lowWilliams zone.
                        for low in lowPeak:
                            if highPeak[0].date > low.date and highPeak[1].date < low.date and low.williams <= lowWilliams:
                                divergeDict[historyTypeHuman + " 波峰背離 反彈不過前高 " + historyTypeExplain + "看空: " + highPeak[1].date + " and " + highPeak[0].date] = "rsi down williams fast up"
                                break
                if len(lowPeak) == 2 and self.isLowRsi(lowRsi, lowPeak):
                    if lowPeak[0].rsi < lowPeak[1].rsi and lowPeak[0].williams > lowPeak[1].williams:
                        divergeDict[historyTypeHuman + " 波谷背離 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi down williams up"
                    elif lowPeak[0].rsi > lowPeak[1].rsi and lowPeak[0].williams < lowPeak[1].williams and lowPeak[0].williams <= lowWilliams:
                        # Pullback that holds above the prior low, with an
                        # intervening peak that reached the highWilliams zone.
                        for high in highPeak:
                            if lowPeak[0].date > high.date and lowPeak[1].date < high.date and high.williams >= highWilliams:
                                divergeDict[historyTypeHuman + " 波谷背離 回測不過前低 " + historyTypeExplain + "看多: " + lowPeak[1].date + " and " + lowPeak[0].date] = "rsi up williams fast down"
                                break
        if len(divergeDict) > 0:
            print("code: " + code + ", name: " + nameDict[code] + ", rank: " + str(rankIdx+1) + "/" + str(len(rankDict)) + ", close price: " + str(closePrice))
            for k, v in divergeDict.items():
                print(k + " => " + v)
            print("")
            print("========================================================================================")
def isStockOrETF(self, type):
    """Return True for listing types we track: common stock (股票) or ETF."""
    return type in ("股票", "ETF")
def isHistoryExist(self, code):
    """Return True when a DAY-type history row for `code` dated today is stored.

    NOTE(review): hard-coded to '3707' — every other code short-circuits to
    False, which disables week/month aggregation and indicators for them.
    Looks like debug scaffolding; confirm before shipping.
    """
    if code=='3707':
        return session.query(StockHistory).\
            filter(StockHistory.code == code).\
            filter(StockHistory.type == self.translate(HistoryType.DAY, HistoryTypeTo.DB)).\
            filter(StockHistory.date == self.transformDateTimeToStr(datetime.now())).\
            first() is not None
    return False
def isHighRsi(self, highRsi, historyList):
    """True unless one of the two most recent entries has rsi below highRsi.

    Only the first two elements are inspected, matching the original loop
    which broke out at index 2.
    """
    return not any(entry.rsi < highRsi for entry in historyList[:2])
def isLowRsi(self, lowRsi, historyList):
    """True unless one of the two most recent entries has rsi above lowRsi.

    Only the first two elements are inspected, matching the original loop
    which broke out at index 2.
    """
    return not any(entry.rsi > lowRsi for entry in historyList[:2])
def recentHistoryLimit(self, historyType):
    """Number of most-recent rows to inspect per granularity (DAY 40, WEEK 16, else 6)."""
    limits = {HistoryType.DAY: 40, HistoryType.WEEK: 16}
    return limits.get(historyType, 6)
def translate(self, historyType, historyTypeTo):
    """Pick the representation of `historyType` selected by `historyTypeTo.value`."""
    representations = historyType.value
    return representations[historyTypeTo.value]
def finmindtrade(self, code, start, end, dayType):
    """Download daily OHLCV rows for `code` from FinMind and upsert them.

    Fetches the TaiwanStockPrice dataset between `start` and `end`
    ("YYYY-MM-DD" strings, inclusive), merges each row into StockHistory
    with the given `dayType`, commits once, then sleeps to respect the
    API rate limit.
    """
    url = "https://api.finmindtrade.com/api/v4/data"
    # SECURITY(review): hard-coded API token — move to an env var / config file.
    parameter = {
        "dataset": "TaiwanStockPrice",
        "data_id": code,
        "start_date": start,
        "end_date": end,
        "token": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlIjoiMjAyMS0xMC0wMSAxNjoyMzoyNSIsInVzZXJfaWQiOiJtY3VpdGVhbGxlbiIsImlwIjoiMTE4LjE2My4xNDcuMTgyIn0.vXMykagq4kOKGrKOusgfAR3yhgcri0N_Wpe1Nb4DOiA"
    }
    resp = requests.get(url, params=parameter)
    # Parse once and use a name that does not shadow the stdlib `json` module
    # (the original called resp.json() twice and re-indexed the raw payload).
    payload = resp.json()
    if payload is not None:
        # .get() guards against error responses that lack a "data" key.
        for data in payload.get("data", []):
            history = StockHistory(code=code, type=dayType, date=data["date"],
                                   capacity=data["Trading_Volume"], turnover=data["Trading_money"],
                                   open=data["open"], high=data["max"], low=data["min"], close=data["close"])
            session.merge(history)
        session.commit()
    # FinMind free tier is rate-limited; pause between successive calls.
    time.sleep(6.1)
# Script entry point: refresh day/week/month history, recompute indicators,
# then report divergences (rsi thresholds 70/30, williams thresholds -20/-80).
# The trailing dataset residue ("| true | true") fused onto the last call has
# been removed — it was not valid Python.
twHistory = TwHistory()
twHistory.dayHistory()
twHistory.weekHistory()
twHistory.monthHistory()
twHistory.technicalIndicator()
twHistory.diverge(70, 30, -20, -80)
f7211ab5f9fd402c221ac94f5f39ef29a6d25331 | 88,960 | py | Python | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 6 | 2020-09-10T15:03:25.000Z | 2021-04-01T22:48:33.000Z | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/arithmetic/test_datetime64.py | naomi172839/pandas | c5f11ab79e5553a28a91fc7036c8dcbfc8cbc697 | [
"BSD-3-Clause"
] | 4 | 2020-02-07T05:05:32.000Z | 2020-05-11T06:06:17.000Z | # Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
    """Comparisons of datetime64 vectors against scalars and array-likes."""

    # Comparison tests for datetime64 vectors fully parametrized over
    # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
    # tests will eventually end up here.

    def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
        # Test comparison with zero-dimensional array is unboxed
        tz = tz_naive_fixture
        box = box_with_array
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        dti = date_range("20130101", periods=3, tz=tz)

        other = np.array(dti.to_numpy()[0])

        dtarr = tm.box_expected(dti, box)
        result = dtarr <= other
        expected = np.array([True, False, False])
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "other",
        [
            "foo",
            -1,
            99,
            4.0,
            object(),
            timedelta(days=2),
            # GH#19800, GH#19301 datetime.date comparison raises to
            # match DatetimeIndex/Timestamp. This also matches the behavior
            # of stdlib datetime.datetime
            datetime(2001, 1, 1).date(),
            # GH#19301 None and NaN are *not* cast to NaT for comparisons
            None,
            np.nan,
        ],
    )
    def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
        # GH#22074, GH#15966
        tz = tz_naive_fixture

        rng = date_range("1/1/2000", periods=10, tz=tz)
        dtarr = tm.box_expected(rng, box_with_array)
        assert_invalid_comparison(dtarr, other, box_with_array)

    @pytest.mark.parametrize(
        "other",
        [
            list(range(10)),
            np.arange(10),
            np.arange(10).astype(np.float32),
            np.arange(10).astype(object),
            pd.timedelta_range("1ns", periods=10).array,
            np.array(pd.timedelta_range("1ns", periods=10)),
            list(pd.timedelta_range("1ns", periods=10)),
            pd.timedelta_range("1 Day", periods=10).astype(object),
            pd.period_range("1971-01-01", freq="D", periods=10).array,
            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
        ],
    )
    def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
        # We don't parametrize this over box_with_array because listlike
        # other plays poorly with assert_invalid_comparison reversed checks
        tz = tz_naive_fixture

        dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
        assert_invalid_comparison(dta, other, tm.to_array)

    def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
        tz = tz_naive_fixture

        dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data

        # mixed array: equality works element-wise, ordering raises
        other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
        result = dta == other
        expected = np.array([False, False, False, True, False])
        tm.assert_numpy_array_equal(result, expected)

        result = dta != other
        tm.assert_numpy_array_equal(result, ~expected)

        msg = "Invalid comparison between|Cannot compare type|not supported between"
        with pytest.raises(TypeError, match=msg):
            dta < other
        with pytest.raises(TypeError, match=msg):
            dta > other
        with pytest.raises(TypeError, match=msg):
            dta <= other
        with pytest.raises(TypeError, match=msg):
            dta >= other

    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
        tz = tz_naive_fixture
        box = box_with_array
        xbox = box if box is not pd.Index else np.ndarray

        ts = pd.Timestamp.now(tz)
        ser = pd.Series([ts, pd.NaT])

        # FIXME: Can't transpose because that loses the tz dtype on
        # the NaT column
        obj = tm.box_expected(ser, box, transpose=False)

        expected = pd.Series([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox, transpose=False)

        result = obj == ts
        tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
    """Series-flavoured datetime64 comparison tests (NaT handling, scalars)."""

    # TODO: moved from tests.series.test_operators; needs cleanup
    @pytest.mark.parametrize(
        "pair",
        [
            (
                [pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
                [NaT, NaT, pd.Timestamp("2011-01-03")],
            ),
            (
                [pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
                [NaT, NaT, pd.Timedelta("3 days")],
            ),
            (
                [pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
                [NaT, NaT, pd.Period("2011-03", freq="M")],
            ),
        ],
    )
    @pytest.mark.parametrize("reverse", [True, False])
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
        box = index_or_series
        l, r = pair
        if reverse:
            # add lhs / rhs switched data
            l, r = r, l

        left = Series(l, dtype=dtype)
        right = box(r, dtype=dtype)

        # Series, Index
        expected = Series([False, False, True])
        tm.assert_series_equal(left == right, expected)

        expected = Series([True, True, False])
        tm.assert_series_equal(left != right, expected)

        expected = Series([False, False, False])
        tm.assert_series_equal(left < right, expected)

        expected = Series([False, False, False])
        tm.assert_series_equal(left > right, expected)

        expected = Series([False, False, True])
        tm.assert_series_equal(left >= right, expected)

        expected = Series([False, False, True])
        tm.assert_series_equal(left <= right, expected)

    def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
        # GH#4968
        # invalid date/int comparisons
        tz = tz_naive_fixture
        ser = Series(range(5))
        ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))

        ser = tm.box_expected(ser, box_with_array)
        ser2 = tm.box_expected(ser2, box_with_array)

        assert_invalid_comparison(ser, ser2, box_with_array)

    @pytest.mark.parametrize(
        "data",
        [
            [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
            [Timedelta("1 days"), NaT, Timedelta("3 days")],
            [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
        ],
    )
    @pytest.mark.parametrize("dtype", [None, object])
    def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
        if box_with_array is tm.to_array and dtype is object:
            # dont bother testing ndarray comparison methods as this fails
            # on older numpys (since they check object identity)
            return

        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        left = Series(data, dtype=dtype)
        left = tm.box_expected(left, box_with_array)

        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left == NaT, expected)
        tm.assert_equal(NaT == left, expected)

        expected = [True, True, True]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left != NaT, expected)
        tm.assert_equal(NaT != left, expected)

        # all ordering comparisons against NaT are False
        expected = [False, False, False]
        expected = tm.box_expected(expected, xbox)
        tm.assert_equal(left < NaT, expected)
        tm.assert_equal(NaT > left, expected)
        tm.assert_equal(left <= NaT, expected)
        tm.assert_equal(NaT >= left, expected)

        tm.assert_equal(left > NaT, expected)
        tm.assert_equal(NaT < left, expected)
        tm.assert_equal(left >= NaT, expected)
        tm.assert_equal(NaT <= left, expected)

    @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
    def test_series_comparison_scalars(self, val):
        series = Series(date_range("1/1/2000", periods=10))

        result = series > val
        expected = Series([x > val for x in series])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
    )
    def test_timestamp_compare_series(self, left, right):
        # see gh-4982
        # Make sure we can compare Timestamps on the right AND left hand side.
        ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
        s_nat = ser.copy(deep=True)

        ser[0] = pd.Timestamp("nat")
        ser[3] = pd.Timestamp("nat")

        left_f = getattr(operator, left)
        right_f = getattr(operator, right)

        # No NaT
        expected = left_f(ser, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), ser)
        tm.assert_series_equal(result, expected)

        # NaT
        expected = left_f(ser, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), ser)
        tm.assert_series_equal(result, expected)

        # Compare to Timestamp with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("20010109"))
        result = right_f(pd.Timestamp("20010109"), s_nat)
        tm.assert_series_equal(result, expected)

        # Compare to NaT with series containing NaT
        expected = left_f(s_nat, pd.Timestamp("nat"))
        result = right_f(pd.Timestamp("nat"), s_nat)
        tm.assert_series_equal(result, expected)

    def test_dt64arr_timestamp_equality(self, box_with_array):
        # GH#11034
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray

        ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
        ser = tm.box_expected(ser, box_with_array)

        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)

        result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)

        result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)

        result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
if box_with_array is tm.to_array and dtype is object:
# dont bother testing ndarray comparison methods as this fails
# on older numpys (since they check object identity)
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = pd.DatetimeIndex(
[pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
)
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == pd.NaT, expected)
tm.assert_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != pd.NaT, expected)
tm.assert_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < pd.NaT, expected)
tm.assert_equal(pd.NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
didx2 = pd.DatetimeIndex(
["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np_datetime64_compat("2014-02-01 00:00Z"),
np_datetime64_compat("2014-03-01 00:00Z"),
np_datetime64_compat("nat"),
np.datetime64("nat"),
np_datetime64_compat("2014-06-01 00:00Z"),
np_datetime64_compat("2014-07-01 00:00Z"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat(self, op, box_df_fail):
# GH#18162
box = box_df_fail
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, dz)
# FIXME: DataFrame case fails to raise for == and !=, wrong
# message for inequalities
with pytest.raises(TypeError, match=msg):
op(dr, list(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(list(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
# FIXME: DataFrame case fails to raise for == and !=, wrong
# message for inequalities
with pytest.raises(TypeError, match=msg):
op(dz, list(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(list(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == list(dr))
assert np.all(list(dr) == dr)
assert np.all(np.array(list(dr), dtype=object) == dr)
assert np.all(dr == np.array(list(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == list(dz))
assert np.all(list(dz) == dz)
assert np.all(np.array(list(dz), dtype=object) == dz)
assert np.all(dz == np.array(list(dz), dtype=object))
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
# GH#18162
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = pd.Timestamp("2000-03-14 01:59")
ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
with pytest.raises(TypeError, match=msg):
op(dz, ts)
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, op, other, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_nat_comparison_tzawareness(self, op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
dti = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, pd.NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), pd.NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            pd.timedelta_range("1D", periods=10),
            pd.timedelta_range("1D", periods=10).to_series(),
            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        # GH#22074
        # regression test that we _don't_ call _assert_tzawareness_compat
        # when comparing against TimedeltaIndex
        # Equality against a mismatched type is all-False / all-True;
        # ordering comparisons raise.
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        # GH#22074
        # Comparing against an object-dtype copy of itself is elementwise True.
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        other = dti.astype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        other = dti.tz_localize(None)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            # tzawareness failure
            dti != other
        # Mixed object array: equality works per element, ordering raises.
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Cannot compare type"
        with pytest.raises(TypeError, match=msg):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    """Addition/subtraction of datetime64 arrays with timedelta-like and
    datetime-like operands, parametrized over container boxes."""

    # This class is intended for "finished" tests that are fully parametrized
    # over DataFrame/Series/Index/DatetimeArray
    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like
    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)
    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # In-place variant of the test above.
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng += two_hours
        tm.assert_equal(rng, expected)
    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # Subtraction counterpart of the addition test above.
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)
    def test_dt64arr_isub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # In-place subtraction variant.
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng -= two_hours
        tm.assert_equal(rng, expected)
    # TODO: redundant with test_dt64arr_add_timedeltalike_scalar
    def test_dt64arr_add_td64_scalar(self, box_with_array):
        # scalar timedeltas/np.timedelta64 objects
        # operate with np.timedelta64 correctly
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        # GH#23320 special handling for timedelta64("NaT")
        tz = tz_naive_fixture
        dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)
        # FIXME: fails with transpose=True due to tz-aware DataFrame
        # transpose bug
        obj = tm.box_expected(dti, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj
    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        # Elementwise add/sub with a raw timedelta64 ndarray; rsub raises.
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values
        expected = pd.date_range("2015-12-31", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)
        expected = pd.date_range("2016-01-02", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr
    # -----------------------------------------------------------------
    # Subtraction of datetime-like scalars
    @pytest.mark.parametrize(
        "ts",
        [
            pd.Timestamp("2013-01-01"),
            pd.Timestamp("2013-01-01").to_pydatetime(),
            pd.Timestamp("2013-01-01").to_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
        idx = pd.date_range("2013-01-01", periods=3)
        idx = tm.box_expected(idx, box_with_array)
        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)
    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
        # for DataFrame operation
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "datetime64[D]"
        dti = pd.date_range("20130101", periods=3)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - dt64
        tm.assert_equal(result, expected)
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)
    def test_dt64arr_sub_timestamp(self, box_with_array):
        # Subtracting a scalar Timestamp yields timedeltas, both orders.
        ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ts = ser[0]
        ser = tm.box_expected(ser, box_with_array)
        delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_series, box_with_array)
        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)
    def test_dt64arr_sub_NaT(self, box_with_array):
        # GH#18808
        # dt64 - NaT propagates NaT with timedelta64[ns] dtype.
        dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)
        result = ser - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)
        result = ser_tz - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
    # -------------------------------------------------------------
    # Subtraction of datetime-like array-like
    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        # Object-dtype datetime array subtraction matches dt64 subtraction,
        # modulo a PerformanceWarning for non-DataFrame boxes.
        dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            result = obj - obj.astype(object)
        tm.assert_equal(result, expected)
    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        # tz-naive box minus a raw dt64 ndarray works in both orders.
        dti = pd.date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)
    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        # tz-aware box minus a (naive) dt64 ndarray must raise.
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr
    # -------------------------------------------------------------
    # Addition of datetime-like others (invalid)
    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        # datetime + datetime is always invalid.
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            dtarr + dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals + dtarr
    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        # GH#22163 ensure DataFrame doesn't cast Timestamp to i8
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp("2011-01-01")
        with pytest.raises(TypeError, match=msg):
            Timestamp("2011-01-01") + idx
    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction
    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            pd.Period("2011-01-01", freq="D"),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        # Numeric and Period operands are invalid for dt64 add/sub.
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)
    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
class TestDatetime64DateOffsetArithmetic:
    """Addition/subtraction of DateOffset objects (Tick, RelativeDelta,
    and the remaining offset classes) with datetime64 arrays."""

    # -------------------------------------------------------------
    # Tick DateOffsets
    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        # offset - datetime is invalid (offsets only negate via unary minus)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser
    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)
        offset_cls = getattr(pd.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)
    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]
        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )
        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # TODO: parametrize over the scalar being added?  radd?  sub?
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, "h")
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)
    # -------------------------------------------------------------
    # RelativeDelta DateOffsets
    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        # GH#10699
        # Vectorized relativedelta-style offsets must match pointwise results.
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
        # DateOffset relativedelta fastpath
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, kwd in enumerate(relative_kwargs):
            # single-keyword offset
            off = pd.DateOffset(**dict([kwd]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            # cumulative multi-keyword offset
            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec
    # -------------------------------------------------------------
    # Non-Tick, Non-RelativeDelta DateOffsets
    # TODO: redundant with test_dt64arr_add_sub_DateOffset?  that includes
    # tz-aware cases which this does not
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        # GH#10699
        # assert vectorized operation matches pointwise operations
        if isinstance(cls_and_kwargs, tuple):
            # If cls_name param is a tuple, then 2nd entry is kwargs for
            # the offset constructor
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}
        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            # passing n = 0 is invalid for these offset classes
            return
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
        offset_cls = getattr(pd.offsets, cls_name)
        with warnings.catch_warnings(record=True):
            # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
            # applied to Series or DatetimeIndex
            # we aren't testing that here, so ignore.
            warnings.simplefilter("ignore", PerformanceWarning)
            offset = offset_cls(n, normalize=normalize, **kwargs)
            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)
            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)
            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec
    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        # GH#10699
        # DateOffset add/sub, tz-naive and tz-aware.
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        result = s - pd.DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
        s = tm.box_expected(s, box_with_array)
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)
            # same offset
            other = pd.Index(
                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
            )
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)
    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
        # GH#18849
        # Elementwise add/sub of a raw ndarray of offsets.
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            res = dtarr + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(warn):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)
        with tm.assert_produces_warning(warn):
            res = dtarr - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)
        expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """int64 overflow behavior of datetime64 arithmetic: overflow should
    raise OverflowError, except where masked out by NaT."""

    # TODO: box + de-duplicate
    def test_dt64_overflow_masking(self, box_with_array):
        # GH#25317
        # NaT entries mask what would otherwise be overflowing subtractions.
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)
        result = left - right
        tm.assert_equal(result, expected)
    def test_dt64_series_arith_overflow(self):
        # GH#12534, fixed by GH#19024
        dt = pd.Timestamp("1700-01-31")
        td = pd.Timedelta("20000 Days")
        dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
        ser = pd.Series(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser
        # Masking the overflowing entry with NaT makes the ops succeed.
        ser.iloc[-1] = pd.NaT
        expected = pd.Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)
        ser.iloc[1:] = pd.NaT
        expected = pd.Series(
            ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
        )
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)
    def test_datetimeindex_sub_timestamp_overflow(self):
        # Subtracting scalars near Timestamp.min/max from extreme indexes.
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])
        tsneg = Timestamp("1950-01-01")
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]
        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimax - variant
        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected
        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimin - variant
    def test_datetimeindex_sub_datetimeindex_overflow(self):
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])
        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected
        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos
        # Edge cases
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin
        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    def test_empty_series_add_sub(self):
        # GH#13844
        # Empty dt64 +/- empty td64 stays empty dt64; td64 - dt64 raises.
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series(
[
pd.Timestamp("20111230"),
pd.Timestamp("20120101"),
pd.Timestamp("20120103"),
]
)
dt1.iloc[2] = np.nan
dt2 = Series(
[
pd.Timestamp("20111231"),
pd.Timestamp("20120102"),
pd.Timestamp("20120104"),
]
)
dt1 - dt2
dt2 - dt1
# datetime64 with timetimedelta
dt1 + td1
td1 + dt1
dt1 - td1
# timetimedelta with datetime64
td1 + dt1
dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
    # -------------------------------------------------------------
    # TODO: This next block of tests came from tests.series.test_operators,
    # needs to be de-duplicated and parametrized over `box` classes
    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        # these are all TypeError ops
        op_str = all_arithmetic_operators
        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)
        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)
        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
def test_dt64tz_series_sub_dtitz(self):
# GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
# (with same tz) raises, fixed by #19024
dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
ser = pd.Series(dti)
expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))
res = dti - ser
tm.assert_series_equal(res, expected)
res = ser - dti
tm.assert_series_equal(res, expected)
def test_sub_datetime_compat(self):
# see GH#14088
s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
exp = Series([Timedelta("1 days"), pd.NaT])
tm.assert_series_equal(s - dt, exp)
tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
    def test_datetime64_ops_nat(self):
        # GH#11349
        # NaT scalar propagates through dt64 Series ops; negating a dt64
        # Series (even an all-NaT one) raises.
        datetime_series = Series([NaT, Timestamp("19900315")])
        nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
        single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
        # subtraction
        tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
        msg = "Unary negative expects"
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + datetime_series
        tm.assert_series_equal(
            -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        with pytest.raises(TypeError, match=msg):
            -single_nat_dtype_datetime + nat_series_dtype_timestamp
        # addition
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
        )
        tm.assert_series_equal(
            NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
        )
    # -------------------------------------------------------------
    # Invalid Operations
    # TODO: this block also needs to be de-duplicated and parametrized
    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([pd.NaT, Timestamp("19900315")]),
            Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # dt64 Series cannot be multiplied or divided by numerics.
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series
        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series
# TODO: parametrize over box
@pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_dt64_series_add_intlike(self, tz, op):
# GH#19123
dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
method = getattr(ser, op)
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
method(1)
with pytest.raises(TypeError, match=msg):
method(other)
with pytest.raises(TypeError, match=msg):
method(np.array(other))
with pytest.raises(TypeError, match=msg):
method(pd.Index(other))
# -------------------------------------------------------------
# Timezone-Centric Tests
    def test_operators_datetimelike_with_timezones(self):
        # A tz-aware dt64 Series combined with timedelta scalars/Series
        # must match the naive computation re-localized to the same tz.
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan  # variant with a missing value
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan  # variant with a missing value
        # --- scalar Timedelta: addition, both operand orders ---
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # --- scalar Timedelta: subtraction; reversed order must raise ---
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2
        # --- Timedelta Series: addition and subtraction ---
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # td64 Series minus dt64 Series is not defined
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_addsub_int(self, tz_naive_fixture, one):
# Variants of `one` for #19012
tz = tz_naive_fixture
rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
msg = "Addition/subtraction of integers"
with pytest.raises(TypeError, match=msg):
rng + one
with pytest.raises(TypeError, match=msg):
rng += one
with pytest.raises(TypeError, match=msg):
rng - one
with pytest.raises(TypeError, match=msg):
rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558: in-place addition between DatetimeIndex and
        # TimedeltaIndex (and the underlying timedelta64 ndarray).
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # iadd with TimedeltaIndex, in both operand orders
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        # NOTE(review): this repeats `result += dti` from the block above
        # rather than using an ndarray operand — possibly intentional
        # (dti.values would drop the tz), but confirm.
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .*TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = "cannot subtract DatetimeArray from"
with pytest.raises(TypeError, match=msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = "cannot subtract .* from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = "|".join(
[
"cannot perform __neg__ with this index type:",
"ufunc subtract cannot use operands with types",
"cannot subtract DatetimeArray from",
]
)
with pytest.raises(TypeError, match=msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
@pytest.mark.parametrize(
"addend",
[
datetime(2011, 1, 1),
DatetimeIndex(["2011-01-01", "2011-01-02"]),
DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
np.datetime64("2011-01-01"),
Timestamp("2011-01-01"),
],
ids=lambda x: type(x).__name__,
)
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
# GH#9631
dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
dtarr = tm.box_expected(dti, box_with_array)
msg = "cannot add DatetimeArray and"
with pytest.raises(TypeError, match=msg):
dtarr + addend
with pytest.raises(TypeError, match=msg):
addend + dtarr
# -------------------------------------------------------------
def test_dta_add_sub_index(self, tz_naive_fixture):
# Check that DatetimeArray defers to Index classes
dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
dta = dti.array
result = dta - dti
expected = dti - dti
tm.assert_index_equal(result, expected)
tdi = result
result = dta + tdi
expected = dti + tdi
tm.assert_index_equal(result, expected)
result = dta - tdi
expected = dti - tdi
tm.assert_index_equal(result, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range("20130101", periods=3)
dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
msg = "DatetimeArray subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dti_tz - dti
with pytest.raises(TypeError, match=msg):
dti - dti_tz
with pytest.raises(TypeError, match=msg):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range("20130101", periods=3)
dti2 = date_range("20130101", periods=4)
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
expected = TimedeltaIndex(["1 days", np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------------
# TODO: Most of this block is moved from series or frame tests, needs
# cleanup, box-parametrization, and de-duplication
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
# see casting notes in NumPy gh-12927
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
tm.assert_series_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
# GH#11349
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
# subtraction
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(
["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
)
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "3D"
exp = DatetimeIndex(
["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
)
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "D"
@pytest.mark.parametrize(
"names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
)
@pytest.mark.parametrize("tz", [None, "America/Chicago"])
def test_dti_add_series(self, tz, names):
# GH#13905
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
box = pd.Index
other_box = index_or_series
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(box, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])
xbox = get_upcast_box(box_with_array, other)
expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(warn):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
    # shift_months must agree with applying DateOffset(years=..., months=...)
    # timestamp-by-timestamp, including month-end and leap-day cases.
    dti = DatetimeIndex(
        [
            Timestamp("2000-01-05 00:15:00"),
            Timestamp("2000-01-31 00:23:00"),
            Timestamp("2000-01-01"),
            Timestamp("2000-02-29"),
            Timestamp("2000-12-31"),
        ]
    )
    shifted = shift_months(dti.asi8, years * 12 + months)
    expected = DatetimeIndex(
        [ts + pd.offsets.DateOffset(years=years, months=months) for ts in dti]
    )
    tm.assert_index_equal(DatetimeIndex(shifted), expected)
def test_dt64arr_addsub_object_dtype_2d():
# block-wise DataFrame operations will require operating on 2D
# DatetimeArray/TimedeltaArray, so check that specifically.
dti = pd.date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))
other = np.array([[pd.offsets.Day(n)] for n in range(4)])
assert other.shape == dta.shape
with tm.assert_produces_warning(PerformanceWarning):
result = dta + other
with tm.assert_produces_warning(PerformanceWarning):
expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
assert isinstance(result, DatetimeArray)
assert result.freq is None
tm.assert_numpy_array_equal(result._data, expected._data)
with tm.assert_produces_warning(PerformanceWarning):
# Case where we expect to get a TimedeltaArray back
result2 = dta - dta.astype(object)
assert isinstance(result2, TimedeltaArray)
assert result2.shape == (4, 1)
assert result2.freq is None
assert (result2.asi8 == 0).all()
# (removed: stray dataset-metadata row leaked into the source by extraction)
from datetime import datetime, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
class TestDatetime64ArrayLikeComparisons:
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
# other plays poorly with assert_invalid_comparison reversed checks
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
assert_invalid_comparison(dta, other, tm.to_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
    def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
        # GH#22242, GH#22163: DataFrame considered NaT == ts incorrectly;
        # NaT must compare unequal to every Timestamp.
        tz = tz_naive_fixture
        box = box_with_array
        # comparisons on pd.Index return plain ndarrays
        xbox = box if box is not pd.Index else np.ndarray
        ts = pd.Timestamp.now(tz)
        ser = pd.Series([ts, pd.NaT])
        # FIXME: can't transpose here because that appears to lose the tz
        # dtype — TODO confirm and complete this note
        obj = tm.box_expected(ser, box, transpose=False)
        expected = pd.Series([True, False], dtype=np.bool_)
        expected = tm.box_expected(expected, xbox, transpose=False)
        result = obj == ts
        tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
@pytest.mark.parametrize(
"pair",
[
(
[pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
[NaT, NaT, pd.Timestamp("2011-01-03")],
),
(
[pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
[NaT, NaT, pd.Timedelta("3 days")],
),
(
[pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
[NaT, NaT, pd.Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
box = index_or_series
l, r = pair
if reverse:
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
expected = Series([False, False, True])
tm.assert_series_equal(left == right, expected)
expected = Series([True, True, False])
tm.assert_series_equal(left != right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left < right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left > right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left <= right, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
if box_with_array is tm.to_array and dtype is object:
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box_with_array)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
ser = pd.Series(pd.date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = pd.Timestamp("nat")
ser[3] = pd.Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
expected = left_f(ser, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
expected = left_f(ser, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
expected = left_f(s_nat, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
expected = left_f(s_nat, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), s_nat)
tm.assert_series_equal(result, expected)
    def test_dt64arr_timestamp_equality(self, box_with_array):
        # Equality/inequality against self and against scalar entries,
        # with a NaT present: NaT is never equal to anything (itself
        # included), so positions holding NaT compare != even to NaT.
        # comparisons on pd.Index return plain ndarrays
        xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
        ser = pd.Series([pd.Timestamp("2000-01-29 01:59:00"), "NaT"])
        ser = tm.box_expected(ser, box_with_array)
        # self-inequality: only the NaT slot is "unequal to itself"
        result = ser != ser
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        # vs the real Timestamp entry
        result = ser != ser[0]
        expected = tm.box_expected([False, True], xbox)
        tm.assert_equal(result, expected)
        # vs the NaT entry: everything is unequal
        result = ser != ser[1]
        expected = tm.box_expected([True, True], xbox)
        tm.assert_equal(result, expected)
        # self-equality mirrors the above
        result = ser == ser
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[0]
        expected = tm.box_expected([True, False], xbox)
        tm.assert_equal(result, expected)
        result = ser == ser[1]
        expected = tm.box_expected([False, False], xbox)
        tm.assert_equal(result, expected)
class TestDatetimeIndexComparisons:
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.lt, operator.ge, operator.le],
)
def test_comparators(self, op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = op(arr, element)
index_result = op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
if box_with_array is tm.to_array and dtype is object:
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = pd.DatetimeIndex(
[pd.Timestamp("2011-01-01"), pd.NaT, pd.Timestamp("2011-01-03")]
)
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == pd.NaT, expected)
tm.assert_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != pd.NaT, expected)
tm.assert_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < pd.NaT, expected)
tm.assert_equal(pd.NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(
["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
)
didx2 = pd.DatetimeIndex(
["2014-02-01", "2014-03-01", pd.NaT, pd.NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np_datetime64_compat("2014-02-01 00:00Z"),
np_datetime64_compat("2014-03-01 00:00Z"),
np_datetime64_compat("nat"),
np.datetime64("nat"),
np_datetime64_compat("2014-06-01 00:00Z"),
np_datetime64_compat("2014-07-01 00:00Z"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_comparison_tzawareness_compat(self, op, box_df_fail):
        """Mixed tz-naive/tz-aware comparisons raise TypeError for every
        operator; same-awareness comparisons succeed, including when one
        side is a list or object-dtype ndarray.
        """
        box = box_df_fail
        dr = pd.date_range("2016-01-01", periods=6)
        dz = dr.tz_localize("US/Pacific")
        dr = tm.box_expected(dr, box)
        dz = tm.box_expected(dz, box)
        msg = "Cannot compare tz-naive and tz-aware"
        # naive vs aware raises no matter how the other operand is wrapped
        with pytest.raises(TypeError, match=msg):
            op(dr, dz)
        with pytest.raises(TypeError, match=msg):
            op(dr, list(dz))
        with pytest.raises(TypeError, match=msg):
            op(dr, np.array(list(dz), dtype=object))
        with pytest.raises(TypeError, match=msg):
            op(dz, dr)
        with pytest.raises(TypeError, match=msg):
            op(dz, list(dr))
        with pytest.raises(TypeError, match=msg):
            op(dz, np.array(list(dr), dtype=object))
        # same-awareness comparisons are fine, in list/object-array form too
        assert np.all(dr == dr)
        assert np.all(dr == list(dr))
        assert np.all(list(dr) == dr)
        assert np.all(np.array(list(dr), dtype=object) == dr)
        assert np.all(dr == np.array(list(dr), dtype=object))
        assert np.all(dz == dz)
        assert np.all(dz == list(dz))
        assert np.all(list(dz) == dz)
        assert np.all(np.array(list(dz), dtype=object) == dz)
        assert np.all(dz == np.array(list(dz), dtype=object))
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
)
def test_comparison_tzawareness_compat_scalars(self, op, box_with_array):
dr = pd.date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
ts = pd.Timestamp("2000-03-14 01:59")
ts_tz = pd.Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = "Cannot compare tz-naive and tz-aware"
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
with pytest.raises(TypeError, match=msg):
op(dz, ts)
op(ts, dz)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    @pytest.mark.parametrize(
        "other",
        [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
    )
    @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
    @pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
    def test_scalar_comparison_tzawareness(
        self, op, other, tz_aware_fixture, box_with_array
    ):
        """Comparing a tz-aware array against a tz-naive datetime scalar
        (stdlib datetime, Timestamp, or np.datetime64) raises TypeError
        in both operand orders, for every comparison operator.
        """
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            op(dtarr, other)
        with pytest.raises(TypeError, match=msg):
            op(other, dtarr)
    @pytest.mark.parametrize(
        "op",
        [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le],
    )
    def test_nat_comparison_tzawareness(self, op):
        """Comparison against pd.NaT is elementwise False (True only for
        ``!=``), regardless of the index's timezone-awareness.
        """
        dti = pd.DatetimeIndex(
            ["2014-01-01", pd.NaT, "2014-03-01", pd.NaT, "2014-05-01", "2014-07-01"]
        )
        # every element compares False against NaT, so only != is all-True
        expected = np.array([op == operator.ne] * len(dti))
        result = op(dti, pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
        result = op(dti.tz_localize("US/Pacific"), pd.NaT)
        tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            pd.timedelta_range("1D", periods=10),
            pd.timedelta_range("1D", periods=10).to_series(),
            pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
        ],
        ids=lambda x: type(x).__name__,
    )
    def test_dti_cmp_tdi_tzawareness(self, other):
        """Equality against timedelta-like arrays is elementwise False
        (all-True for ``!=``); ordering comparisons raise TypeError.
        """
        # when comparing against TimedeltaIndex
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        result = dti == other
        expected = np.array([False] * 10)
        tm.assert_numpy_array_equal(result, expected)
        result = dti != other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Invalid comparison between"
        with pytest.raises(TypeError, match=msg):
            dti < other
        with pytest.raises(TypeError, match=msg):
            dti <= other
        with pytest.raises(TypeError, match=msg):
            dti > other
        with pytest.raises(TypeError, match=msg):
            dti >= other
    def test_dti_cmp_object_dtype(self):
        """Comparisons against object-dtype arrays: same tz-aware values
        compare equal; tz-naive values raise on comparison; mixed
        Timestamp/Timedelta contents allow ``==`` but raise on ordering.
        """
        # GH#22074
        dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
        other = dti.astype("O")
        result = dti == other
        expected = np.array([True] * 10)
        tm.assert_numpy_array_equal(result, expected)
        other = dti.tz_localize(None)
        msg = "Cannot compare tz-naive and tz-aware"
        with pytest.raises(TypeError, match=msg):
            # tzawareness failure
            dti != other
        other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
        result = dti == other
        expected = np.array([True] * 5 + [False] * 5)
        tm.assert_numpy_array_equal(result, expected)
        msg = "Cannot compare type"
        with pytest.raises(TypeError, match=msg):
            dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
    """Addition/subtraction semantics for datetime64 data.

    This class is intended for "finished" tests that are fully parametrized
    over DataFrame/Series/Index/DatetimeArray.
    """

    # -------------------------------------------------------------
    # Addition/Subtraction of timedelta-like

    def test_dt64arr_add_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        # GH#22005, GH#22163 check DataFrame doesn't raise TypeError
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng + two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_iadd_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng += two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_sub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = rng - two_hours
        tm.assert_equal(result, expected)

    def test_dt64arr_isub_timedeltalike_scalar(
        self, tz_naive_fixture, two_hours, box_with_array
    ):
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01", "2000-02-01", tz=tz)
        expected = pd.date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
        rng = tm.box_expected(rng, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        rng -= two_hours
        tm.assert_equal(rng, expected)

    def test_dt64arr_add_td64_scalar(self, box_with_array):
        """np.timedelta64 scalars add commutatively at second and
        millisecond resolutions."""
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
        )
        dtarr = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(1, "s")
        tm.assert_equal(result, expected)
        result = np.timedelta64(1, "s") + dtarr
        tm.assert_equal(result, expected)
        expected = Series(
            [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
        )
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + np.timedelta64(5, "ms")
        tm.assert_equal(result, expected)
        result = np.timedelta64(5, "ms") + dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
        """Adding/subtracting np.timedelta64("NaT") propagates NaT
        elementwise; reversed subtraction is invalid.

        NOTE(review): the `tz`/`dti` bindings were truncated in the source
        (a dangling `pd.date_range(...)` expression); restored from the
        usage of `tz` and `dti` on the following lines.
        """
        tz = tz_naive_fixture
        dti = pd.date_range("1994-04-01", periods=9, tz=tz, freq="QS")
        other = np.timedelta64("NaT")
        expected = pd.DatetimeIndex(["NaT"] * 9, tz=tz)
        obj = tm.box_expected(dti, box_with_array, transpose=False)
        expected = tm.box_expected(expected, box_with_array, transpose=False)
        result = obj + other
        tm.assert_equal(result, expected)
        result = other + obj
        tm.assert_equal(result, expected)
        result = obj - other
        tm.assert_equal(result, expected)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            other - obj

    def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
        tdarr = tdi.values
        expected = pd.date_range("2015-12-31", periods=3, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr + tdarr
        tm.assert_equal(result, expected)
        result = tdarr + dtarr
        tm.assert_equal(result, expected)
        expected = pd.date_range("2016-01-02", periods=3, tz=tz)
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - tdarr
        tm.assert_equal(result, expected)
        msg = "cannot subtract|(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            tdarr - dtarr

    @pytest.mark.parametrize(
        "ts",
        [
            pd.Timestamp("2013-01-01"),
            pd.Timestamp("2013-01-01").to_pydatetime(),
            pd.Timestamp("2013-01-01").to_datetime64(),
        ],
    )
    def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
        """Subtracting a datetime scalar (Timestamp, stdlib datetime, or
        np.datetime64) yields timedelta64, not datetime64.

        NOTE(review): the construction of `idx` was lost in the source
        (`idx` was used without being defined); restored so that the
        expected ["0 Days", "1 Day", "2 Days"] deltas line up with the
        2013-01-01 scalar being subtracted.
        """
        idx = pd.date_range("2013-01-01", periods=3)
        idx = tm.box_expected(idx, box_with_array)
        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = idx - ts
        tm.assert_equal(result, expected)

    def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
        """Non-nanosecond np.datetime64 scalars are cast to nanosecond
        resolution before subtraction, in either operand order.

        NOTE(review): the source line was truncated to `ate_range(...)`
        and `dt64` was used without being defined; restored the `dti`
        assignment and a day-resolution `dt64` scalar consistent with
        the expected ["0 Days", "1 Day", "2 Days"] result.
        """
        dt64 = np.datetime64("2013-01-01")
        assert dt64.dtype == "M8[D]"
        dti = pd.date_range("20130101", periods=3)
        dtarr = tm.box_expected(dti, box_with_array)
        expected = pd.TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
        expected = tm.box_expected(expected, box_with_array)
        result = dtarr - dt64
        tm.assert_equal(result, expected)
        result = dt64 - dtarr
        tm.assert_equal(result, -expected)

    def test_dt64arr_sub_timestamp(self, box_with_array):
        ser = pd.date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
        ts = ser[0]
        ser = tm.box_expected(ser, box_with_array)
        delta_series = pd.Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
        expected = tm.box_expected(delta_series, box_with_array)
        tm.assert_equal(ser - ts, expected)
        tm.assert_equal(ts - ser, -expected)

    def test_dt64arr_sub_NaT(self, box_with_array):
        """Subtracting NaT yields all-NaT timedelta64, tz-aware or not."""
        dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp("19900315")])
        ser = tm.box_expected(dti, box_with_array)
        result = ser - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)
        dti_tz = dti.tz_localize("Asia/Tokyo")
        ser_tz = tm.box_expected(dti_tz, box_with_array)
        result = ser_tz - pd.NaT
        expected = pd.Series([pd.NaT, pd.NaT], dtype="timedelta64[ns]")
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(result, expected)

    def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
        dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
        expected = dti - dti
        obj = tm.box_expected(dti, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        # object-dtype arithmetic warns about performance except for DataFrame
        warn = PerformanceWarning if box_with_array is not pd.DataFrame else None
        with tm.assert_produces_warning(warn):
            result = obj - obj.astype(object)
        tm.assert_equal(result, expected)

    def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
        dti = pd.date_range("2016-01-01", periods=3, tz=None)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        expected = dtarr - dtarr
        result = dtarr - dt64vals
        tm.assert_equal(result, expected)
        result = dt64vals - dtarr
        tm.assert_equal(result, expected)

    def test_dt64arr_aware_sub_dt64ndarray_raises(
        self, tz_aware_fixture, box_with_array
    ):
        tz = tz_aware_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dtarr - dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals - dtarr

    def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
        tz = tz_naive_fixture
        dti = pd.date_range("2016-01-01", periods=3, tz=tz)
        dt64vals = dti.values
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            dtarr + dt64vals
        with pytest.raises(TypeError, match=msg):
            dt64vals + dtarr

    def test_dt64arr_add_timestamp_raises(self, box_with_array):
        """Adding a Timestamp to a datetime64 array is invalid in either
        order.

        NOTE(review): the `idx` construction was truncated in the source
        (only `-02"])` survived); restored a two-element DatetimeIndex.
        """
        idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
        idx = tm.box_expected(idx, box_with_array)
        msg = "cannot add"
        with pytest.raises(TypeError, match=msg):
            idx + Timestamp("2011-01-01")
        with pytest.raises(TypeError, match=msg):
            Timestamp("2011-01-01") + idx

    # -------------------------------------------------------------
    # Other Invalid Addition/Subtraction

    @pytest.mark.parametrize(
        "other",
        [
            3.14,
            np.array([2.0, 3.0]),
            # GH#13078 datetime +/- Period is invalid
            pd.Period("2011-01-01", freq="D"),
        ],
    )
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "|".join(
            [
                "unsupported operand type",
                "cannot (add|subtract)",
                "cannot use operands with types",
                "ufunc '?(add|subtract)'? cannot use operands with types",
            ]
        )
        assert_invalid_addsub_type(dtarr, other, msg)

    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
    @pytest.mark.parametrize("dti_freq", [None, "D"])
    def test_dt64arr_add_sub_parr(
        self, dti_freq, pi_freq, box_with_array, box_with_array2
    ):
        # GH#20049 subtracting PeriodIndex should raise TypeError
        dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
        pi = dti.to_period(pi_freq)
        dtarr = tm.box_expected(dti, box_with_array)
        parr = tm.box_expected(pi, box_with_array2)
        msg = "|".join(
            [
                "cannot (add|subtract)",
                "unsupported operand",
                "descriptor.*requires",
                "ufunc.*cannot use operands",
            ]
        )
        assert_invalid_addsub_type(dtarr, parr, msg)
class TestDatetime64DateOffsetArithmetic:
    """Addition/subtraction of pd.offsets (Tick, relativedelta-based, and
    the full DateOffset zoo) with datetime64 data, parametrized over box.
    """

    # -------------------------------------------------------------
    # Tick DateOffsets
    # TODO: parametrize over timezone?
    def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
        """Adding a Tick offset (Second) is elementwise and commutative."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser + pd.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
    def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
        """Subtracting a Tick offset works; offset - array is invalid."""
        # GH#4532
        # operate with pd.offsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        expected = Series(
            [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
        )
        ser = tm.box_expected(ser, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        result = ser - pd.offsets.Second(5)
        tm.assert_equal(result, expected)
        result2 = -pd.offsets.Second(5) + ser
        tm.assert_equal(result2, expected)
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            pd.offsets.Second(5) - ser
    @pytest.mark.parametrize(
        "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
    )
    def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
        """Smoke test: every Tick subclass can be added/subtracted without
        raising (result values are not checked here)."""
        # GH#4532
        # smoke tests for valid DateOffsets
        ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
        ser = tm.box_expected(ser, box_with_array)
        offset_cls = getattr(pd.offsets, cls_name)
        ser + offset_cls(5)
        offset_cls(5) + ser
        ser - offset_cls(5)
    def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
        """Tick arithmetic on tz-aware data; Hour(5), timedelta64, and
        stdlib timedelta all produce the same shifted result."""
        # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
        tz = tz_aware_fixture
        if tz == "US/Pacific":
            dates = date_range("2012-11-01", periods=3, tz=tz)
            offset = dates + pd.offsets.Hour(5)
            assert dates[0] + pd.offsets.Hour(5) == offset[0]
        dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
        expected = DatetimeIndex(
            ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
            freq="H",
            tz=tz,
        )
        dates = tm.box_expected(dates, box_with_array)
        expected = tm.box_expected(expected, box_with_array)
        offset = dates + pd.offsets.Hour(5)
        tm.assert_equal(offset, expected)
        offset = dates + np.timedelta64(5, "h")
        tm.assert_equal(offset, expected)
        offset = dates + timedelta(hours=5)
        tm.assert_equal(offset, expected)
    def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
        """DateOffset built from each relativedelta kwarg (and cumulative
        combinations) matches applying the offset scalar-by-scalar."""
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        # scalar Timestamps to compare elementwise against the boxed result
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
        relative_kwargs = [
            ("years", 2),
            ("months", 5),
            ("days", 3),
            ("hours", 5),
            ("minutes", 10),
            ("seconds", 2),
            ("microseconds", 5),
        ]
        for i, kwd in enumerate(relative_kwargs):
            # single-kwarg offset
            off = pd.DateOffset(**dict([kwd]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            # cumulative offset combining all kwargs seen so far
            off = pd.DateOffset(**dict(relative_kwargs[: i + 1]))
            expected = DatetimeIndex([x + off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + off)
            expected = DatetimeIndex([x - off for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - off)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                off - vec
    @pytest.mark.parametrize(
        "cls_and_kwargs",
        [
            "YearBegin",
            ("YearBegin", {"month": 5}),
            "YearEnd",
            ("YearEnd", {"month": 5}),
            "MonthBegin",
            "MonthEnd",
            "SemiMonthEnd",
            "SemiMonthBegin",
            "Week",
            ("Week", {"weekday": 3}),
            "Week",
            ("Week", {"weekday": 6}),
            "BusinessDay",
            "BDay",
            "QuarterEnd",
            "QuarterBegin",
            "CustomBusinessDay",
            "CDay",
            "CBMonthEnd",
            "CBMonthBegin",
            "BMonthBegin",
            "BMonthEnd",
            "BusinessHour",
            "BYearBegin",
            "BYearEnd",
            "BQuarterBegin",
            ("LastWeekOfMonth", {"weekday": 2}),
            (
                "FY5253Quarter",
                {
                    "qtr_with_extra_week": 1,
                    "startingMonth": 1,
                    "weekday": 2,
                    "variation": "nearest",
                },
            ),
            ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
            ("WeekOfMonth", {"weekday": 2, "week": 2}),
            "Easter",
            ("DateOffset", {"day": 4}),
            ("DateOffset", {"month": 5}),
        ],
    )
    @pytest.mark.parametrize("normalize", [True, False])
    @pytest.mark.parametrize("n", [0, 5])
    def test_dt64arr_add_sub_DateOffsets(
        self, box_with_array, n, normalize, cls_and_kwargs
    ):
        """Each named DateOffset class (optionally with kwargs) applied to
        an array matches applying it scalar-by-scalar."""
        # a bare class name means no constructor kwargs
        if isinstance(cls_and_kwargs, tuple):
            cls_name, kwargs = cls_and_kwargs
        else:
            cls_name = cls_and_kwargs
            kwargs = {}
        if n == 0 and cls_name in [
            "WeekOfMonth",
            "LastWeekOfMonth",
            "FY5253Quarter",
            "FY5253",
        ]:
            return
        vec = DatetimeIndex(
            [
                Timestamp("2000-01-05 00:15:00"),
                Timestamp("2000-01-31 00:23:00"),
                Timestamp("2000-01-01"),
                Timestamp("2000-03-31"),
                Timestamp("2000-02-29"),
                Timestamp("2000-12-31"),
                Timestamp("2000-05-15"),
                Timestamp("2001-06-15"),
            ]
        )
        vec = tm.box_expected(vec, box_with_array)
        vec_items = vec.squeeze() if box_with_array is pd.DataFrame else vec
        offset_cls = getattr(pd.offsets, cls_name)
        with warnings.catch_warnings(record=True):
            warnings.simplefilter("ignore", PerformanceWarning)
            offset = offset_cls(n, normalize=normalize, **kwargs)
            expected = DatetimeIndex([x + offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec + offset)
            expected = DatetimeIndex([x - offset for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, vec - offset)
            expected = DatetimeIndex([offset + x for x in vec_items])
            expected = tm.box_expected(expected, box_with_array)
            tm.assert_equal(expected, offset + vec)
            msg = "(bad|unsupported) operand type for unary"
            with pytest.raises(TypeError, match=msg):
                offset - vec
    def test_dt64arr_add_sub_DateOffset(self, box_with_array):
        """DateOffset/Day/MonthEnd arithmetic on naive and tz-aware data,
        including reflected addition."""
        # GH#10699
        s = date_range("2000-01-01", "2000-01-31", name="a")
        s = tm.box_expected(s, box_with_array)
        result = s + pd.DateOffset(years=1)
        result2 = pd.DateOffset(years=1) + s
        exp = date_range("2001-01-01", "2001-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        result = s - pd.DateOffset(years=1)
        exp = date_range("1999-01-01", "1999-01-31", name="a")
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.Day()
        result2 = pd.offsets.Day() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-16 00:15:00", tz="US/Central"),
                Timestamp("2000-02-16", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = DatetimeIndex(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        s = tm.box_expected(s, box_with_array)
        result = s + pd.offsets.MonthEnd()
        result2 = pd.offsets.MonthEnd() + s
        exp = DatetimeIndex(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        exp = tm.box_expected(exp, box_with_array)
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
    # TODO: __sub__, __rsub__
    def test_dt64arr_add_mixed_offset_array(self, box_with_array):
        """Adding an object array of (possibly heterogeneous) offsets warns
        (except for DataFrame) and applies each offset elementwise."""
        # GH#10699
        # array of offsets
        s = DatetimeIndex([Timestamp("2000-1-1"), Timestamp("2000-2-1")])
        s = tm.box_expected(s, box_with_array)
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            other = pd.Index([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()])
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2000-2-29")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)
            # same offset
            other = pd.Index(
                [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)]
            )
            other = tm.box_expected(other, box_with_array)
            result = s + other
            exp = DatetimeIndex([Timestamp("2001-1-1"), Timestamp("2001-2-1")])
            exp = tm.box_expected(exp, box_with_array)
            tm.assert_equal(result, exp)
    # TODO: overlap with test_dt64arr_add_mixed_offset_array?
    def test_dt64arr_add_sub_offset_ndarray(self, tz_naive_fixture, box_with_array):
        """Adding/subtracting an ndarray of offsets applies elementwise and
        warns (except for DataFrame)."""
        # GH#18849
        tz = tz_naive_fixture
        dti = pd.date_range("2017-01-01", periods=2, tz=tz)
        dtarr = tm.box_expected(dti, box_with_array)
        other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
        warn = None if box_with_array is pd.DataFrame else PerformanceWarning
        with tm.assert_produces_warning(warn):
            res = dtarr + other
        expected = DatetimeIndex(
            [dti[n] + other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)
        with tm.assert_produces_warning(warn):
            res2 = other + dtarr
        tm.assert_equal(res2, expected)
        with tm.assert_produces_warning(warn):
            res = dtarr - other
        expected = DatetimeIndex(
            [dti[n] - other[n] for n in range(len(dti))], name=dti.name, freq="infer"
        )
        expected = tm.box_expected(expected, box_with_array)
        tm.assert_equal(res, expected)
    @pytest.mark.parametrize(
        "op, offset, exp, exp_freq",
        [
            (
                "__add__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2014-04-11"),
                    Timestamp("2015-04-11"),
                    Timestamp("2016-04-11"),
                    Timestamp("2017-04-11"),
                ],
                None,
            ),
            (
                "__add__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2014-04-01"),
                    Timestamp("2015-04-01"),
                    Timestamp("2016-04-01"),
                    Timestamp("2017-04-01"),
                ],
                "AS-APR",
            ),
            (
                "__sub__",
                pd.DateOffset(months=3, days=10),
                [
                    Timestamp("2013-09-21"),
                    Timestamp("2014-09-21"),
                    Timestamp("2015-09-21"),
                    Timestamp("2016-09-21"),
                ],
                None,
            ),
            (
                "__sub__",
                pd.DateOffset(months=3),
                [
                    Timestamp("2013-10-01"),
                    Timestamp("2014-10-01"),
                    Timestamp("2015-10-01"),
                    Timestamp("2016-10-01"),
                ],
                "AS-OCT",
            ),
        ],
    )
    def test_dti_add_sub_nonzero_mth_offset(
        self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array
    ):
        """DateOffset with nonzero months (+/- days) applied via the dunder
        produces the parametrized expected values and frequency."""
        # GH 26258
        tz = tz_aware_fixture
        date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="AS", tz=tz)
        date = tm.box_expected(date, box_with_array, False)
        mth = getattr(date, op)
        result = mth(offset)
        expected = pd.DatetimeIndex(exp, tz=tz, freq=exp_freq)
        expected = tm.box_expected(expected, box_with_array, False)
        tm.assert_equal(result, expected)
class TestDatetime64OverflowHandling:
    """int64 overflow behavior for datetime64 arithmetic: NaT masking and
    OverflowError at the Timestamp.min/max boundaries.
    """

    # TODO: box + de-duplicate
    def test_dt64_overflow_masking(self, box_with_array):
        """NaT entries are masked out rather than overflowing."""
        # GH#25317
        left = Series([Timestamp("1969-12-31")])
        right = Series([NaT])
        left = tm.box_expected(left, box_with_array)
        right = tm.box_expected(right, box_with_array)
        expected = TimedeltaIndex([NaT])
        expected = tm.box_expected(expected, box_with_array)
        result = left - right
        tm.assert_equal(result, expected)
    def test_dt64_series_arith_overflow(self):
        """Out-of-bounds results raise OverflowError; once the overflowing
        entries are replaced with NaT, the same ops succeed."""
        # GH#12534, fixed by GH#19024
        dt = pd.Timestamp("1700-01-31")
        td = pd.Timedelta("20000 Days")
        dti = pd.date_range("1949-09-30", freq="100Y", periods=4)
        ser = pd.Series(dti)
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            ser - dt
        with pytest.raises(OverflowError, match=msg):
            dt - ser
        with pytest.raises(OverflowError, match=msg):
            ser + td
        with pytest.raises(OverflowError, match=msg):
            td + ser
        ser.iloc[-1] = pd.NaT
        expected = pd.Series(
            ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
        )
        res = ser + td
        tm.assert_series_equal(res, expected)
        res = td + ser
        tm.assert_series_equal(res, expected)
        ser.iloc[1:] = pd.NaT
        expected = pd.Series(
            ["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]"
        )
        res = ser - dt
        tm.assert_series_equal(res, expected)
        res = dt - ser
        tm.assert_series_equal(res, -expected)
    def test_datetimeindex_sub_timestamp_overflow(self):
        """Subtracting scalar datetime variants near Timestamp.min/max:
        overflowing direction raises, the safe direction succeeds."""
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])
        tsneg = Timestamp("1950-01-01")
        # the same instant in every scalar representation pandas accepts
        ts_neg_variants = [
            tsneg,
            tsneg.to_pydatetime(),
            tsneg.to_datetime64().astype("datetime64[ns]"),
            tsneg.to_datetime64().astype("datetime64[D]"),
        ]
        tspos = Timestamp("1980-01-01")
        ts_pos_variants = [
            tspos,
            tspos.to_pydatetime(),
            tspos.to_datetime64().astype("datetime64[ns]"),
            tspos.to_datetime64().astype("datetime64[D]"),
        ]
        msg = "Overflow in int64 addition"
        for variant in ts_neg_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimax - variant
        expected = pd.Timestamp.max.value - tspos.value
        for variant in ts_pos_variants:
            res = dtimax - variant
            assert res[1].value == expected
        expected = pd.Timestamp.min.value - tsneg.value
        for variant in ts_neg_variants:
            res = dtimin - variant
            assert res[1].value == expected
        for variant in ts_pos_variants:
            with pytest.raises(OverflowError, match=msg):
                dtimin - variant
    def test_datetimeindex_sub_datetimeindex_overflow(self):
        """DatetimeIndex - DatetimeIndex near the int64 boundaries."""
        # GH#22492, GH#22508
        dtimax = pd.to_datetime(["now", pd.Timestamp.max])
        dtimin = pd.to_datetime(["now", pd.Timestamp.min])
        ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"])
        ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"])
        # General tests
        expected = pd.Timestamp.max.value - ts_pos[1].value
        result = dtimax - ts_pos
        assert result[1].value == expected
        expected = pd.Timestamp.min.value - ts_neg[1].value
        result = dtimin - ts_neg
        assert result[1].value == expected
        msg = "Overflow in int64 addition"
        with pytest.raises(OverflowError, match=msg):
            dtimax - ts_neg
        with pytest.raises(OverflowError, match=msg):
            dtimin - ts_pos
        # Edge cases
        tmin = pd.to_datetime([pd.Timestamp.min])
        t1 = tmin + pd.Timedelta.max + pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            t1 - tmin
        tmax = pd.to_datetime([pd.Timestamp.max])
        t2 = tmax + pd.Timedelta.min - pd.Timedelta("1us")
        with pytest.raises(OverflowError, match=msg):
            tmax - t2
class TestTimestampSeriesArithmetic:
    def test_empty_series_add_sub(self):
        """dt64 +/- td64 on empty Series keeps dt64 dtype; the reversed
        td64 - dt64 is invalid even when both are empty."""
        # GH#13844
        a = Series(dtype="M8[ns]")
        b = Series(dtype="m8[ns]")
        tm.assert_series_equal(a, a + b)
        tm.assert_series_equal(a, a - b)
        tm.assert_series_equal(a, b + a)
        msg = "cannot subtract"
        with pytest.raises(TypeError, match=msg):
            b - a
    def test_operators_datetimelike(self):
        """Smoke test: dt64-dt64 subtraction and dt64 +/- td64 arithmetic
        run without raising, including with NaN entries."""
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Series(
            [
                pd.Timestamp("20111230"),
                pd.Timestamp("20120101"),
                pd.Timestamp("20120103"),
            ]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [
                pd.Timestamp("20111231"),
                pd.Timestamp("20120102"),
                pd.Timestamp("20120104"),
            ]
        )
        dt1 - dt2
        dt2 - dt1
        # datetime64 with timedelta
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # timedelta with datetime64
        td1 + dt1
        dt1 + td1
def test_dt64ser_sub_datetime_dtype(self):
ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00))
dt = datetime(1993, 6, 22, 13, 30)
ser = Series([ts])
result = pd.to_timedelta(np.abs(ser - dt))
assert result.dtype == "timedelta64[ns]"
# -------------------------------------------------------------
# TODO: This next block of tests came from tests.series.test_operators,
# needs to be de-duplicated and parametrized over `box` classes
    def test_operators_datetimelike_invalid(self, all_arithmetic_operators):
        """Every arithmetic op not explicitly supported between
        datetime64/timedelta64 Series raises TypeError."""
        # these are all TypeError ops
        op_str = all_arithmetic_operators
        def check(get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            op = getattr(get_ser, op_str, None)
            # Previously, _validate_for_numeric_binop in core/indexes/base.py
            # did this for us.
            with pytest.raises(
                TypeError, match="operate|[cC]annot|unsupported operand"
            ):
                op(test_ser)
        # ## timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        # ## datetime64 ###
        dt1 = Series(
            [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")]
        )
        dt1.iloc[2] = np.nan
        dt2 = Series(
            [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")]
        )
        if op_str not in ["__sub__", "__rsub__"]:
            check(dt1, dt2)
        # ## datetime64 with timedelta ###
        # TODO(jreback) __rsub__ should raise?
        if op_str not in ["__add__", "__radd__", "__sub__"]:
            check(dt1, td1)
        # 8260, 10763
        # datetime64 with tz
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        if op_str not in ["__add__", "__radd__", "__sub__", "__rsub__"]:
            check(dt2, td2)
def test_sub_single_tz(self):
# GH#12290
s1 = Series([pd.Timestamp("2016-02-10", tz="America/Sao_Paulo")])
s2 = Series([pd.Timestamp("2016-02-08", tz="America/Sao_Paulo")])
result = s1 - s2
expected = Series([Timedelta("2days")])
tm.assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta("-2days")])
tm.assert_series_equal(result, expected)
    def test_dt64tz_series_sub_dtitz(self):
        """tz-aware Series minus tz-aware DatetimeIndex (same tz) works in
        both operand orders, returning a timedelta Series."""
        # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series
        # (with same tz) raises, fixed by #19024
        dti = pd.date_range("1999-09-30", periods=10, tz="US/Pacific")
        ser = pd.Series(dti)
        expected = pd.Series(pd.TimedeltaIndex(["0days"] * 10))
        res = dti - ser
        tm.assert_series_equal(res, expected)
        res = ser - dti
        tm.assert_series_equal(res, expected)
    def test_sub_datetime_compat(self):
        """Subtracting a stdlib tz-aware datetime matches subtracting the
        equivalent Timestamp; NaT propagates through the subtraction."""
        # see GH#14088
        s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT])
        dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc)
        exp = Series([Timedelta("1 days"), pd.NaT])
        tm.assert_series_equal(s - dt, exp)
        tm.assert_series_equal(s - Timestamp(dt), exp)
def test_dt64_series_add_mixed_tick_DateOffset(self):
# GH#4532
# operate with pd.offsets
s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")]
)
tm.assert_series_equal(result, expected)
def test_datetime64_ops_nat(self):
# GH#11349
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
# subtraction
tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
msg = "Unary negative expects"
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + datetime_series
tm.assert_series_equal(
-NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
with pytest.raises(TypeError, match=msg):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
# addition
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp
)
tm.assert_series_equal(
NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp
)
# -------------------------------------------------------------
# Invalid Operations
# TODO: this block also needs to be de-duplicated and parametrized
    @pytest.mark.parametrize(
        "dt64_series",
        [
            Series([Timestamp("19900315"), Timestamp("19900315")]),
            Series([pd.NaT, Timestamp("19900315")]),
            Series([pd.NaT, pd.NaT], dtype="datetime64[ns]"),
        ],
    )
    @pytest.mark.parametrize("one", [1, 1.0, np.array(1)])
    def test_dt64_mul_div_numeric_invalid(self, one, dt64_series):
        # Multiplying or dividing a datetime64 Series by a numeric scalar
        # (int, float, or 0-d ndarray) is meaningless and must raise.
        # multiplication
        msg = "cannot perform .* with this index type"
        with pytest.raises(TypeError, match=msg):
            dt64_series * one
        with pytest.raises(TypeError, match=msg):
            one * dt64_series
        # division
        with pytest.raises(TypeError, match=msg):
            dt64_series / one
        with pytest.raises(TypeError, match=msg):
            one / dt64_series
# TODO: parametrize over box
@pytest.mark.parametrize("op", ["__add__", "__radd__", "__sub__", "__rsub__"])
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_dt64_series_add_intlike(self, tz, op):
# GH#19123
dti = pd.DatetimeIndex(["2016-01-02", "2016-02-03", "NaT"], tz=tz)
ser = Series(dti)
other = Series([20, 30, 40], dtype="uint8")
method = getattr(ser, op)
msg = "|".join(
[
"Addition/subtraction of integers and integer-arrays",
"cannot subtract .* from ndarray",
]
)
with pytest.raises(TypeError, match=msg):
method(1)
with pytest.raises(TypeError, match=msg):
method(other)
with pytest.raises(TypeError, match=msg):
method(np.array(other))
with pytest.raises(TypeError, match=msg):
method(pd.Index(other))
# -------------------------------------------------------------
# Timezone-Centric Tests
    def test_operators_datetimelike_with_timezones(self):
        # tz-aware datetime64 Series +/- timedeltas (scalar or Series) acts
        # on the wall time: equivalent to localize-to-naive, operate, then
        # re-localize.  NaN entries propagate through.
        tz = "US/Eastern"
        dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo")
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="H"))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        # scalar timedelta on the right
        result = dt1 + td1[0]
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2[0]
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # odd numpy behavior with scalar timedeltas
        result = td1[0] + dt1
        exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = td2[0] + dt2
        exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt1 - td1[0]
        exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # scalar timedelta minus datetime Series is undefined
        msg = "(bad|unsupported) operand type for unary"
        with pytest.raises(TypeError, match=msg):
            td1[0] - dt1
        result = dt2 - td2[0]
        exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        with pytest.raises(TypeError, match=msg):
            td2[0] - dt2
        # elementwise with a full timedelta Series
        result = dt1 + td1
        exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 + td2
        exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt1 - td1
        exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        result = dt2 - td2
        exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
        tm.assert_series_equal(result, exp)
        # timedelta Series minus datetime Series must raise as well
        msg = "cannot (add|subtract)"
        with pytest.raises(TypeError, match=msg):
            td1 - dt1
        with pytest.raises(TypeError, match=msg):
            td2 - dt2
class TestDatetimeIndexArithmetic:
    """Arithmetic between DatetimeIndex and ints, timedeltas, datetimes."""

    # -------------------------------------------------------------
    # Binary operations DatetimeIndex and int
    def test_dti_addsub_int(self, tz_naive_fixture, one):
        # Variants of `one` for #19012
        # Adding/subtracting a plain integer (or 0-d array) to/from a
        # DatetimeIndex is disallowed, including the in-place forms.
        tz = tz_naive_fixture
        rng = pd.date_range("2000-01-01 09:00", freq="H", periods=10, tz=tz)
        msg = "Addition/subtraction of integers"
        with pytest.raises(TypeError, match=msg):
            rng + one
        with pytest.raises(TypeError, match=msg):
            rng += one
        with pytest.raises(TypeError, match=msg):
            rng - one
        with pytest.raises(TypeError, match=msg):
            rng -= one
# -------------------------------------------------------------
# __add__/__sub__ with integer arrays
@pytest.mark.parametrize("freq", ["H", "D"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("freq", ["W", "M", "MS", "Q"])
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_non_tick(self, int_holder, freq):
# GH#19959
dti = pd.date_range("2016-01-01", periods=2, freq=freq)
other = int_holder([4, -1])
msg = "Addition/subtraction of integers|cannot subtract DatetimeArray from"
assert_invalid_addsub_type(dti, other, msg)
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
def test_dti_add_intarray_no_freq(self, int_holder):
# GH#19959
dti = pd.DatetimeIndex(["2016-01-01", "NaT", "2017-04-05 06:07:08"])
other = int_holder([9, 4, -1])
msg = "|".join(
["cannot subtract DatetimeArray from", "Addition/subtraction of integers"]
)
assert_invalid_addsub_type(dti, other, msg)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz_naive_fixture):
# GH#17558
tz = tz_naive_fixture
dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
tdi = pd.timedelta_range("0 days", periods=10)
expected = pd.date_range("2017-01-01", periods=10, tz=tz)
# add with TimdeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
    def test_dti_iadd_tdi(self, tz_naive_fixture):
        # GH#17558
        # In-place addition (+=) between DatetimeIndex and TimedeltaIndex
        # (or a timedelta64 ndarray) behaves like the out-of-place op.
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz)
        # iadd with TimdeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi
        tm.assert_index_equal(result, expected)
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
        # iadd with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result += tdi.values
        tm.assert_index_equal(result, expected)
        # NOTE(review): this repeats the TimedeltaIndex += DatetimeIndex case
        # above verbatim; presumably it was meant to exercise tdi.values —
        # confirm against upstream intent.
        result = pd.timedelta_range("0 days", periods=10)
        result += dti
        tm.assert_index_equal(result, expected)
    def test_dti_sub_tdi(self, tz_naive_fixture):
        # GH#17558
        # DatetimeIndex - TimedeltaIndex is allowed; the reflected order
        # (timedelta - datetime) is not and must raise.
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # sub with TimedeltaIndex
        result = dti - tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .*TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi - dti
        # sub with timedelta64 array
        result = dti - tdi.values
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract DatetimeArray from"
        with pytest.raises(TypeError, match=msg):
            tdi.values - dti
    def test_dti_isub_tdi(self, tz_naive_fixture):
        # GH#17558
        # In-place subtraction mirrors test_dti_sub_tdi; the reflected
        # in-place forms (tdi -= dti) must raise.
        tz = tz_naive_fixture
        dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        tdi = pd.timedelta_range("0 days", periods=10)
        expected = pd.date_range("2017-01-01", periods=10, tz=tz, freq="-1D")
        # isub with TimedeltaIndex
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi
        tm.assert_index_equal(result, expected)
        msg = "cannot subtract .* from a TimedeltaArray"
        with pytest.raises(TypeError, match=msg):
            tdi -= dti
        # isub with timedelta64 array
        result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10)
        result -= tdi.values
        tm.assert_index_equal(result, expected)
        # ndarray -= DatetimeIndex goes through numpy, so the message varies.
        msg = "|".join(
            [
                "cannot perform __neg__ with this index type:",
                "ufunc subtract cannot use operands with types",
                "cannot subtract DatetimeArray from",
            ]
        )
        with pytest.raises(TypeError, match=msg):
            tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
    @pytest.mark.parametrize(
        "addend",
        [
            datetime(2011, 1, 1),
            DatetimeIndex(["2011-01-01", "2011-01-02"]),
            DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize("US/Eastern"),
            np.datetime64("2011-01-01"),
            Timestamp("2011-01-01"),
        ],
        ids=lambda x: type(x).__name__,
    )
    @pytest.mark.parametrize("tz", [None, "US/Eastern"])
    def test_add_datetimelike_and_dtarr(self, box_with_array, addend, tz):
        # Adding any datetime-like (scalar or array, naive or tz-aware) to a
        # datetime64 array/Index/Series/DataFrame must raise, in both orders.
        dti = DatetimeIndex(["2011-01-01", "2011-01-02"]).tz_localize(tz)
        dtarr = tm.box_expected(dti, box_with_array)
        msg = "cannot add DatetimeArray and"
        with pytest.raises(TypeError, match=msg):
            dtarr + addend
        with pytest.raises(TypeError, match=msg):
            addend + dtarr
    def test_dta_add_sub_index(self, tz_naive_fixture):
        # A DatetimeArray should add/subtract Index objects just like the
        # DatetimeIndex that wraps it.
        dti = date_range("20130101", periods=3, tz=tz_naive_fixture)
        dta = dti.array
        result = dta - dti
        expected = dti - dti
        tm.assert_index_equal(result, expected)
        # reuse the zero-timedelta result as the TimedeltaIndex operand
        tdi = result
        result = dta + tdi
        expected = dti + tdi
        tm.assert_index_equal(result, expected)
        result = dta - tdi
        expected = dti - tdi
        tm.assert_index_equal(result, expected)
    def test_sub_dti_dti(self):
        # Subtracting two DatetimeIndexes yields a TimedeltaIndex; mixing
        # naive with aware (or different tzs) raises, lengths must match,
        # and NaT propagates.
        dti = date_range("20130101", periods=3)
        dti_tz = date_range("20130101", periods=3).tz_localize("US/Eastern")
        dti_tz2 = date_range("20130101", periods=3).tz_localize("UTC")
        expected = TimedeltaIndex([0, 0, 0])
        result = dti - dti
        tm.assert_index_equal(result, expected)
        result = dti_tz - dti_tz
        tm.assert_index_equal(result, expected)
        msg = "DatetimeArray subtraction must have the same timezones or"
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti
        with pytest.raises(TypeError, match=msg):
            dti - dti_tz
        with pytest.raises(TypeError, match=msg):
            dti_tz - dti_tz2
        # in-place subtraction behaves the same
        dti -= dti
        tm.assert_index_equal(dti, expected)
        # unequal lengths raise ValueError, not TypeError
        dti1 = date_range("20130101", periods=3)
        dti2 = date_range("20130101", periods=4)
        msg = "cannot add indices of unequal length"
        with pytest.raises(ValueError, match=msg):
            dti1 - dti2
        # NaT entries in either operand propagate to the result
        dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"])
        dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan])
        expected = TimedeltaIndex(["1 days", np.nan, np.nan])
        result = dti2 - dti1
        tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_timedelta64_equal_timedelta_supported_ops(self, op):
ser = Series(
[
Timestamp("20130301"),
Timestamp("20130228 23:00:00"),
Timestamp("20130228 22:00:00"),
Timestamp("20130228 21:00:00"),
]
)
intervals = ["D", "h", "m", "s", "us"]
def timedelta64(*args):
return np.sum(list(starmap(np.timedelta64, zip(args, intervals))))
for d, h, m, s, us in product(*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
tm.assert_series_equal(lhs, rhs)
def test_ops_nat_mixed_datetime64_timedelta64(self):
timedelta_series = Series([NaT, Timedelta("1s")])
datetime_series = Series([NaT, Timestamp("19900315")])
nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]")
nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]")
single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]")
single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]")
tm.assert_series_equal(
datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta
)
tm.assert_series_equal(
datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_datetime,
nat_series_dtype_timedelta,
)
tm.assert_series_equal(
nat_series_dtype_timestamp - single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
-single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
msg = "cannot subtract a datelike"
with pytest.raises(TypeError, match=msg):
timedelta_series - single_nat_dtype_datetime
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timestamp + single_nat_dtype_timedelta,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_timedelta + nat_series_dtype_timestamp,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
nat_series_dtype_timedelta + single_nat_dtype_datetime,
nat_series_dtype_timestamp,
)
tm.assert_series_equal(
single_nat_dtype_datetime + nat_series_dtype_timedelta,
nat_series_dtype_timestamp,
)
def test_ufunc_coercions(self):
idx = date_range("2011-01-01", periods=3, freq="2D", name="x")
delta = np.timedelta64(1, "D")
exp = date_range("2011-01-02", periods=3, freq="2D", name="x")
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
exp = date_range("2010-12-31", periods=3, freq="2D", name="x")
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "2D"
delta = np.array(
[np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")]
)
exp = DatetimeIndex(
["2011-01-02", "2011-01-05", "2011-01-08"], freq="3D", name="x"
)
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "3D"
exp = DatetimeIndex(
["2010-12-31", "2011-01-01", "2011-01-02"], freq="D", name="x"
)
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
tm.assert_index_equal(result, exp)
assert result.freq == "D"
@pytest.mark.parametrize(
"names", [("foo", None, None), ("baz", "bar", None), ("bar", "bar", "bar")]
)
@pytest.mark.parametrize("tz", [None, "America/Chicago"])
def test_dti_add_series(self, tz, names):
index = DatetimeIndex(
["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0]
)
ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5), index=index, name=names[2])
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
@pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub])
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("foo", "foo", "foo")]
)
def test_dti_addsub_offset_arraylike(
self, tz_naive_fixture, names, op, index_or_series
):
# GH#18849, GH#19744
box = pd.Index
other_box = index_or_series
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz, name=names[0])
other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1])
xbox = get_upcast_box(box, other)
with tm.assert_produces_warning(PerformanceWarning):
res = op(dti, other)
expected = DatetimeIndex(
[op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer"
)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
@pytest.mark.parametrize("other_box", [pd.Index, np.array])
def test_dti_addsub_object_arraylike(
self, tz_naive_fixture, box_with_array, other_box
):
tz = tz_naive_fixture
dti = pd.date_range("2017-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
other = other_box([pd.offsets.MonthEnd(), pd.Timedelta(days=4)])
xbox = get_upcast_box(box_with_array, other)
expected = pd.DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
result = dtarr + other
tm.assert_equal(result, expected)
expected = pd.DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture)
expected = tm.box_expected(expected, xbox)
with tm.assert_produces_warning(warn):
result = dtarr - other
tm.assert_equal(result, expected)
@pytest.mark.parametrize("years", [-1, 0, 1])
@pytest.mark.parametrize("months", [-2, 0, 2])
def test_shift_months(years, months):
dti = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
]
)
actual = DatetimeIndex(shift_months(dti.asi8, years * 12 + months))
raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
def test_dt64arr_addsub_object_dtype_2d():
    # block-wise DataFrame operations will require operating on 2D
    # DatetimeArray/TimedeltaArray, so check that specifically.
    dti = pd.date_range("1994-02-13", freq="2W", periods=4)
    dta = dti._data.reshape((4, 1))
    other = np.array([[pd.offsets.Day(n)] for n in range(4)])
    assert other.shape == dta.shape
    # 2D result must match the 1D column-wise computation reshaped back
    with tm.assert_produces_warning(PerformanceWarning):
        result = dta + other
    with tm.assert_produces_warning(PerformanceWarning):
        expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
    assert isinstance(result, DatetimeArray)
    assert result.freq is None
    tm.assert_numpy_array_equal(result._data, expected._data)
    with tm.assert_produces_warning(PerformanceWarning):
        # Case where we expect to get a TimedeltaArray back
        result2 = dta - dta.astype(object)
    assert isinstance(result2, TimedeltaArray)
    assert result2.shape == (4, 1)
    assert result2.freq is None
    assert (result2.asi8 == 0).all()
| true | true |
f7211b62c471429cc135fe0e8292971b94db291e | 1,167 | py | Python | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 1 | 2022-02-17T18:01:41.000Z | 2022-02-17T18:01:41.000Z | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 2 | 2021-06-19T19:41:15.000Z | 2021-07-21T17:07:48.000Z | app/database/api/models/resource.py | space-logistics-org/spacenet | fd004437ed7b27dd6dc41a374e1dedfcea92e37d | [
"MIT"
] | 3 | 2021-06-16T16:31:12.000Z | 2022-02-17T18:02:57.000Z | """
This module defines the database schema for resources and resource subclasses.
"""
from sqlalchemy import Column, Integer, String, Float
from ..database import Base
from spacenet.schemas.resource import ResourceType
__all__ = ["Resource", "ResourceType", "ContinuousResource", "DiscreteResource"]
class Resource(Base):
    """
    A row representing a single resource, can be continuous or discrete.
    """
    __tablename__ = "resource"
    # surrogate primary key
    id = Column(Integer, primary_key=True, index=True)
    # polymorphic discriminator column; holds the ResourceType value of the
    # concrete subclass (see __mapper_args__ below)
    type = Column(String)
    # human-readable name and free-form description
    name = Column(String)
    description = Column(String)
    # class-of-supply code — presumably a COS number from the SpaceNet
    # schema; confirm against spacenet.schemas
    class_of_supply = Column(Integer)
    # measurement unit label, plus mass/volume per unit
    units = Column(String)
    unit_mass = Column(Float)
    unit_volume = Column(Float)
    # single-table inheritance keyed on the ``type`` column
    __mapper_args__ = {"polymorphic_identity": "resource", "polymorphic_on": type}
class DiscreteResource(Resource):
    """
    A row representing a single discrete resource.
    """
    # stored in the same table as Resource, discriminated by ``type``
    __mapper_args__ = {"polymorphic_identity": ResourceType.Discrete.value}
class ContinuousResource(Resource):
    """
    A row representing a single continuous resource.
    """
    # stored in the same table as Resource, discriminated by ``type``
    __mapper_args__ = {"polymorphic_identity": ResourceType.Continuous.value}
| 25.369565 | 82 | 0.717224 |
from sqlalchemy import Column, Integer, String, Float
from ..database import Base
from spacenet.schemas.resource import ResourceType
__all__ = ["Resource", "ResourceType", "ContinuousResource", "DiscreteResource"]
class Resource(Base):
__tablename__ = "resource"
id = Column(Integer, primary_key=True, index=True)
type = Column(String)
name = Column(String)
description = Column(String)
class_of_supply = Column(Integer)
units = Column(String)
unit_mass = Column(Float)
unit_volume = Column(Float)
__mapper_args__ = {"polymorphic_identity": "resource", "polymorphic_on": type}
class DiscreteResource(Resource):
__mapper_args__ = {"polymorphic_identity": ResourceType.Discrete.value}
class ContinuousResource(Resource):
__mapper_args__ = {"polymorphic_identity": ResourceType.Continuous.value}
| true | true |
f7211bd5305aa8d6dd9cc38d64504cc0312f6ab1 | 812 | py | Python | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | 1 | 2021-10-15T05:43:19.000Z | 2021-10-15T05:43:19.000Z | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | null | null | null | Latte/ex5.py | Latte-inc/Learn-Python3.6 | f3568cf2f8413f8730c2297bc39ae890bb82d962 | [
"CC0-1.0"
] | 1 | 2022-01-13T10:34:55.000Z | 2022-01-13T10:34:55.000Z | # # This code is learn Python new code, variable format string start!
# Time 2020/05/15 00:44
# fatcat like .....
my_name = 'fatcat'
my_age = 24 # fatcat really is 24!
my_height = 176 # in centimeters (cm)
my_weight = 93 # in kilograms (kg)
my_eyes = 'black'
my_teeth = 'white'
my_hair = 'black'
# The variables above hold two kinds of values: numbers and strings.
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} CM.")
print(f"He's {my_weight} kilo.")
print("Actually that's not too heavy.")
print(f"His teeth are usually {my_teeth} depending on the coffee.")
# this line is tricky , try to get it exactly right
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}.")
# 上述代码(15-23 line)使用了 格式化字符串(format string) 并在字符串里嵌入变量
# 所使用的的方法为 print(f“{}”),在双引号前加入 f 相当于告诉编译器这是个格式化字符
| 30.074074 | 73 | 0.685961 | 3
my_eyes = 'black'
my_teeth = 'white'
my_hair = 'black'
print(f"Let's talk about {my_name}.")
print(f"He's {my_height} CM.")
print(f"He's {my_weight} kilo.")
print("Actually that's not too heavy.")
print(f"His teeth are usually {my_teeth} depending on the coffee.")
total = my_age + my_height + my_weight
print(f"If I add {my_age}, {my_height}, and {my_weight} I get {total}.")
| true | true |
f7211beca92603a62d9cbaad149c7663ec244549 | 881 | py | Python | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2019-04-15T09:40:53.000Z | 2019-04-15T09:40:53.000Z | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/contour_corner_mask.py | argriffing/matplotlib | 5555f5463fb5f995a59f7651c0034a5d6a4c7e84 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Illustrate the difference between corner_mask=False and corner_mask=True
for masked contour plots.
"""
import matplotlib.pyplot as plt
import numpy as np

# Data to plot: a smooth 2-D sinusoidal surface on a 7x10 grid.
x, y = np.meshgrid(np.arange(7), np.arange(10))
z = np.sin(0.5*x)*np.cos(0.52*y)

# Mask various z values.
# Use the builtin ``bool`` dtype: the ``np.bool`` alias was deprecated in
# NumPy 1.20 and removed in NumPy 1.24.
mask = np.zeros_like(z, dtype=bool)
mask[2, 3:5] = True
mask[3:5, 4] = True
mask[7, 2] = True
mask[5, 0] = True
mask[0, 6] = True
z = np.ma.array(z, mask=mask)

# Draw the same masked contour plot twice, once per corner_mask setting,
# so the difference in how masked corners are handled is visible.
corner_masks = [False, True]
for i, corner_mask in enumerate(corner_masks):
    plt.subplot(1, 2, i+1)
    cs = plt.contourf(x, y, z, corner_mask=corner_mask)
    plt.contour(cs, colors='k')
    plt.title('corner_mask = {0}'.format(corner_mask))
    # Plot grid.
    plt.grid(c='k', ls='-', alpha=0.3)
    # Indicate masked points with red circles.
    plt.plot(np.ma.array(x, mask=~mask), y, 'ro')
plt.show()
| 24.472222 | 72 | 0.658343 |
import matplotlib.pyplot as plt
import numpy as np
x, y = np.meshgrid(np.arange(7), np.arange(10))
z = np.sin(0.5*x)*np.cos(0.52*y)
mask = np.zeros_like(z, dtype=np.bool)
mask[2, 3:5] = True
mask[3:5, 4] = True
mask[7, 2] = True
mask[5, 0] = True
mask[0, 6] = True
z = np.ma.array(z, mask=mask)
corner_masks = [False, True]
for i, corner_mask in enumerate(corner_masks):
plt.subplot(1, 2, i+1)
cs = plt.contourf(x, y, z, corner_mask=corner_mask)
plt.contour(cs, colors='k')
plt.title('corner_mask = {0}'.format(corner_mask))
plt.grid(c='k', ls='-', alpha=0.3)
plt.plot(np.ma.array(x, mask=~mask), y, 'ro')
plt.show()
| true | true |
f7211e7c6967282019c097e1107691531485b132 | 847 | py | Python | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 46 | 2019-01-08T13:16:41.000Z | 2021-04-30T20:47:08.000Z | authors/apps/notify/migrations/0001_initial.py | andela/Ah-backend-valkyrie | f0eb64c27e1fe37d5c81e4b9a8762dcf3c336a79 | [
"BSD-3-Clause"
] | 3 | 2019-01-07T08:21:59.000Z | 2019-09-20T06:43:18.000Z | # Generated by Django 2.1.5 on 2019-01-30 03:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the notify app: creates the MailList table
    # holding one row of notification preferences per user.
    # NOTE(review): the field names misspell "receive" as "recieve"; once a
    # migration has been applied it must not be edited — a rename would need
    # a follow-up migration.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='MailList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('recieve_email_notifications', models.BooleanField(default=True)),
                ('recieve_push_notifications', models.BooleanField(default=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 31.37037 | 118 | 0.651712 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MailList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('recieve_email_notifications', models.BooleanField(default=True)),
('recieve_push_notifications', models.BooleanField(default=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7211e7d3cba1e8c8ec791ea66706c3f0cbcf0a0 | 3,776 | py | Python | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 276 | 2019-02-01T22:48:46.000Z | 2021-10-17T21:25:13.000Z | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 95 | 2019-02-03T00:04:11.000Z | 2021-09-22T17:45:56.000Z | search_engine_parser/core/utils.py | justfly50/search-engine-parser | 0418867b3529980d5a4eb71899dec37092fe7df1 | [
"MIT"
] | 74 | 2019-02-02T11:04:17.000Z | 2021-10-09T23:49:25.000Z | import os
import random
import pickle
import hashlib
import aiohttp
from fake_useragent import UserAgent
# Directory containing this module; cache and engine dirs live beneath it.
FILEPATH = os.path.dirname(os.path.abspath(__file__))
# prevent caching
# Static fallback pool of User-Agent strings, used when the fake_useragent
# lookup fails (see get_rand_user_agent below).
USER_AGENT_LIST = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
    "Chrome/72.0.3626.121 Safari/537.36",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0",
    "Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) "
    "Chrome/19.0.1084.46 Safari/536.5",
    "Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) "
    "Chrome/19.0.1084.46 Safari/536.5",
]
def get_rand_user_agent():
    """Return a random User-Agent string.

    Prefers a randomized agent from ``fake_useragent``; falls back to a
    random pick from the static ``USER_AGENT_LIST`` when that lookup fails
    (e.g. no network access to the UA database).
    """
    user_agent = random.choice(USER_AGENT_LIST)
    try:
        user_agent = UserAgent().random
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # only ordinary errors from the fake_useragent lookup are best-effort.
        pass
    return user_agent
class CacheHandler:
    """Filesystem cache of raw search-result pages, one directory per engine."""

    def __init__(self):
        self.cache = os.path.join(FILEPATH, "cache")
        engine_path = os.path.join(FILEPATH, "engines")
        if not os.path.exists(self.cache):
            os.makedirs(self.cache)
        enginelist = os.listdir(engine_path)
        # Map engine name (filename minus ".py") to its cache directory.
        # BUG FIX: ("__init__.py") without a comma is a *string*, so the
        # original ``i not in (...)`` performed a substring test; a real
        # one-element tuple makes it an exact-filename exclusion.
        self.engine_cache = {
            i[:-3]: os.path.join(self.cache, i[:-3])
            for i in enginelist
            if i not in ("__init__.py",)
        }
        for cache in self.engine_cache.values():
            if not os.path.exists(cache):
                os.makedirs(cache)

    async def get_source(self, engine, url, headers, cache=True,
                         proxy=None, proxy_auth=None):
        """
        Retrieves source code of webpage from internet or from cache

        :rtype: str, bool
        :param engine: engine of the engine saving
        :type engine: str
        :param url: URL to pull source code from
        :type url: str
        :param headers: request headers to make use of
        :type headers: dict
        :param cache: use cache or not
        :type cache: bool
        :param proxy: proxy address to make use off
        :type proxy: str
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :type proxy_auth: (str, str)
        """
        # Cache key: SHA-256 of the URL, one pickle file per URL per engine.
        encodedUrl = url.encode("utf-8")
        urlhash = hashlib.sha256(encodedUrl).hexdigest()
        engine = engine.lower()
        cache_path = os.path.join(self.engine_cache[engine], urlhash)
        # Cache hit: second element of the return tuple flags "from cache".
        if os.path.exists(cache_path) and cache:
            with open(cache_path, 'rb') as stream:
                return pickle.load(stream), True
        get_vars = {'url': url, 'headers': headers}
        if proxy and proxy_auth:
            auth = aiohttp.BasicAuth(*proxy_auth)
            get_vars.update({'proxy': proxy, 'proxy_auth': auth})
        async with aiohttp.ClientSession() as session:
            async with session.get(**get_vars) as resp:
                html = await resp.text()
                # Persist the page before returning so the next call hits
                # the cache.
                with open(cache_path, 'wb') as stream:
                    pickle.dump(str(html), stream)
                return str(html), False

    def clear(self, engine=None):
        """
        Clear the entire cache either by engine name
        or just all

        :param engine: engine to clear
        """
        if engine:
            targets = [self.engine_cache[engine.lower()]]
        else:
            targets = list(self.engine_cache.values())
        for engine_cache in targets:
            for root, _dirs, files in os.walk(engine_cache):
                for f in files:
                    # BUG FIX: join against the directory actually being
                    # walked (``root``) rather than the top-level cache dir,
                    # so files inside subdirectories resolve correctly.
                    os.remove(os.path.join(root, f))
| 37.019608 | 101 | 0.598782 | import os
import random
import pickle
import hashlib
import aiohttp
from fake_useragent import UserAgent
FILEPATH = os.path.dirname(os.path.abspath(__file__))
USER_AGENT_LIST = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/72.0.3626.121 Safari/537.36",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0",
"Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
"Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) "
"Chrome/19.0.1084.46 Safari/536.5",
]
def get_rand_user_agent():
    """Return a random User-Agent string.

    Prefers a fresh value from fake_useragent's database; falls back to a
    hard-coded entry from USER_AGENT_LIST when that lookup fails (e.g. no
    network access or a stale database).
    """
    user_agent = random.choice(USER_AGENT_LIST)
    try:
        user_agent = UserAgent().random
    except Exception:
        # Catch Exception rather than a bare except so SystemExit and
        # KeyboardInterrupt still propagate; any fake_useragent failure
        # silently falls back to the canned list above.
        pass
    return user_agent
class CacheHandler:
    """Disk-backed cache of fetched search-engine result pages.

    One sub-directory per engine module found in ``engines/``; each cached
    page is pickled under the SHA-256 hash of its URL.
    """

    def __init__(self):
        self.cache = os.path.join(FILEPATH, "cache")
        engine_path = os.path.join(FILEPATH, "engines")
        if not os.path.exists(self.cache):
            os.makedirs(self.cache)
        enginelist = os.listdir(engine_path)
        # Strip the ".py" suffix to get engine names; skip the package's
        # __init__.py.  (The original `i not in ("__init__.py")` compared
        # against a plain string -- a substring test -- not a tuple.)
        self.engine_cache = {i[:-3]: os.path.join(self.cache, i[:-3])
                             for i in enginelist if i != "__init__.py"}
        for cache in self.engine_cache.values():
            if not os.path.exists(cache):
                os.makedirs(cache)

    async def get_source(self, engine, url, headers, cache=True,
                         proxy=None, proxy_auth=None):
        """Fetch ``url``, serving from / populating the on-disk cache.

        :param engine: engine name (case-insensitive cache key)
        :param url: page to fetch
        :param headers: HTTP headers for the request
        :param cache: when True, return a cached copy if one exists
        :param proxy: optional proxy URL
        :param proxy_auth: (user, password) tuple to authenticate proxy
        :return: ``(html, from_cache)`` tuple
        """
        encodedUrl = url.encode("utf-8")
        urlhash = hashlib.sha256(encodedUrl).hexdigest()
        engine = engine.lower()
        cache_path = os.path.join(self.engine_cache[engine], urlhash)
        if os.path.exists(cache_path) and cache:
            with open(cache_path, 'rb') as stream:
                return pickle.load(stream), True
        get_vars = {'url': url, 'headers': headers}
        if proxy and proxy_auth:
            auth = aiohttp.BasicAuth(*proxy_auth)
            get_vars.update({'proxy': proxy, 'proxy_auth': auth})
        async with aiohttp.ClientSession() as session:
            async with session.get(**get_vars) as resp:
                html = await resp.text()
        with open(cache_path, 'wb') as stream:
            pickle.dump(str(html), stream)
        return str(html), False

    def clear(self, engine=None):
        """Delete cached pages for one engine (case-insensitive) or for all."""
        if not engine:
            caches = self.engine_cache.values()
        else:
            caches = [self.engine_cache[engine.lower()]]
        for engine_cache in caches:
            for root, _, files in os.walk(engine_cache):
                for f in files:
                    # Join with the walked directory, not the top-level one,
                    # so files in nested sub-directories resolve correctly.
                    os.remove(os.path.join(root, f))
| true | true |
f7211f5a04fad86d5e96b8e6c5fee8d770e20d1e | 5,324 | py | Python | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null | leddar_ros2/leddar_sensor.py | JulienStanguennec-Leddartech/leddar_ros2 | 15f2674d8e7c472bc56c4be9cfd41f0d8d39c0bf | [
"BSD-3-Clause"
] | null | null | null |
import sys
import os
import time
#Import ros2 py
import rclpy
from rclpy.node import Node
#Import messages
import sensor_msgs.msg as sensor_msgs
import std_msgs.msg as std_msgs
#Import parameters (to read parameters)
from rclpy.parameter import Parameter
import numpy as np
import leddar
def point_cloud(points, parent_frame):
    """Build a sensor_msgs/PointCloud2 message from an Nx3 xyz array.

    :param points: Nx3 array of xyz positions
    :param parent_frame: frame in which the point cloud is defined
    :return: sensor_msgs.PointCloud2 message
    """
    # The cloud is serialized as a flat byte buffer of 32-bit floats,
    # three per point (x, y, z).
    float_size = np.dtype(np.float32).itemsize
    raw_bytes = points.astype(np.float32).tobytes()

    # Describe the byte layout of a single point: x, y, z at consecutive
    # float-sized offsets.
    layout = []
    for index, axis in enumerate('xyz'):
        layout.append(sensor_msgs.PointField(
            name=axis,
            offset=index * float_size,
            datatype=sensor_msgs.PointField.FLOAT32,
            count=1))

    # The header names the coordinate frame the cloud lives in.
    return sensor_msgs.PointCloud2(
        header=std_msgs.Header(frame_id=parent_frame),
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=layout,
        point_step=(float_size * 3),
        row_step=(float_size * 3 * points.shape[0]),
        data=raw_bytes
    )
class LeddarSensor(Node):
    """ROS2 node that streams Leddar echo detections as PointCloud2 messages."""

    def __init__(self):
        super().__init__('leddar_sensor')
        # Point cloud publisher topic.
        self.publisher = self.create_publisher(sensor_msgs.PointCloud2, 'scan_cloud', 10)
        # Connection parameters; defaults suit a Pixell sensor over Ethernet.
        self.declare_parameters(
            namespace='',
            parameters=[
                ('param1', '192.168.0.2'),
                ('device_type', 'Ethernet'),
                ('param3', 48630),
                ('param4', 0)
            ]
        )
        param1 = str(self.get_parameter('param1').value)
        device_type = str(self.get_parameter('device_type').value)
        param3 = int(self.get_parameter('param3').value)
        param4 = int(self.get_parameter('param4').value)
        # Create and connect the sensor.
        self.dev = leddar.Device()
        dev_type = 0
        if device_type != "not specified":
            dev_type = leddar.device_types[device_type]
        if not self.dev.connect(param1, dev_type, param3, param4):
            err_msg = 'Error connecting to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4))
            raise RuntimeError(err_msg)
        self.get_logger().info('Connected to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4)))
        # Stream echo detections through echoes_callback.
        self.dev.set_callback_echo(self.echoes_callback)
        self.dev.set_data_mask(leddar.data_masks["DM_ECHOES"])
        # Optional: delay (us) between two requests to the sensor.
        self.dev.set_data_thread_delay(10000)
        self.dev.start_data_thread()

    def echoes_callback(self, echoes):
        """Convert one batch of echoes to a PointCloud2 message and publish it."""
        # Keep valid echoes only (bit 0 of the flags field).  `np.bool` was
        # removed in NumPy 1.24; the builtin bool is the correct dtype here.
        valid = np.bitwise_and(echoes['data']['flags'], 0x01).astype(bool)
        echoes['data'] = echoes['data'][valid]
        # Only the cartesian coordinates are published; the other record
        # fields (indices, flags, distances, amplitudes) are not needed.
        x, y, z = (echoes['data'][field] for field in ('x', 'y', 'z'))
        message = point_cloud(np.array([x, y, z]).T, 'map')
        self.publisher.publish(message)
def main(args=None):
    """Initialise rclpy, spin a LeddarSensor node, then shut down."""
    rclpy.init(args=args)
    node = LeddarSensor()
    rclpy.spin(node)
    # Destroying the node explicitly is optional -- the garbage collector
    # would otherwise do it when the node object is collected.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| 33.696203 | 150 | 0.655147 |
import sys
import os
import time
import rclpy
from rclpy.node import Node
import sensor_msgs.msg as sensor_msgs
import std_msgs.msg as std_msgs
from rclpy.parameter import Parameter
import numpy as np
import leddar
def point_cloud(points, parent_frame):
    """Build a sensor_msgs/PointCloud2 message from an Nx3 xyz array.

    :param points: Nx3 array of xyz positions
    :param parent_frame: frame in which the point cloud is defined
    :return: sensor_msgs.PointCloud2 message
    """
    # The cloud is serialized as a flat byte buffer of 32-bit floats,
    # three per point (x, y, z).
    float_size = np.dtype(np.float32).itemsize
    raw_bytes = points.astype(np.float32).tobytes()

    # Describe the byte layout of a single point: x, y, z at consecutive
    # float-sized offsets.
    layout = []
    for index, axis in enumerate('xyz'):
        layout.append(sensor_msgs.PointField(
            name=axis,
            offset=index * float_size,
            datatype=sensor_msgs.PointField.FLOAT32,
            count=1))

    # The header names the coordinate frame the cloud lives in.
    return sensor_msgs.PointCloud2(
        header=std_msgs.Header(frame_id=parent_frame),
        height=1,
        width=points.shape[0],
        is_dense=False,
        is_bigendian=False,
        fields=layout,
        point_step=(float_size * 3),
        row_step=(float_size * 3 * points.shape[0]),
        data=raw_bytes
    )
class LeddarSensor(Node):
    """ROS2 node that streams Leddar echo detections as PointCloud2 messages."""

    def __init__(self):
        super().__init__('leddar_sensor')
        # Point cloud publisher topic.
        self.publisher = self.create_publisher(sensor_msgs.PointCloud2, 'scan_cloud', 10)
        # Connection parameters; defaults suit a Pixell sensor over Ethernet.
        self.declare_parameters(
            namespace='',
            parameters=[
                ('param1', '192.168.0.2'),
                ('device_type', 'Ethernet'),
                ('param3', 48630),
                ('param4', 0)
            ]
        )
        param1 = str(self.get_parameter('param1').value)
        device_type = str(self.get_parameter('device_type').value)
        param3 = int(self.get_parameter('param3').value)
        param4 = int(self.get_parameter('param4').value)
        # Create and connect the sensor.
        self.dev = leddar.Device()
        dev_type = 0
        if device_type != "not specified":
            dev_type = leddar.device_types[device_type]
        if not self.dev.connect(param1, dev_type, param3, param4):
            err_msg = 'Error connecting to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4))
            raise RuntimeError(err_msg)
        self.get_logger().info('Connected to device type {0} with connection info {1}/{2}/{3}.'.format(device_type, param1, str(param3), str(param4)))
        # Stream echo detections through echoes_callback.
        self.dev.set_callback_echo(self.echoes_callback)
        self.dev.set_data_mask(leddar.data_masks["DM_ECHOES"])
        # Delay (us) between two requests to the sensor.
        self.dev.set_data_thread_delay(10000)
        self.dev.start_data_thread()

    def echoes_callback(self, echoes):
        """Convert one batch of echoes to a PointCloud2 message and publish it."""
        # Keep valid echoes only (bit 0 of the flags field).  `np.bool` was
        # removed in NumPy 1.24; the builtin bool is the correct dtype here.
        valid = np.bitwise_and(echoes['data']['flags'], 0x01).astype(bool)
        echoes['data'] = echoes['data'][valid]
        # Only the cartesian coordinates are published; the other record
        # fields (indices, flags, distances, amplitudes) are not needed.
        x, y, z = (echoes['data'][field] for field in ('x', 'y', 'z'))
        message = point_cloud(np.array([x, y, z]).T, 'map')
        self.publisher.publish(message)
def main(args=None):
    """Initialise rclpy, spin a LeddarSensor node, then shut down."""
    rclpy.init(args=args)
    node = LeddarSensor()
    rclpy.spin(node)
    # Destroying the node explicitly is optional -- the garbage collector
    # would otherwise do it when the node object is collected.
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
| true | true |
f7211f913ac30f34f3eb6b9c021cc65dc21ed271 | 3,962 | py | Python | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | 3 | 2020-09-16T06:14:00.000Z | 2021-03-17T00:05:06.000Z | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | null | null | null | StimRespFlow/DataStruct/WaveData.py | powerfulbean/StellarWave | 877d5113054f391f605c8e39f1a0f60f7bfeeee1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 23:21:06 2021
@author: ShiningStone
"""
import datetime
import numpy as np
from .Abstract import CWaveData,CTimeStampsGen
class CDateTimeStampsGen(CTimeStampsGen):
    # Timestamp-sequence generator specialised for wall-clock values:
    # `start` is a datetime.datetime, `delta` a datetime.timedelta, and
    # `nLen` the number of stamps to generate.  Construction is forwarded
    # to CTimeStampsGen unchanged.
    def __init__(self,start:datetime.datetime,delta:datetime.timedelta,nLen):
        super().__init__(start,delta,nLen)
class CBitalinoWaveData(CWaveData): # EEG unit: uV; EOG unit: mv
    """Wave-data container that loads OpenSignals (Bitalino) text exports."""

    def __init__(self):
        # Sampling rate / channel count are unknown until a file is read,
        # so placeholders are passed to the base class for now.
        super().__init__(-1,-1,CTimeStampsGen(0, 0, 1))

    def readFile(self,filename,mode = 'EEG'):
        """Load a Bitalino export file.

        :param filename: OpenSignals .txt file to parse
        :param mode: 'EEG', 'EOG' or 'EEGandEOG' -- which column(s) to decode
        :return: (raw sample array, device description dict)
        """
        print("start reading bitalinofile")
        from pylab import loadtxt
        import json
        dataDescription = ''
        # The header lines are '#'-prefixed; the JSON device-description line
        # starts with '# {'.  Data rows (tab separated) follow the header and
        # are parsed by loadtxt below, so the loop stops at the JSON line.
        with open(filename,'r') as f:
            for rowCont in f.readlines():
                if(rowCont[0] == '#' and rowCont[2] != '{'):
                    pass
                elif(rowCont[2] == '{'):
                    dataDescription = json.loads(rowCont[2:])
                    break
        data = loadtxt(filename)
        rowArrayNum = data
        # The description dict is keyed by the device MAC address; unwrap it.
        for key in dataDescription.keys():
            dataDescription = dataDescription[key]
        self.timestamps = rowArrayNum[:,0]
        self.description = dataDescription
        if mode=='EEG':
            self.nChan = 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            self.description["channelInfo"] = [[1],['EarEEG']]
        elif mode == 'EOG':
            self.nChan= 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'mV')), 0)
            self.description["channelInfo"] = [[1],['Eog']]
        elif mode == 'EEGandEOG':
            data1 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            data2 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'uV')), 0)
            self.nChan = 2
            self.data = np.concatenate([data1,data2],0)
            self.description['channelInfo'] = [[1,2],['EarEEG','Eog']]
        else:
            print("bitalino error: doesn't support this mode!")
        startTime = datetime.datetime.strptime( dataDescription['date'] + ' ' + dataDescription['time'], '%Y-%m-%d %H:%M:%S.%f')
        self.srate = dataDescription["sampling rate"]
        print("reading bitalinofile Finished")
        delta = datetime.timedelta(seconds = 1/self.srate)
        # Initiate the timestamp sequence generator, one stamp per sample.
        self.timeStampsGen = CDateTimeStampsGen(startTime,delta,len(self.timestamps))
        self.calTimeStamp(self.timeStampsGen)
        return data, dataDescription

    def getRealSignal(self,sampleDataArray, bitNumber ,VCC = 3.3 , Geeg = 40000, unit = 'uV'):
        """Convert raw ADC samples to physical units ('uV' or 'mV').

        Vectorised with NumPy instead of the previous per-sample Python loop.
        """
        output = self._eegTransferFuntion(np.asarray(sampleDataArray), bitNumber, VCC, Geeg)
        if(unit == 'uV'):
            output = output * (10**6)
        elif(unit == 'mV'):
            output = output * (10**3)
        return output

    def _eegTransferFuntion(self,sampleValue, bitNumber ,VCC, Geeg):
        # Bitalino transfer function; works element-wise on scalars or arrays.
        output = (( (sampleValue/2**bitNumber) - 1/2) * VCC ) / Geeg
        return output

    def __len__(self):
        return len(self.data)
| 39.62 | 128 | 0.575719 |
import datetime
import numpy as np
from .Abstract import CWaveData,CTimeStampsGen
class CDateTimeStampsGen(CTimeStampsGen):
    # Timestamp-sequence generator specialised for wall-clock values:
    # `start` is a datetime.datetime, `delta` a datetime.timedelta, and
    # `nLen` the number of stamps to generate.  Construction is forwarded
    # to CTimeStampsGen unchanged.
    def __init__(self,start:datetime.datetime,delta:datetime.timedelta,nLen):
        super().__init__(start,delta,nLen)
class CBitalinoWaveData(CWaveData): # EEG unit: uV; EOG unit: mv
    """Wave-data container that loads OpenSignals (Bitalino) text exports."""

    def __init__(self):
        # Sampling rate / channel count are unknown until a file is read,
        # so placeholders are passed to the base class for now.
        super().__init__(-1,-1,CTimeStampsGen(0, 0, 1))

    def readFile(self,filename,mode = 'EEG'):
        """Load a Bitalino export file.

        :param filename: OpenSignals .txt file to parse
        :param mode: 'EEG', 'EOG' or 'EEGandEOG' -- which column(s) to decode
        :return: (raw sample array, device description dict)
        """
        print("start reading bitalinofile")
        from pylab import loadtxt
        import json
        dataDescription = ''
        # The header lines are '#'-prefixed; the JSON device-description line
        # starts with '# {'.  (This restores the '#' literal that the comment
        # stripper truncated in this copy.)  Data rows are parsed by loadtxt.
        with open(filename,'r') as f:
            for rowCont in f.readlines():
                if(rowCont[0] == '#' and rowCont[2] != '{'):
                    pass
                elif(rowCont[2] == '{'):
                    dataDescription = json.loads(rowCont[2:])
                    break
        data = loadtxt(filename)
        rowArrayNum = data
        # The description dict is keyed by the device MAC address; unwrap it.
        for key in dataDescription.keys():
            dataDescription = dataDescription[key]
        self.timestamps = rowArrayNum[:,0]
        self.description = dataDescription
        if mode=='EEG':
            self.nChan = 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            self.description["channelInfo"] = [[1],['EarEEG']]
        elif mode == 'EOG':
            self.nChan= 1
            self.data = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'mV')), 0)
            self.description["channelInfo"] = [[1],['Eog']]
        elif mode == 'EEGandEOG':
            data1 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-1],10,3.3,40000,'uV')), 0)
            data2 = np.expand_dims(np.array(self.getRealSignal(rowArrayNum[:,-2],10,3.3,2040, 'uV')), 0)
            self.nChan = 2
            self.data = np.concatenate([data1,data2],0)
            self.description['channelInfo'] = [[1,2],['EarEEG','Eog']]
        else:
            print("bitalino error: doesn't support this mode!")
        startTime = datetime.datetime.strptime( dataDescription['date'] + ' ' + dataDescription['time'], '%Y-%m-%d %H:%M:%S.%f')
        self.srate = dataDescription["sampling rate"]
        print("reading bitalinofile Finished")
        delta = datetime.timedelta(seconds = 1/self.srate)
        # Initiate the timestamp sequence generator, one stamp per sample.
        self.timeStampsGen = CDateTimeStampsGen(startTime,delta,len(self.timestamps))
        self.calTimeStamp(self.timeStampsGen)
        return data, dataDescription

    def getRealSignal(self,sampleDataArray, bitNumber ,VCC = 3.3 , Geeg = 40000, unit = 'uV'):
        """Convert raw ADC samples to physical units ('uV' or 'mV').

        Vectorised with NumPy instead of the previous per-sample Python loop.
        """
        output = self._eegTransferFuntion(np.asarray(sampleDataArray), bitNumber, VCC, Geeg)
        if(unit == 'uV'):
            output = output * (10**6)
        elif(unit == 'mV'):
            output = output * (10**3)
        return output

    def _eegTransferFuntion(self,sampleValue, bitNumber ,VCC, Geeg):
        # Bitalino transfer function; works element-wise on scalars or arrays.
        output = (( (sampleValue/2**bitNumber) - 1/2) * VCC ) / Geeg
        return output

    def __len__(self):
        return len(self.data)
| true | true |
f721210773ad82cd155b9581ac29c5f1c9609d67 | 20,043 | py | Python | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | 1 | 2017-06-11T01:32:33.000Z | 2017-06-11T01:32:33.000Z | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | null | null | null | conda/models/match_spec.py | abar2day/najran | 3a30636f494275b0f259be7b1875fd0fd7759f20 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
import re
from .channel import Channel, MultiChannel
from .dist import Dist
from .index_record import IndexRecord
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import isiterable, iteritems, string_types, text_type, with_metaclass
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat # NOQA
class MatchSpecType(type):
    # Metaclass for MatchSpec: normalizes the many accepted constructor
    # argument types (MatchSpec, str, Mapping, Dist, IndexRecord, or any
    # object with a .dump() method) into keyword arguments before the
    # instance is actually created.  Branch order below is significant.

    def __call__(cls, spec_arg=None, **kwargs):
        if spec_arg:
            # A bare MatchSpec with no overriding kwargs is returned as-is.
            if isinstance(spec_arg, MatchSpec) and not kwargs:
                return spec_arg
            elif isinstance(spec_arg, MatchSpec):
                # Copy an existing spec: explicit kwargs may set
                # optional/target, but the source's match components win.
                kwargs.setdefault('optional', spec_arg.optional)
                kwargs.setdefault('target', spec_arg.target)
                kwargs.update(spec_arg._match_components)
                return super(MatchSpecType, cls).__call__(**kwargs)
            elif isinstance(spec_arg, string_types):
                # Parse the string form; explicit kwargs override parsed keys.
                parsed = _parse_spec_str(spec_arg)
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Mapping):
                parsed = dict(spec_arg, **kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Dist):
                # TODO: remove this branch
                parsed = {
                    'fn': spec_arg.to_filename(),
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, IndexRecord):
                # TODO: remove this branch
                parsed = {
                    'name': spec_arg.name,
                    'fn': spec_arg.fn,
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif hasattr(spec_arg, 'dump'):
                # Duck-typed record objects that can serialize themselves.
                parsed = spec_arg.dump()
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            else:
                raise CondaValueError("Invalid MatchSpec:\n  spec_arg=%s\n  kwargs=%s"
                                      % (spec_arg, kwargs))
        else:
            return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
    """
    The easiest way to build `MatchSpec` objects that match to arbitrary fields is to
    use a keyword syntax.  For instance,

        MatchSpec(name='foo', build='py2*', channel='conda-forge')

    matches any package named `foo` built with a Python 2 build string in the
    `conda-forge` channel.  Available keywords to be matched against are fields of
    the `IndexRecord` model object.

    Strings are interpreted using the following conventions:
      - If the string begins with `^` and ends with `$`, it is converted to a regex.
      - If the string contains an asterisk (`*`), it is transformed from a glob to a regex.
      - Otherwise, an exact match to the string is sought.

    The `.match()` method accepts an `IndexRecord` or dictionary, and matches can pull
    from any field in that record.

    Great pain has been taken to preserve back-compatibility with the standard
    `name version build` syntax.  But strictly speaking it is not necessary.  Now, the
    following are all equivalent:
      - `MatchSpec('foo 1.0 py27_0', optional=True)`
      - `MatchSpec("* [name='foo',version='1.0',build='py27_0']", optional=True)`
      - `MatchSpec("foo[version='1.0',optional,build='py27_0']")`
      - `MatchSpec(name='foo', optional=True, version='1.0', build='py27_0')`
    """

    # Fields rendered by __repr__/__str__, in canonical display order.
    FIELD_NAMES = (
        'channel',
        'subdir',
        'name',
        'version',
        'build',
        'build_number',
        'track_features',
        'md5',
    )

    def __init__(self, optional=False, target=None, **kwargs):
        # `optional` and `target` are spec metadata, not match criteria;
        # every other keyword becomes a per-field matcher object.
        self.optional = optional
        self.target = target
        self._match_components = self._build_components(**kwargs)

    def get_exact_value(self, field_name):
        # Returns the field's value only when it is an exact (non-pattern)
        # specification; otherwise None (or None if the field is unset).
        v = self._match_components.get(field_name)
        return v and v.exact_value

    def get_raw_value(self, field_name):
        # Returns the value as originally given, pattern or not.
        v = self._match_components.get(field_name)
        return v and v.raw_value

    def _is_simple(self):
        # True when the spec constrains nothing but an exact package name.
        return len(self._match_components) == 1 and self.get_exact_value('name') is not None

    def _is_single(self):
        return len(self._match_components) == 1

    def match(self, rec):
        """
        Accepts an `IndexRecord` or a dict, and matches can pull from any field
        in that record.  Returns True for a match, and False for no match.
        """
        for f, v in iteritems(self._match_components):
            val = getattr(rec, f)
            # Matcher objects implement .match(); plain values fall back to ==.
            if not (v.match(val) if hasattr(v, 'match') else v == val):
                return False
        return True

    def _to_filename_do_not_use(self):
        # WARNING: this is potentially unreliable and use should probably be limited
        # returns None if a filename can't be constructed
        fn_field = self.get_exact_value('fn')
        if fn_field:
            return fn_field
        vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
        if not any(x is None for x in vals):
            return '%s-%s-%s.tar.bz2' % vals
        else:
            return None

    def __repr__(self):
        builder = []
        builder += ["%s=%r" % (c, self._match_components[c])
                    for c in self.FIELD_NAMES if c in self._match_components]
        if self.optional:
            builder.append("optional=True")
        if self.target:
            builder.append("target=%r" % self.target)
        return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))

    def __str__(self):
        # Renders the canonical "channel/subdir::name==version[key=value]" form.
        builder = []
        channel_matcher = self._match_components.get('channel')
        if channel_matcher:
            builder.append(text_type(channel_matcher))
        subdir_matcher = self._match_components.get('subdir')
        if subdir_matcher:
            builder.append(('/%s' if builder else '*/%s') % subdir_matcher)
        name_matcher = self._match_components.get('name', '*')
        builder.append(('::%s' if builder else '%s') % name_matcher)
        xtra = []
        version = self._match_components.get('version')
        if version:
            version = text_type(version)
            if any(s in version for s in '><$^|,'):
                # Complex version expressions go in the bracket section.
                xtra.append("version='%s'" % version)
            elif version.endswith('.*'):
                builder.append('=' + version[:-2])
            elif version.endswith('*'):
                builder.append('=' + version[:-1])
            else:
                builder.append('==' + version)
        _skip = ('channel', 'subdir', 'name', 'version')
        for key in self.FIELD_NAMES:
            if key not in _skip and key in self._match_components:
                value = text_type(self._match_components[key])
                # Quote values containing separators so they re-parse cleanly.
                if any(s in value for s in ', ='):
                    xtra.append("%s='%s'" % (key, self._match_components[key]))
                else:
                    xtra.append("%s=%s" % (key, self._match_components[key]))
        if xtra:
            builder.append('[%s]' % ','.join(xtra))
        return ''.join(builder)

    def conda_build_form(self):
        # Renders the legacy space-separated "name version build" form.
        builder = []
        name = self.get_exact_value('name')
        assert name
        builder.append(name)
        build = self.get_raw_value('build')
        version = self.get_raw_value('version')
        if build:
            assert version
            builder += [version, build]
        elif version:
            builder.append(version)
        return ' '.join(builder)

    def __eq__(self, other):
        if isinstance(other, MatchSpec):
            self_key = self._match_components, self.optional, self.target
            other_key = other._match_components, other.optional, other.target
            return self_key == other_key
        else:
            return False

    def __hash__(self):
        # NOTE(review): hash ignores optional/target while __eq__ includes
        # them -- equal-hash specs can still compare unequal, which is legal
        # but worth confirming is intended.
        return hash(self._match_components)

    def __contains__(self, field):
        return field in self._match_components

    @staticmethod
    def _build_components(**kwargs):
        # Wraps each keyword value in the matcher type registered for its
        # field (see module-level _implementors); plain strings default to
        # StrMatch.  The result is immutable so MatchSpec can be hashed.
        def _make(field_name, value):
            if field_name not in IndexRecord.__fields__:
                raise CondaValueError('Cannot match on field %s' % (field_name,))
            elif isinstance(value, string_types):
                value = text_type(value)
            if hasattr(value, 'match'):
                # Already a matcher (or regex-like) object; use it as-is.
                matcher = value
            elif field_name in _implementors:
                matcher = _implementors[field_name](value)
            elif text_type(value):
                matcher = StrMatch(value)
            else:
                raise NotImplementedError()
            return matcher
        return frozendict((key, _make(key, value)) for key, value in iteritems(kwargs))

    @property
    def name(self):
        return self.get_exact_value('name') or '*'

    #
    # Remaining methods are for back compatibility with conda-build. Do not remove
    # without coordination with the conda-build team.
    #
    @property
    def strictness(self):
        # With the old MatchSpec, strictness==3 if name, version, and
        # build were all specified.
        s = sum(f in self._match_components for f in ('name', 'version', 'build'))
        if s < len(self._match_components):
            return 3
        elif not self.get_exact_value('name') or 'build' in self._match_components:
            return 3
        elif 'version' in self._match_components:
            return 2
        else:
            return 1

    @property
    def spec(self):
        return self.conda_build_form()

    @property
    def version(self):
        # in the old MatchSpec object, version was a VersionSpec, not a str
        # so we'll keep that API here
        return self._match_components.get('version')
def _parse_version_plus_build(v_plus_b):
"""This should reliably pull the build string out of a version + build string combo.
Examples:
>>> _parse_version_plus_build("=1.2.3 0")
('=1.2.3', '0')
>>> _parse_version_plus_build("1.2.3=0")
('1.2.3', '0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build(">=1.0 , < 2.0 =py34_0")
('>=1.0,<2.0', 'py34_0')
>>> _parse_version_plus_build("=1.2.3 ")
('=1.2.3', None)
>>> _parse_version_plus_build(">1.8,<2|==1.7")
('>1.8,<2|==1.7', None)
>>> _parse_version_plus_build("* openblas_0")
('*', 'openblas_0')
>>> _parse_version_plus_build("* *")
('*', '*')
"""
parts = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
if parts:
version, build = parts.groups()
build = build and build.strip()
else:
version, build = v_plus_b, None
return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
    """Split a legacy dist string into its (name, version, build) triple.

    Examples:
        >>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
        ('_license', '1.1', 'py27_1')
        >>> _parse_legacy_dist("_license-1.1-py27_1")
        ('_license', '1.1', 'py27_1')
    """
    # Drop a trailing .tar.bz2 extension if present, then split from the
    # right so dashes inside the package name survive.
    if dist_str.endswith(CONDA_TARBALL_EXTENSION):
        dist_str = dist_str[:-len(CONDA_TARBALL_EXTENSION)]
    name, version, build = dist_str.rsplit('-', 2)
    return name, version, build
def _parse_channel(channel_val):
    """Return (canonical channel name, subdir) for a channel string, or
    (None, None) when no channel was given."""
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    # MultiChannel objects expose their group name; ordinary channels are
    # reduced to their canonical name.
    if isinstance(chn, MultiChannel):
        return chn.name, chn.subdir
    return chn.canonical_name, chn.subdir
def _parse_spec_str(spec_str):
    # Parse the string form of a MatchSpec into a dict of match components.
    # Handles tarball URLs/paths, [key=value] bracket sections, the
    # channel::name prefix, and the legacy "name version build" syntax.

    # Step 1. strip '#' comment
    if '#' in spec_str:
        ndx = spec_str.index('#')
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        # NOTE(review): the result of .strip() is discarded here, so trailing
        # whitespace survives; the regexes below tolerate it, but this looks
        # like it was meant to be `spec_str = spec_str.strip()` -- confirm.
        spec_str.strip()

    # Step 2. done if spec_str is a tarball
    if spec_str.endswith(CONDA_TARBALL_EXTENSION):
        # treat as a normal url
        if not is_url(spec_str):
            spec_str = unquote(path_to_url(expand(spec_str)))

        channel = Channel(spec_str)
        if not channel.subdir:
            # url is not a channel
            raise CondaValueError("Invalid MatchSpec Channel: %s" % spec_str)
        # A tarball pins everything: channel, subdir, name, version, build, fn.
        name, version, build = _parse_legacy_dist(channel.package_filename)
        result = {
            'channel': channel.canonical_name,
            'subdir': channel.subdir,
            'name': name,
            'version': version,
            'build': build,
            'fn': channel.package_filename,
        }
        return result

    # Step 3. strip off brackets portion
    brackets = {}
    m1 = re.match(r'^(.*)(?:\[(.*)\])$', spec_str)
    if m1:
        spec_str, brackets_str = m1.groups()
        brackets_str = brackets_str.strip("[]\n\r\t ")

        # key=value pairs, values optionally quoted with ' or ".
        m5 = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
        for match in m5:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
            brackets[key] = value

    # Step 4. strip off '::' channel and namespace
    m2 = spec_str.rsplit(':', 2)
    m2_len = len(m2)
    if m2_len == 3:
        channel_str, namespace, spec_str = m2
    elif m2_len == 2:
        namespace, spec_str = m2
        channel_str = None
    elif m2_len:
        spec_str = m2[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # Bracket values take precedence over the :: prefix for channel/subdir.
    if 'channel' in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if 'subdir' in brackets:
        subdir = brackets.pop('subdir')

    # Step 5. strip off package name from remaining version + build
    m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    else:
        raise CondaValueError("Invalid MatchSpec: %s" % spec_str)

    # Step 6. sort out version + build
    spec_str = spec_str and spec_str.strip()
    if spec_str:
        if '[' in spec_str:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)

        version, build = _parse_version_plus_build(spec_str)

        # translate version '=1.2.3' to '1.2.3*'
        # is it a simple version starting with '='? i.e. '=1.2.3'
        if version.startswith('='):
            test_str = version[1:]
            if version.startswith('==') and build is None:
                version = version[2:]
            elif not any(c in test_str for c in "=,|"):
                if build is None and not test_str.endswith('*'):
                    version = test_str + '*'
                else:
                    version = test_str
    else:
        version, build = None, None

    # Step 7. now compile components together
    components = {}
    components['name'] = name if name else '*'
    if channel is not None:
        components['channel'] = channel
    if subdir is not None:
        components['subdir'] = subdir
    if namespace is not None:
        # namespaces are parsed but intentionally not yet used as a component
        # components['namespace'] = namespace
        pass
    if version is not None:
        components['version'] = version
    if build is not None:
        components['build'] = build

    # anything in brackets will now strictly override key as set in other area of spec str
    components.update(brackets)

    return components
@with_metaclass(ABCMeta)
class MatchInterface(object):
    """Common interface for the per-field matcher objects used by MatchSpec."""

    def __init__(self, value):
        self._raw_value = value

    @abstractmethod
    def match(self, other):
        """Return a truthy value when `other` satisfies this matcher."""
        raise NotImplementedError

    def matches(self, value):
        return self.match(value)

    @property
    def raw_value(self):
        return self._raw_value

    # abc.abstractproperty is deprecated; stacking @property with
    # @abstractmethod is the supported equivalent and behaves identically.
    @property
    @abstractmethod
    def exact_value(self):
        """If the match value is an exact specification, returns the value.
        Otherwise returns None.
        """
        raise NotImplementedError()
class SplitStrMatch(MatchInterface):
    """Matcher for fields holding a set of feature names, given either as an
    iterable or as a comma/space-delimited string."""
    __slots__ = '_raw_value',

    def __init__(self, value):
        super(SplitStrMatch, self).__init__(self._convert(value))

    def _convert(self, value):
        # Strings are split on commas (spaces count as commas); any other
        # iterable is frozen as-is; everything else raises AttributeError.
        try:
            normalized = value.replace(' ', ',')
        except AttributeError:
            if isiterable(value):
                return frozenset(value)
            raise
        return frozenset(normalized.split(','))

    def match(self, other):
        # Truthy when the two feature sets intersect.
        try:
            return other and self._raw_value & other._raw_value
        except AttributeError:
            return self._raw_value & self._convert(other)

    def __repr__(self):
        if not self._raw_value:
            return 'set()'
        return "{%s}" % ', '.join("'%s'" % s for s in sorted(self._raw_value))

    def __str__(self):
        # this space delimiting makes me nauseous
        return ' '.join(sorted(self._raw_value))

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class ChannelMatch(MatchInterface):
    """Matcher comparing the canonical names of conda channels."""
    __slots__ = '_raw_value',

    def __init__(self, value):
        super(ChannelMatch, self).__init__(Channel(value))

    def match(self, other):
        # `other` may be another ChannelMatch or anything Channel() accepts.
        try:
            other_name = other._raw_value.canonical_name
        except AttributeError:
            other_name = Channel(other).canonical_name
        return self._raw_value.canonical_name == other_name

    def __str__(self):
        return "%s" % self._raw_value.canonical_name

    def __repr__(self):
        return "'%s'" % self._raw_value.canonical_name

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value
class StrMatch(MatchInterface):
    """Exact, glob ('*'), or anchored-regex ('^...$') string matcher."""
    __slots__ = '_raw_value', '_re_match'

    def __init__(self, value):
        super(StrMatch, self).__init__(value)
        # Pre-compile a regex when the value is a pattern; otherwise fall
        # back to plain string equality in match().
        if value.startswith('^') and value.endswith('$'):
            self._re_match = re.compile(value).match
        elif '*' in value:
            self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
        else:
            self._re_match = None

    def match(self, other):
        try:
            other_val = other._raw_value
        except AttributeError:
            other_val = text_type(other)
        if self._re_match is None:
            return self._raw_value == other_val
        return self._re_match(other_val)

    def __str__(self):
        return self._raw_value

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._raw_value)

    def __eq__(self, other):
        return self.match(other)

    def __hash__(self):
        return hash(self._raw_value)

    @property
    def exact_value(self):
        return self._raw_value if self._re_match is None else None
class LowerStrMatch(StrMatch):
    # Case-insensitive variant of StrMatch: the raw value is lower-cased
    # before any comparison (used for package names).
    def __init__(self, value):
        super(LowerStrMatch, self).__init__(value.lower())
# Field-specific matcher classes.  Fields not listed here fall back to a
# plain StrMatch (see MatchSpec._build_components).
_implementors = {
    'name': LowerStrMatch,
    'features': SplitStrMatch,
    'track_features': SplitStrMatch,
    'version': VersionSpec,
    'build_number': BuildNumberMatch,
    'channel': ChannelMatch,
}
| 32.857377 | 94 | 0.592925 |
from __future__ import absolute_import, division, print_function, unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import Mapping
import re
from .channel import Channel, MultiChannel
from .dist import Dist
from .index_record import IndexRecord
from .version import BuildNumberMatch, VersionSpec
from .._vendor.auxlib.collection import frozendict
from ..base.constants import CONDA_TARBALL_EXTENSION
from ..common.compat import isiterable, iteritems, string_types, text_type, with_metaclass
from ..common.path import expand
from ..common.url import is_url, path_to_url, unquote
from ..exceptions import CondaValueError
try:
from cytoolz.itertoolz import concat
except ImportError:
from .._vendor.toolz.itertoolz import concat
class MatchSpecType(type):
    """Metaclass whose __call__ lets ``MatchSpec(...)`` accept many input
    forms: an existing MatchSpec, a spec string, a Mapping, a Dist, an
    IndexRecord, or any object exposing a ``dump()`` method.
    """
    def __call__(cls, spec_arg=None, **kwargs):
        if spec_arg:
            if isinstance(spec_arg, MatchSpec) and not kwargs:
                # Already a MatchSpec and nothing to override: reuse as-is.
                return spec_arg
            elif isinstance(spec_arg, MatchSpec):
                # Copy an existing spec, letting explicit kwargs win.
                kwargs.setdefault('optional', spec_arg.optional)
                kwargs.setdefault('target', spec_arg.target)
                kwargs.update(spec_arg._match_components)
                return super(MatchSpecType, cls).__call__(**kwargs)
            elif isinstance(spec_arg, string_types):
                # Parse a spec string like "channel::name=version[build=...]".
                parsed = _parse_spec_str(spec_arg)
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Mapping):
                parsed = dict(spec_arg, **kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, Dist):
                # Match on filename and channel only; no name/version fields.
                parsed = {
                    'fn': spec_arg.to_filename(),
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif isinstance(spec_arg, IndexRecord):
                parsed = {
                    'name': spec_arg.name,
                    'fn': spec_arg.fn,
                    'channel': spec_arg.channel,
                }
                return super(MatchSpecType, cls).__call__(**parsed)
            elif hasattr(spec_arg, 'dump'):
                # Duck-typed record objects: use their dumped field dict.
                parsed = spec_arg.dump()
                parsed.update(kwargs)
                return super(MatchSpecType, cls).__call__(**parsed)
            else:
                raise CondaValueError("Invalid MatchSpec:\n  spec_arg=%s\n  kwargs=%s"
                                      % (spec_arg, kwargs))
        else:
            return super(MatchSpecType, cls).__call__(**kwargs)
@with_metaclass(MatchSpecType)
class MatchSpec(object):
    """A query object that matches package records field by field.

    Each constructor kwarg becomes a field matcher (see _build_components);
    a record matches when every configured field matcher accepts the
    record's corresponding attribute value.
    """
    # Canonical field ordering used for __repr__/__str__ rendering.
    FIELD_NAMES = (
        'channel',
        'subdir',
        'name',
        'version',
        'build',
        'build_number',
        'track_features',
        'md5',
    )
    def __init__(self, optional=False, target=None, **kwargs):
        self.optional = optional
        self.target = target
        self._match_components = self._build_components(**kwargs)
    def get_exact_value(self, field_name):
        """Return the field's exact value, or None if absent or inexact."""
        v = self._match_components.get(field_name)
        return v and v.exact_value
    def get_raw_value(self, field_name):
        """Return the field's raw (unparsed) value, or None if absent."""
        v = self._match_components.get(field_name)
        return v and v.raw_value
    def _is_simple(self):
        # True when the spec is nothing but an exact package name.
        return len(self._match_components) == 1 and self.get_exact_value('name') is not None
    def _is_single(self):
        return len(self._match_components) == 1
    def match(self, rec):
        """Return True if record `rec` satisfies every field matcher."""
        for f, v in iteritems(self._match_components):
            val = getattr(rec, f)
            # Matchers expose .match(); bare values fall back to equality.
            if not (v.match(val) if hasattr(v, 'match') else v == val):
                return False
        return True
    def _to_filename_do_not_use(self):
        # Best-effort tarball filename; only valid when fn or all of
        # name/version/build are exact.  Deprecated, per the method name.
        fn_field = self.get_exact_value('fn')
        if fn_field:
            return fn_field
        vals = tuple(self.get_exact_value(x) for x in ('name', 'version', 'build'))
        if not any(x is None for x in vals):
            return '%s-%s-%s.tar.bz2' % vals
        else:
            return None
    def __repr__(self):
        builder = []
        builder += ["%s=%r" % (c, self._match_components[c])
                    for c in self.FIELD_NAMES if c in self._match_components]
        if self.optional:
            builder.append("optional=True")
        if self.target:
            builder.append("target=%r" % self.target)
        return "%s(%s)" % (self.__class__.__name__, ', '.join(builder))
    def __str__(self):
        """Render the spec back into its string form, e.g.
        "defaults::numpy=1.11[build=py27*]".
        """
        builder = []
        channel_matcher = self._match_components.get('channel')
        if channel_matcher:
            builder.append(text_type(channel_matcher))
        subdir_matcher = self._match_components.get('subdir')
        if subdir_matcher:
            builder.append(('/%s' if builder else '*/%s') % subdir_matcher)
        name_matcher = self._match_components.get('name', '*')
        builder.append(('::%s' if builder else '%s') % name_matcher)
        xtra = []
        version = self._match_components.get('version')
        if version:
            version = text_type(version)
            if any(s in version for s in '><$^|,'):
                # Complex version expressions go into the bracket section.
                xtra.append("version='%s'" % version)
            elif version.endswith('.*'):
                builder.append('=' + version[:-2])
            elif version.endswith('*'):
                builder.append('=' + version[:-1])
            else:
                builder.append('==' + version)
        _skip = ('channel', 'subdir', 'name', 'version')
        for key in self.FIELD_NAMES:
            if key not in _skip and key in self._match_components:
                value = text_type(self._match_components[key])
                # Quote values containing separators so they survive re-parsing.
                if any(s in value for s in ', ='):
                    xtra.append("%s='%s'" % (key, self._match_components[key]))
                else:
                    xtra.append("%s=%s" % (key, self._match_components[key]))
        if xtra:
            builder.append('[%s]' % ','.join(xtra))
        return ''.join(builder)
    def conda_build_form(self):
        """Return the "name version build" form used by conda-build."""
        builder = []
        name = self.get_exact_value('name')
        assert name
        builder.append(name)
        build = self.get_raw_value('build')
        version = self.get_raw_value('version')
        if build:
            assert version
            builder += [version, build]
        elif version:
            builder.append(version)
        return ' '.join(builder)
    def __eq__(self, other):
        if isinstance(other, MatchSpec):
            self_key = self._match_components, self.optional, self.target
            other_key = other._match_components, other.optional, other.target
            return self_key == other_key
        else:
            return False
    def __hash__(self):
        # NOTE(review): optional/target participate in __eq__ but not in the
        # hash; equal objects still hash equal, so this is merely lossy.
        return hash(self._match_components)
    def __contains__(self, field):
        return field in self._match_components
    @staticmethod
    def _build_components(**kwargs):
        """Turn each kwarg into a field matcher, frozen into a frozendict."""
        def _make(field_name, value):
            if field_name not in IndexRecord.__fields__:
                raise CondaValueError('Cannot match on field %s' % (field_name,))
            elif isinstance(value, string_types):
                value = text_type(value)
            if hasattr(value, 'match'):
                # Already a matcher object: use it verbatim.
                matcher = value
            elif field_name in _implementors:
                matcher = _implementors[field_name](value)
            elif text_type(value):
                matcher = StrMatch(value)
            else:
                raise NotImplementedError()
            return matcher
        return frozendict((key, _make(key, value)) for key, value in iteritems(kwargs))
    @property
    def name(self):
        return self.get_exact_value('name') or '*'
    #
    # Remaining methods are for back compatibility with conda-build. Do not remove
    # without coordination with the conda-build team.
    #
    @property
    def strictness(self):
        # With the old MatchSpec, strictness==3 if name, version, and
        # build were all specified.
        s = sum(f in self._match_components for f in ('name', 'version', 'build'))
        if s < len(self._match_components):
            return 3
        elif not self.get_exact_value('name') or 'build' in self._match_components:
            return 3
        elif 'version' in self._match_components:
            return 2
        else:
            return 1
    @property
    def spec(self):
        return self.conda_build_form()
    @property
    def version(self):
        # in the old MatchSpec object, version was a VersionSpec, not a str
        # so we'll keep that API here
        return self._match_components.get('version')
def _parse_version_plus_build(v_plus_b):
    """Split a "version build" / "version=build" string into its parts.

    Returns a ``(version, build)`` tuple; either element may be None.
    Spaces inside the version portion are removed.

    Examples:
        "1.2.3"   -> ("1.2.3", None)
        "1.2.3=0" -> ("1.2.3", "0")
    """
    match = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>])(?:[ =])([^-=,|<>]+?))?$', v_plus_b)
    if match is None:
        version, build = v_plus_b, None
    else:
        version, build = match.groups()
        if build:
            build = build.strip()
    return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
    """Split a legacy dist string or tarball filename into its parts.

    Example: "my-package-1.0-py27_0.tar.bz2" -> ("my-package", "1.0", "py27_0")
    """
    stripped = dist_str
    if stripped.endswith(CONDA_TARBALL_EXTENSION):
        # Drop the conda tarball extension before splitting on dashes.
        stripped = stripped[:-len(CONDA_TARBALL_EXTENSION)]
    # Split from the right so dashes inside the package name survive.
    name, version, build = stripped.rsplit('-', 2)
    return name, version, build
def _parse_channel(channel_val):
    """Normalize a channel value into a ``(channel_name, subdir)`` pair.

    Returns ``(None, None)`` for a falsy input.  A MultiChannel keeps its
    plain name; a regular channel contributes its canonical name.
    """
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    channel_name = chn.name if isinstance(chn, MultiChannel) else chn.canonical_name
    return channel_name, chn.subdir
def _parse_spec_str(spec_str):
    """Parse a MatchSpec string into a dict of field name -> raw value.

    Handles, in order: '#' comments, full tarball URLs/paths, bracket
    options such as "[build=py27*]", "channel[:namespace]::" prefixes,
    and trailing version/build expressions.

    :raises CondaValueError: if the string cannot be parsed.
    """
    # Step 1: strip any trailing '#' comment.
    if '#' in spec_str:
        ndx = spec_str.index('#')
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        # BUG FIX: the result of strip() was previously discarded (the line
        # read `spec_str.strip()`), leaving trailing whitespace attached.
        spec_str = spec_str.strip()
    # Step 2: a spec ending in the tarball extension is a direct URL/path.
    if spec_str.endswith(CONDA_TARBALL_EXTENSION):
        if not is_url(spec_str):
            # Local paths are expanded and converted to file:// URLs.
            spec_str = unquote(path_to_url(expand(spec_str)))
        channel = Channel(spec_str)
        if not channel.subdir:
            raise CondaValueError("Invalid MatchSpec Channel: %s" % spec_str)
        name, version, build = _parse_legacy_dist(channel.package_filename)
        result = {
            'channel': channel.canonical_name,
            'subdir': channel.subdir,
            'name': name,
            'version': version,
            'build': build,
            'fn': channel.package_filename,
        }
        return result
    # Step 3: pull off bracket options, e.g. "[build=py27*,md5=...]".
    brackets = {}
    m1 = re.match(r'^(.*)(?:\[(.*)\])$', spec_str)
    if m1:
        spec_str, brackets_str = m1.groups()
        brackets_str = brackets_str.strip("[]\n\r\t ")
        m5 = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
        for match in m5:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
            brackets[key] = value
    # Step 4: split off "channel:namespace:" / "namespace:" prefixes.
    m2 = spec_str.rsplit(':', 2)
    m2_len = len(m2)
    if m2_len == 3:
        channel_str, namespace, spec_str = m2
    elif m2_len == 2:
        namespace, spec_str = m2
        channel_str = None
    elif m2_len:
        spec_str = m2[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # Bracket-supplied channel/subdir override the prefix-derived values.
    if 'channel' in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if 'subdir' in brackets:
        subdir = brackets.pop('subdir')
    # Step 5: split the remainder into a name and a version/build suffix.
    m3 = re.match(r'([^ =<>!]+)?([><!= ].+)?', spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    else:
        raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
    # Step 6: interpret the version/build suffix, translating '=' shorthands.
    spec_str = spec_str and spec_str.strip()
    if spec_str:
        if '[' in spec_str:
            raise CondaValueError("Invalid MatchSpec: %s" % spec_str)
        version, build = _parse_version_plus_build(spec_str)
        if version.startswith('='):
            test_str = version[1:]
            if version.startswith('==') and build is None:
                # "==1.0" means exactly 1.0
                version = version[2:]
            elif not any(c in test_str for c in "=,|"):
                if build is None and not test_str.endswith('*'):
                    # "=1.0" means 1.0.* (fuzzy/startswith match)
                    version = test_str + '*'
                else:
                    version = test_str
    else:
        version, build = None, None
    # Step 7: assemble the result dict.
    components = {}
    components['name'] = name if name else '*'
    if channel is not None:
        components['channel'] = channel
    if subdir is not None:
        components['subdir'] = subdir
    if namespace is not None:
        # Namespace support is not implemented yet; the value is parsed
        # and deliberately ignored.
        pass
    if version is not None:
        components['version'] = version
    if build is not None:
        components['build'] = build
    components.update(brackets)
    return components
@with_metaclass(ABCMeta)
class MatchInterface(object):
    """Abstract base class for the per-field matchers used by MatchSpec."""
    def __init__(self, value):
        self._raw_value = value
    @abstractmethod
    def match(self, other):
        """Return a truthy value if ``other`` satisfies this matcher."""
        raise NotImplementedError
    def matches(self, value):
        # Convenience alias for match().
        return self.match(value)
    @property
    def raw_value(self):
        return self._raw_value
    @abstractproperty
    def exact_value(self):
        """Return the exact value if this is an exact matcher, else None."""
        raise NotImplementedError()
class SplitStrMatch(MatchInterface):
    """Matcher over a set of tokens parsed from a comma/space separated string."""
    __slots__ = '_raw_value',
    def __init__(self, value):
        super(SplitStrMatch, self).__init__(self._convert(value))
    def _convert(self, value):
        """Coerce a delimited string or an iterable into a frozenset of tokens."""
        try:
            return frozenset(value.replace(' ', ',').split(','))
        except AttributeError:
            # Not a string; accept any iterable of tokens.
            if isiterable(value):
                return frozenset(value)
            raise
    def match(self, other):
        # Truthy when the two token sets intersect.
        try:
            return other and self._raw_value & other._raw_value
        except AttributeError:
            return self._raw_value & self._convert(other)
    def __repr__(self):
        # Render as a set literal of the sorted tokens; empty renders as "set()".
        if self._raw_value:
            return "{%s}" % ', '.join("'%s'" % s for s in sorted(self._raw_value))
        else:
            return 'set()'
    def __str__(self):
        # Space-delimited, sorted rendering of the token set.
        return ' '.join(sorted(self._raw_value))
    def __eq__(self, other):
        return self.match(other)
    def __hash__(self):
        return hash(self._raw_value)
    @property
    def exact_value(self):
        return self._raw_value
class ChannelMatch(MatchInterface):
    """Matcher that compares channels by their canonical name."""
    __slots__ = '_raw_value',
    def __init__(self, value):
        # Normalize the input into a Channel object up front.
        super(ChannelMatch, self).__init__(Channel(value))
    def match(self, other):
        try:
            # `other` is another ChannelMatch: compare canonical names directly.
            return self._raw_value.canonical_name == other._raw_value.canonical_name
        except AttributeError:
            # `other` is a raw value (string/URL): coerce it to a Channel first.
            return self._raw_value.canonical_name == Channel(other).canonical_name
    def __str__(self):
        return "%s" % self._raw_value.canonical_name
    def __repr__(self):
        return "'%s'" % self._raw_value.canonical_name
    def __eq__(self, other):
        return self.match(other)
    def __hash__(self):
        return hash(self._raw_value)
    @property
    def exact_value(self):
        return self._raw_value
class StrMatch(MatchInterface):
    """Match a string exactly, by anchored regex (``^...$``), or by glob (``*``)."""
    __slots__ = '_raw_value', '_re_match'
    def __init__(self, value):
        super(StrMatch, self).__init__(value)
        self._re_match = None
        if value.startswith('^') and value.endswith('$'):
            # Explicit anchored regex: compile it verbatim.
            self._re_match = re.compile(value).match
        elif '*' in value:
            # Glob-style wildcard: translate '*' to '.*' and anchor the pattern.
            self._re_match = re.compile(r'^(?:%s)$' % value.replace('*', r'.*')).match
    def match(self, other):
        # Accept either another matcher (use its raw value) or a plain value.
        try:
            _other_val = other._raw_value
        except AttributeError:
            _other_val = text_type(other)
        if self._re_match:
            return self._re_match(_other_val)
        else:
            return self._raw_value == _other_val
    def __str__(self):
        return self._raw_value
    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._raw_value)
    def __eq__(self, other):
        return self.match(other)
    def __hash__(self):
        return hash(self._raw_value)
    @property
    def exact_value(self):
        # Only a literal matcher has a single exact value; patterns yield None.
        return self._raw_value if self._re_match is None else None
class LowerStrMatch(StrMatch):
    """StrMatch that lowercases its value; used for the 'name' field."""
    def __init__(self, value):
        super(LowerStrMatch, self).__init__(value.lower())
# Field-specific matcher classes.  Fields not listed here fall back to a
# plain StrMatch (see MatchSpec._build_components).
_implementors = {
    'name': LowerStrMatch,
    'features': SplitStrMatch,
    'track_features': SplitStrMatch,
    'version': VersionSpec,
    'build_number': BuildNumberMatch,
    'channel': ChannelMatch,
}
| true | true |
f721212419baf5ea18640832b738d3e1f17382a7 | 6,485 | py | Python | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | tests/integration-tests/cfn_stacks_factory.py | agobeaux/aws-parallelcluster | ec337c6b8341f9b84616b6bbbe8687a0a5f71126 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
from retrying import retry
from utils import retrieve_cfn_outputs, retrieve_cfn_resources, set_credentials, unset_credentials
class CfnStack:
    """Represent a CloudFormation stack and lazily expose its outputs/resources."""
    def __init__(self, name, region, template, parameters=None):
        """Store the stack identity; nothing is created on AWS here."""
        # Lazy caches, populated on first property access.
        self.cfn_stack_id = None
        self.__cfn_outputs = None
        self.__cfn_resources = None
        self.name = name
        self.region = region
        self.template = template
        self.parameters = parameters or []
    @property
    def cfn_outputs(self):
        """Return the stack's CloudFormation outputs, fetched on first access
        and cached afterwards.
        """
        if not self.__cfn_outputs:
            self.__cfn_outputs = retrieve_cfn_outputs(self.name, self.region)
        return self.__cfn_outputs
    @property
    def cfn_resources(self):
        """Return the stack's CloudFormation resources, fetched on first
        access and cached afterwards.
        """
        if not self.__cfn_resources:
            self.__cfn_resources = retrieve_cfn_resources(self.name, self.region)
        return self.__cfn_resources
class CfnStacksFactory:
    """Manage creation and deletion of CloudFormation stacks."""
    def __init__(self, credentials):
        # Maps "<name>-<region>" -> CfnStack for every stack created here.
        self.__created_stacks = {}
        self.__credentials = credentials
    def create_stack(self, stack):
        """
        Create a cfn stack with a given template.
        :param stack: stack to create.
        """
        name = stack.name
        region = stack.region
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                raise ValueError("Stack {0} already exists in region {1}".format(name, region))
            logging.info("Creating stack {0} in region {1}".format(name, region))
            # Register before creating so a failed creation can still be
            # cleaned up by delete_all_stacks().
            self.__created_stacks[id] = stack
            try:
                cfn_client = boto3.client("cloudformation", region_name=region)
                result = cfn_client.create_stack(
                    StackName=name, TemplateBody=stack.template, Parameters=stack.parameters
                )
                stack.cfn_stack_id = result["StackId"]
                final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client)
                self.__assert_stack_status(final_status, "CREATE_COMPLETE")
            except Exception as e:
                logging.error("Creation of stack {0} in region {1} failed with exception: {2}".format(name, region, e))
                raise
            logging.info("Stack {0} created successfully in region {1}".format(name, region))
        finally:
            unset_credentials()
    # Deletion is retried up to 10 times on ClientError (e.g. API throttling).
    @retry(
        stop_max_attempt_number=10,
        wait_fixed=5000,
        retry_on_exception=lambda exception: isinstance(exception, ClientError),
    )
    def delete_stack(self, name, region):
        """Destroy a created cfn stack."""
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                logging.info("Destroying stack {0} in region {1}".format(name, region))
                try:
                    stack = self.__created_stacks[id]
                    cfn_client = boto3.client("cloudformation", region_name=stack.region)
                    cfn_client.delete_stack(StackName=stack.name)
                    final_status = self.__wait_for_stack_deletion(stack.cfn_stack_id, cfn_client)
                    self.__assert_stack_status(final_status, "DELETE_COMPLETE")
                except Exception as e:
                    logging.error(
                        "Deletion of stack {0} in region {1} failed with exception: {2}".format(name, region, e)
                    )
                    raise
                del self.__created_stacks[id]
                logging.info("Stack {0} deleted successfully in region {1}".format(name, region))
            else:
                logging.warning(
                    "Couldn't find stack with name {0} in region {1}. Skipping deletion.".format(name, region)
                )
        finally:
            unset_credentials()
    def delete_all_stacks(self):
        """Destroy all created stacks."""
        logging.debug("Destroying all cfn stacks")
        # Iterate over a copy: delete_stack() mutates __created_stacks.
        for _, value in dict(self.__created_stacks).items():
            try:
                self.delete_stack(value.name, value.region)
            except Exception as e:
                logging.error(
                    "Failed when destroying stack {0} in region {1} with exception {2}.".format(
                        value.name, value.region, e
                    )
                )
    # Poll every 5s until the stack leaves the CREATE_IN_PROGRESS state.
    @retry(
        retry_on_result=lambda result: result == "CREATE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_creation(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)
    # Poll every 5s until the stack leaves the DELETE_IN_PROGRESS state.
    @retry(
        retry_on_result=lambda result: result == "DELETE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_deletion(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)
    @staticmethod
    def __get_stack_status(name, cfn_client):
        return cfn_client.describe_stacks(StackName=name).get("Stacks")[0].get("StackStatus")
    @staticmethod
    def __assert_stack_status(status, expected_status):
        # Raised when the stack settles in an unexpected terminal state.
        if status != expected_status:
            raise Exception("Stack status {0} differs from expected one {1}".format(status, expected_status))
    @staticmethod
    def __get_stack_internal_id(name, region):
        return name + "-" + region
| 39.066265 | 119 | 0.626831 |
import logging
import boto3
from botocore.exceptions import ClientError
from retrying import retry
from utils import retrieve_cfn_outputs, retrieve_cfn_resources, set_credentials, unset_credentials
class CfnStack:
    """Identify a CloudFormation stack."""
    def __init__(self, name, region, template, parameters=None):
        self.name = name
        self.region = region
        self.template = template
        self.parameters = parameters or []
        self.cfn_stack_id = None
        # Lazy caches, populated on first property access.
        self.__cfn_outputs = None
        self.__cfn_resources = None
    @property
    def cfn_outputs(self):
        """
        Return the CloudFormation stack outputs for the stack.
        Outputs are retrieved only once and then cached.
        """
        if not self.__cfn_outputs:
            self.__cfn_outputs = retrieve_cfn_outputs(self.name, self.region)
        return self.__cfn_outputs
    @property
    def cfn_resources(self):
        """
        Return the CloudFormation stack resources for the stack.
        Resources are retrieved only once and then cached.
        """
        if not self.__cfn_resources:
            self.__cfn_resources = retrieve_cfn_resources(self.name, self.region)
        return self.__cfn_resources
class CfnStacksFactory:
    """Manage creation and deletion of CloudFormation stacks."""
    def __init__(self, credentials):
        # Maps "<name>-<region>" -> CfnStack for every stack created here.
        self.__created_stacks = {}
        self.__credentials = credentials
    def create_stack(self, stack):
        """Create a cfn stack with a given template.

        :param stack: stack to create.
        """
        name = stack.name
        region = stack.region
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                raise ValueError("Stack {0} already exists in region {1}".format(name, region))
            logging.info("Creating stack {0} in region {1}".format(name, region))
            # Register before creating so a failed creation can still be
            # cleaned up by delete_all_stacks().
            self.__created_stacks[id] = stack
            try:
                cfn_client = boto3.client("cloudformation", region_name=region)
                result = cfn_client.create_stack(
                    StackName=name, TemplateBody=stack.template, Parameters=stack.parameters
                )
                stack.cfn_stack_id = result["StackId"]
                final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client)
                self.__assert_stack_status(final_status, "CREATE_COMPLETE")
            except Exception as e:
                logging.error("Creation of stack {0} in region {1} failed with exception: {2}".format(name, region, e))
                raise
            logging.info("Stack {0} created successfully in region {1}".format(name, region))
        finally:
            unset_credentials()
    # Deletion is retried up to 10 times on ClientError (e.g. API throttling).
    @retry(
        stop_max_attempt_number=10,
        wait_fixed=5000,
        retry_on_exception=lambda exception: isinstance(exception, ClientError),
    )
    def delete_stack(self, name, region):
        """Destroy a created cfn stack."""
        try:
            set_credentials(region, self.__credentials)
            id = self.__get_stack_internal_id(name, region)
            if id in self.__created_stacks:
                logging.info("Destroying stack {0} in region {1}".format(name, region))
                try:
                    stack = self.__created_stacks[id]
                    cfn_client = boto3.client("cloudformation", region_name=stack.region)
                    cfn_client.delete_stack(StackName=stack.name)
                    final_status = self.__wait_for_stack_deletion(stack.cfn_stack_id, cfn_client)
                    self.__assert_stack_status(final_status, "DELETE_COMPLETE")
                except Exception as e:
                    logging.error(
                        "Deletion of stack {0} in region {1} failed with exception: {2}".format(name, region, e)
                    )
                    raise
                del self.__created_stacks[id]
                logging.info("Stack {0} deleted successfully in region {1}".format(name, region))
            else:
                logging.warning(
                    "Couldn't find stack with name {0} in region {1}. Skipping deletion.".format(name, region)
                )
        finally:
            unset_credentials()
    def delete_all_stacks(self):
        """Destroy all created stacks."""
        logging.debug("Destroying all cfn stacks")
        # Iterate over a copy: delete_stack() mutates __created_stacks.
        for _, value in dict(self.__created_stacks).items():
            try:
                self.delete_stack(value.name, value.region)
            except Exception as e:
                logging.error(
                    "Failed when destroying stack {0} in region {1} with exception {2}.".format(
                        value.name, value.region, e
                    )
                )
    # Poll every 5s until the stack leaves the CREATE_IN_PROGRESS state.
    @retry(
        retry_on_result=lambda result: result == "CREATE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_creation(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)
    # Poll every 5s until the stack leaves the DELETE_IN_PROGRESS state.
    @retry(
        retry_on_result=lambda result: result == "DELETE_IN_PROGRESS",
        wait_fixed=5000,
        retry_on_exception=lambda e: False,
    )
    def __wait_for_stack_deletion(self, name, cfn_client):
        return self.__get_stack_status(name, cfn_client)
    @staticmethod
    def __get_stack_status(name, cfn_client):
        return cfn_client.describe_stacks(StackName=name).get("Stacks")[0].get("StackStatus")
    @staticmethod
    def __assert_stack_status(status, expected_status):
        # Raised when the stack settles in an unexpected terminal state.
        if status != expected_status:
            raise Exception("Stack status {0} differs from expected one {1}".format(status, expected_status))
    @staticmethod
    def __get_stack_internal_id(name, region):
        return name + "-" + region
| true | true |
f721218a181e524dc4105ce1e8ccda9b8507b1c2 | 3,080 | py | Python | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | blog/blog/settings.py | zhaotao789/blog | de23e5a29b6aae2fc87829833f3fae256c55f5b3 | [
"MIT"
] | null | null | null | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for any real
# deployment it should be loaded from the environment instead.
SECRET_KEY = 'd120mv4fw)wcwekzk-r^1w5++9e^q_6qteo4-+n8kk4ei%i5$0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.454545 | 91 | 0.696104 |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; for any real
# deployment it should be loaded from the environment instead.
SECRET_KEY = 'd120mv4fw)wcwekzk-r^1w5++9e^q_6qteo4-+n8kk4ei%i5$0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f72121ecda5066fe0ef9c48035c6f35ba4a47c1b | 400 | py | Python | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | lino_tera/lib/coachings/choicelists.py | khchine5/tera | dd85aaefc2392fa831bcee7c258d37038e32aeb7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
# Copyright 2017-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""The choicelists for this plugin.
"""
from lino.api import dd, _
class PartnerTariffs(dd.ChoiceList):
    """The list of possible tariffs that can apply to a client."""
    verbose_name = _("Client tariff")
    verbose_name_plural = _("Client tariffs")
# Populate the choicelist with its two tariff choices.
add = PartnerTariffs.add_item
add('10', _("Plain"), 'plain')
add('20', _("Reduced"), 'reduced')
| 18.181818 | 45 | 0.67 |
from lino.api import dd, _
class PartnerTariffs(dd.ChoiceList):
    """The list of possible tariffs that can apply to a client."""
    verbose_name = _("Client tariff")
    verbose_name_plural = _("Client tariffs")
# Populate the choicelist with its two tariff choices.
add = PartnerTariffs.add_item
add('10', _("Plain"), 'plain')
add('20', _("Reduced"), 'reduced')
| true | true |
f7212251e63dcb5ce319603d8ff0812abad4359b | 1,095 | py | Python | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | 1 | 2016-06-23T05:56:53.000Z | 2016-06-23T05:56:53.000Z | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | synapse/resources/packages-plugin/yum-pkg.py | mrmuxl/synapse-agent | 615ccc8faefa0f7d66d070a7444fe57e67e3bae1 | [
"MIT"
] | null | null | null | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
    """Install a package with yum, raising ResourceException on failure."""
    result = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
    if result['returncode']:
        raise ResourceException(result['stderr'])
def get_installed_packages():
    """Return the installed RPM packages, one entry per line of `rpm -qa`."""
    rpm_query = exec_cmd("/bin/rpm -qa")
    return rpm_query['stdout'].split('\n')
def remove(name):
    """Remove a package with yum, raising ResourceException on failure."""
    result = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
    if result['returncode']:
        raise ResourceException(result['stderr'])
def update(name):
    """Update a package with yum.

    ``yum update`` exits 0 when asked to update a package that is not
    installed, so the install state is checked explicitly first and a
    ResourceException with a meaningful message is raised in that case
    (previously yum was run anyway and the exception carried a possibly
    empty stderr).
    """
    if not is_installed(name):
        raise ResourceException("Cannot update package {0}: not installed".format(name))
    ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def is_installed(name):
    """Return True if the RPM package ``name`` is installed."""
    query = exec_cmd("/bin/rpm -q {0}".format(name))
    return query['returncode'] == 0
| 28.076923 | 79 | 0.663014 | from synapse.syncmd import exec_cmd
from synapse.synapse_exceptions import ResourceException
from synapse.logger import logger
log = logger('yum-pkg')
def install(name):
    """Install a package with yum, raising ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y install {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def get_installed_packages():
    """Return the installed RPM packages, one entry per line of `rpm -qa`."""
    ret = exec_cmd("/bin/rpm -qa")
    return ret['stdout'].split('\n')
def remove(name):
    """Remove a package with yum, raising ResourceException on failure."""
    ret = exec_cmd("/usr/bin/yum -q -y remove {0}".format(name))
    if ret['returncode'] != 0:
        raise ResourceException(ret['stderr'])
def update(name):
    """Update a package with yum.

    yum exits 0 when updating a non-installed package, so the install state
    is checked separately and used to force the failure path below.
    """
    inst = is_installed(name)
    ret = exec_cmd("/usr/bin/yum -q -y update {0}".format(name))
    # NOTE(review): when the package is not installed, yum still runs and
    # ret['stderr'] may be empty, producing an exception with no message.
    if ret['returncode'] != 0 or not inst:
        raise ResourceException(ret['stderr'])
def is_installed(name):
ret = exec_cmd("/bin/rpm -q {0}".format(name))
return ret['returncode'] == 0
| true | true |
f7212358f16c2908668c9722bd9e47633e14b4ef | 2,154 | py | Python | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | null | null | null | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | 1 | 2021-03-28T22:15:29.000Z | 2021-11-03T09:06:14.000Z | sensirion_shdlc_sensorbridge/i2c_errors.py | Sensirion/python-shdlc-sensorbridge | c441c17d89697ecf0f7b61955f54c3da195e30e6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# (c) Copyright 2020 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
class SensorBridgeI2cError(IOError):
    """
    Base class for I2C transceive errors reported by the device.

    :param code: Error code as received from the device.
    :param str message: Human readable description of the error.
    """
    def __init__(self, code, message="Unknown"):
        text = "I2C transceive error: {}".format(message)
        super(SensorBridgeI2cError, self).__init__(text)
        self.error_code = code
        self.error_message = message
class SensorBridgeI2cNackError(SensorBridgeI2cError):
    """
    Raised when a transferred byte was not acknowledged (NACK).
    """
    def __init__(self):
        code, message = 0x01, "NACK (byte not acknowledged)"
        super(SensorBridgeI2cNackError, self).__init__(code, message)
class SensorBridgeI2cTimeoutError(SensorBridgeI2cError):
    """
    Raised when the I2C transceive operation timed out.
    """
    def __init__(self):
        code, message = 0x02, "Timeout"
        super(SensorBridgeI2cTimeoutError, self).__init__(code, message)
class SensorBridgeI2cTimingError(SensorBridgeI2cError):
    """
    Raised on invalid timing during a repeated transceive operation.
    """
    def __init__(self):
        code, message = 0x03, \
            "Invalid timing (frequency, interval, timeout or delay)"
        super(SensorBridgeI2cTimingError, self).__init__(code, message)
"""
List containing all I2C errors specified in this file.
"""
SENSORBRIDGE_I2C_ERROR_LIST = [
SensorBridgeI2cNackError(),
SensorBridgeI2cTimeoutError(),
SensorBridgeI2cTimingError(),
]
def i2c_error_from_code(code):
    """
    Return the corresponding exception for a given I2C error code.

    :param byte code:
        Error code as received from the device.
    :return:
        The exception for the given error code. If code is zero (no error),
        None is returned. Unknown codes map to the generic
        :py:class:`~sensirion_shdlc_sensorbridge.i2c_errors.SensorBridgeI2cError`.
    :rtype:
        None or an instance of SensorBridgeI2cError
    """  # noqa: E501
    if not code:
        return None
    matches = (err for err in SENSORBRIDGE_I2C_ERROR_LIST
               if err.error_code == code)
    # Fall back to the generic error type for unknown codes.
    return next(matches, SensorBridgeI2cError(code))
| 25.642857 | 81 | 0.654132 |
from __future__ import absolute_import, division, print_function
import logging
log = logging.getLogger(__name__)
class SensorBridgeI2cError(IOError):
def __init__(self, code, message="Unknown"):
super(SensorBridgeI2cError, self).__init__(
"I2C transceive error: {}".format(message)
)
self.error_code = code
self.error_message = message
class SensorBridgeI2cNackError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cNackError, self).__init__(
0x01,
"NACK (byte not acknowledged)"
)
class SensorBridgeI2cTimeoutError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cTimeoutError, self).__init__(
0x02,
"Timeout"
)
class SensorBridgeI2cTimingError(SensorBridgeI2cError):
def __init__(self):
super(SensorBridgeI2cTimingError, self).__init__(
0x03,
"Invalid timing (frequency, interval, timeout or delay)"
)
SENSORBRIDGE_I2C_ERROR_LIST = [
SensorBridgeI2cNackError(),
SensorBridgeI2cTimeoutError(),
SensorBridgeI2cTimingError(),
]
def i2c_error_from_code(code):
if code == 0:
return None
for error in SENSORBRIDGE_I2C_ERROR_LIST:
if error.error_code == code:
return error
return SensorBridgeI2cError(code)
| true | true |
f721236e30c2bc62859814934c24d2d0a6124a36 | 1,534 | py | Python | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | tests/ecr/data_generator/test_vessel_parser.py | zhawan/maro | d8c98deea4296cdcb90efd1fb59bc571cec3a2ef | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
import yaml
from maro.data_lib.ecr.vessel_parser import VesselsParser
from maro.data_lib.ecr.entities import VesselSetting
conf_str = """
vessels:
rt1_vessel_001:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: supply_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
rt1_vessel_002:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: demand_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
"""
class TestVesselParser(unittest.TestCase):
    """Checks that VesselsParser maps the YAML config onto vessel entities."""
    def test_vessel_parse(self):
        config = yaml.safe_load(conf_str)
        name_to_index, vessels = VesselsParser().parse(config["vessels"])
        # Both the lookup table and the entity list contain every vessel.
        self.assertEqual(2, len(name_to_index))
        self.assertEqual(2, len(vessels))
        self.assertListEqual(
            ["rt1_vessel_001", "rt1_vessel_002"],
            [v.name for v in vessels])
        # Per-vessel attributes are taken over verbatim from the config.
        for vessel in vessels:
            self.assertEqual(92400, vessel.capacity)
            self.assertEqual(1, vessel.parking_duration)
            self.assertEqual(0, vessel.parking_noise)
            self.assertEqual(10, vessel.sailing_speed)
            self.assertEqual(0, vessel.sailing_noise)
if __name__=="__main__":
unittest.main() | 25.566667 | 75 | 0.666232 |
import unittest
import yaml
from maro.data_lib.ecr.vessel_parser import VesselsParser
from maro.data_lib.ecr.entities import VesselSetting
conf_str = """
vessels:
rt1_vessel_001:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: supply_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
rt1_vessel_002:
capacity: 92400
parking:
duration: 1
noise: 0
route:
initial_port_name: demand_port_001
route_name: route_001
sailing:
noise: 0
speed: 10
"""
class TestVesselParser(unittest.TestCase):
def test_vessel_parse(self):
conf = yaml.safe_load(conf_str)
parser = VesselsParser()
vessel_mapping, vessels = parser.parse(conf["vessels"])
self.assertEqual(2, len(vessel_mapping))
self.assertEqual(2, len(vessels))
self.assertEqual("rt1_vessel_001", vessels[0].name)
self.assertEqual("rt1_vessel_002", vessels[1].name)
self.assertListEqual([92400, 92400], [v.capacity for v in vessels])
self.assertListEqual([1, 1], [v.parking_duration for v in vessels])
self.assertListEqual([0, 0], [v.parking_noise for v in vessels])
self.assertListEqual([10, 10], [v.sailing_speed for v in vessels])
self.assertListEqual([0, 0], [v.sailing_noise for v in vessels])
if __name__=="__main__":
unittest.main() | true | true |
f72123b570cec67b1077598e4da57ff2404e136f | 8,077 | py | Python | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 94 | 2020-12-11T06:57:31.000Z | 2022-03-15T10:24:06.000Z | corehq/util/es/interface.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | null | null | null | import abc
import logging
import traceback
from django.conf import settings
from corehq.pillows.mappings.utils import transform_for_es7
from corehq.util.es.elasticsearch import bulk, scan
class AbstractElasticsearchInterface(metaclass=abc.ABCMeta):
    """Version-agnostic wrapper around a low-level elasticsearch-py client.

    The method bodies here use the pre-ES7 client call shapes (explicit
    ``doc_type`` arguments, ``_source_include`` spelling); version-specific
    subclasses override the calls whose API changed.
    """
    def __init__(self, es):
        # ``es`` is a low-level elasticsearch-py client instance.
        self.es = es
    def get_aliases(self):
        """Return the index alias information from the cluster."""
        return self.es.indices.get_aliases()
    def put_mapping(self, doc_type, mapping, index):
        """Register ``mapping`` for ``doc_type`` on ``index``."""
        return self.es.indices.put_mapping(doc_type, {doc_type: mapping}, index=index)
    def _verify_is_alias(self, index_or_alias):
        """Raise ESError unless ``index_or_alias`` is a known ES alias."""
        # Imported lazily to avoid import cycles at module load time
        # (presumably -- TODO confirm against corehq.elastic).
        from corehq.elastic import ES_META, ESError
        from pillowtop.tests.utils import TEST_ES_ALIAS
        all_es_aliases = [index_info.alias for index_info in ES_META.values()] + [TEST_ES_ALIAS]
        if index_or_alias not in all_es_aliases:
            raise ESError(
                f"{index_or_alias} is an unknown alias, query target must be one of {all_es_aliases}")
    def update_index_settings(self, index, settings_dict):
        """Apply ``settings_dict`` (which must contain only an 'index' key) to ``index``."""
        assert set(settings_dict.keys()) == {'index'}, settings_dict.keys()
        return self.es.indices.put_settings(settings_dict, index=index)
    def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
        # ``_source_include`` is the pre-ES7 spelling of the source filter kwarg.
        kwargs = {"_source_include": source_includes} if source_includes else {}
        return self.es.get_source(index_alias, doc_type, doc_id, **kwargs)
    def doc_exists(self, index_alias, doc_id, doc_type):
        """Return True if the document exists in the given index."""
        return self.es.exists(index_alias, doc_type, doc_id)
    def _mget(self, index_alias, body, doc_type):
        # Multi-get; callers pass ``body`` shaped like ``{'ids': [...]}``.
        return self.es.mget(
            index=index_alias, doc_type=doc_type, body=body, _source=True)
    def get_doc(self, index_alias, doc_type, doc_id, source_includes=None, verify_alias=True):
        """Fetch a single document's source, with its '_id' copied back in."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        doc = self._get_source(index_alias, doc_type, doc_id, source_includes=source_includes)
        doc['_id'] = doc_id
        return doc
    def get_bulk_docs(self, index_alias, doc_type, doc_ids, verify_alias=True):
        """Fetch sources for ``doc_ids``; documents not found are silently skipped.

        Raises ESError if ES reports a per-document error.
        """
        from corehq.elastic import ESError
        if verify_alias:
            self._verify_is_alias(index_alias)
        docs = []
        results = self._mget(index_alias=index_alias, doc_type=doc_type, body={'ids': doc_ids})
        for doc_result in results['docs']:
            if 'error' in doc_result:
                raise ESError(doc_result['error'].get('reason', 'error doing bulk get'))
            if doc_result['found']:
                self._fix_hit(doc_result)
                docs.append(doc_result['_source'])
        return docs
    def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
        """Create or overwrite a document; its '_id' key is stripped first."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.index(index_alias, doc_type, body=self._without_id_field(doc), id=doc_id,
                      params=params or {})
    def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
        """Partially update a document with the given ``fields``."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.update(index_alias, doc_type, doc_id, body={"doc": self._without_id_field(fields)},
                       params=params or {})
    def _prepare_count_query(self, query):
        # pagination params are not required and not supported in ES count API
        query = query.copy()
        for extra in ['size', 'sort', 'from', 'to', '_source']:
            query.pop(extra, None)
        return query
    def count(self, index_alias, doc_type, query):
        """Return the number of documents matching ``query``."""
        query = self._prepare_count_query(query)
        return self.es.count(index=index_alias, doc_type=doc_type, body=query).get('count')
    @staticmethod
    def _without_id_field(doc):
        # Field [_id] is a metadata field and cannot be added inside a document.
        # Use the index API request parameters.
        return {key: value for key, value in doc.items() if key != '_id'}
    def delete_doc(self, index_alias, doc_type, doc_id):
        """Delete a document by id."""
        self.es.delete(index_alias, doc_type, doc_id)
    def bulk_ops(self, actions, stats_only=False, **kwargs):
        """Run the bulk helper over ``actions``, stripping '_id' from each source."""
        for action in actions:
            if '_source' in action:
                action['_source'] = self._without_id_field(action['_source'])
        ret = bulk(self.es, actions, stats_only=stats_only, **kwargs)
        return ret
    def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
        """Execute a search and normalize the hits (see ``_fix_hits_in_results``)."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        results = self.es.search(index=index_alias, doc_type=doc_type, body=body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def scroll(self, scroll_id=None, body=None, params=None, **kwargs):
        """Continue a scrolling search and normalize the returned hits."""
        results = self.es.scroll(scroll_id, body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def scan(self, index_alias, query, doc_type):
        """Return a scan/scroll iterator over all documents matching ``query``."""
        return scan(self.es, query=query, index=index_alias, doc_type=doc_type, search_type='scan')
    @staticmethod
    def _fix_hit(hit):
        # Copy the document id into the source so callers see it inline.
        if '_source' in hit:
            hit['_source']['_id'] = hit['_id']
    def _fix_hits_in_results(self, results):
        """Normalize search results in place: inline '_id' and flatten totals."""
        try:
            hits = results['hits']['hits']
        except KeyError:
            return results
        for hit in hits:
            self._fix_hit(hit)
        total = results['hits']['total']
        # In ES7 total is a dict
        if isinstance(total, dict):
            results['hits']['total'] = total.get('value', 0)
class ElasticsearchInterfaceDefault(AbstractElasticsearchInterface):
    # Used for ES major versions 1 and 2 (see the version mapping at the
    # bottom of this module); inherits all base behavior unchanged.
    pass
class ElasticsearchInterface7(AbstractElasticsearchInterface):
    """Overrides for the Elasticsearch 7.x client API.

    The ``doc_type`` parameters are kept for interface compatibility with the
    base class but are not forwarded to the client in these overrides.
    """
    def get_aliases(self):
        # ES7 client renamed get_aliases() to get_alias().
        return self.es.indices.get_alias()
    def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
        """Execute a search; ``doc_type`` is accepted but not forwarded."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        results = self.es.search(index=index_alias, body=body, params=params or {}, **kwargs)
        self._fix_hits_in_results(results)
        return results
    def put_mapping(self, doc_type, mapping, index):
        """Register a mapping transformed to the ES7 (typeless) format."""
        mapping = transform_for_es7(mapping)
        return self.es.indices.put_mapping(mapping, index=index)
    def doc_exists(self, index_alias, doc_id, doc_type):
        """Return True if the document exists; ``doc_type`` is not forwarded."""
        return self.es.exists(index_alias, doc_id)
    def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
        # ES7 renamed the source filter kwarg to ``_source_includes``.
        kwargs = {"_source_includes": source_includes} if source_includes else {}
        return self.es.get_source(index_alias, doc_id, **kwargs)
    def _mget(self, index_alias, body, doc_type):
        # Multi-get without the doc_type argument used by older versions.
        return self.es.mget(
            index=index_alias, body=body, _source=True)
    def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
        """Create or overwrite a document; its '_id' key is stripped first."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        params = params or {}
        # not supported in ES7
        params.pop('retry_on_conflict', None)
        self.es.index(index_alias, body=self._without_id_field(doc), id=doc_id,
                      params=params)
    def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
        """Partially update a document with the given ``fields``."""
        if verify_alias:
            self._verify_is_alias(index_alias)
        self.es.update(index_alias, doc_id, body={"doc": self._without_id_field(fields)},
                       params=params or {})
    def delete_doc(self, index_alias, doc_type, doc_id):
        """Delete a document by id; ``doc_type`` is not forwarded."""
        self.es.delete(index_alias, doc_id)
    def count(self, index_alias, doc_type, query):
        """Return the number of documents matching ``query``."""
        query = self._prepare_count_query(query)
        return self.es.count(index=index_alias, body=query).get('count')
    def scan(self, index_alias, query, doc_type):
        # Sort by '_doc' when scanning, per the query mutation below
        # (NOTE(review): this mutates the caller's query dict in place).
        query["sort"] = "_doc"
        return scan(self.es, query=query, index=index_alias)
# Select the interface class matching the configured Elasticsearch major
# version; versions 1 and 2 share the default implementation.
ElasticsearchInterface = {
    1: ElasticsearchInterfaceDefault,
    2: ElasticsearchInterfaceDefault,
    7: ElasticsearchInterface7,
}[settings.ELASTICSEARCH_MAJOR_VERSION]
| 39.985149 | 112 | 0.668317 | import abc
import logging
import traceback
from django.conf import settings
from corehq.pillows.mappings.utils import transform_for_es7
from corehq.util.es.elasticsearch import bulk, scan
class AbstractElasticsearchInterface(metaclass=abc.ABCMeta):
def __init__(self, es):
self.es = es
def get_aliases(self):
return self.es.indices.get_aliases()
def put_mapping(self, doc_type, mapping, index):
return self.es.indices.put_mapping(doc_type, {doc_type: mapping}, index=index)
def _verify_is_alias(self, index_or_alias):
from corehq.elastic import ES_META, ESError
from pillowtop.tests.utils import TEST_ES_ALIAS
all_es_aliases = [index_info.alias for index_info in ES_META.values()] + [TEST_ES_ALIAS]
if index_or_alias not in all_es_aliases:
raise ESError(
f"{index_or_alias} is an unknown alias, query target must be one of {all_es_aliases}")
def update_index_settings(self, index, settings_dict):
assert set(settings_dict.keys()) == {'index'}, settings_dict.keys()
return self.es.indices.put_settings(settings_dict, index=index)
def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
kwargs = {"_source_include": source_includes} if source_includes else {}
return self.es.get_source(index_alias, doc_type, doc_id, **kwargs)
def doc_exists(self, index_alias, doc_id, doc_type):
return self.es.exists(index_alias, doc_type, doc_id)
def _mget(self, index_alias, body, doc_type):
return self.es.mget(
index=index_alias, doc_type=doc_type, body=body, _source=True)
def get_doc(self, index_alias, doc_type, doc_id, source_includes=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
doc = self._get_source(index_alias, doc_type, doc_id, source_includes=source_includes)
doc['_id'] = doc_id
return doc
def get_bulk_docs(self, index_alias, doc_type, doc_ids, verify_alias=True):
from corehq.elastic import ESError
if verify_alias:
self._verify_is_alias(index_alias)
docs = []
results = self._mget(index_alias=index_alias, doc_type=doc_type, body={'ids': doc_ids})
for doc_result in results['docs']:
if 'error' in doc_result:
raise ESError(doc_result['error'].get('reason', 'error doing bulk get'))
if doc_result['found']:
self._fix_hit(doc_result)
docs.append(doc_result['_source'])
return docs
def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.index(index_alias, doc_type, body=self._without_id_field(doc), id=doc_id,
params=params or {})
def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.update(index_alias, doc_type, doc_id, body={"doc": self._without_id_field(fields)},
params=params or {})
def _prepare_count_query(self, query):
query = query.copy()
for extra in ['size', 'sort', 'from', 'to', '_source']:
query.pop(extra, None)
return query
def count(self, index_alias, doc_type, query):
query = self._prepare_count_query(query)
return self.es.count(index=index_alias, doc_type=doc_type, body=query).get('count')
@staticmethod
def _without_id_field(doc):
return {key: value for key, value in doc.items() if key != '_id'}
def delete_doc(self, index_alias, doc_type, doc_id):
self.es.delete(index_alias, doc_type, doc_id)
def bulk_ops(self, actions, stats_only=False, **kwargs):
for action in actions:
if '_source' in action:
action['_source'] = self._without_id_field(action['_source'])
ret = bulk(self.es, actions, stats_only=stats_only, **kwargs)
return ret
def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
if verify_alias:
self._verify_is_alias(index_alias)
results = self.es.search(index=index_alias, doc_type=doc_type, body=body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def scroll(self, scroll_id=None, body=None, params=None, **kwargs):
results = self.es.scroll(scroll_id, body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def scan(self, index_alias, query, doc_type):
return scan(self.es, query=query, index=index_alias, doc_type=doc_type, search_type='scan')
@staticmethod
def _fix_hit(hit):
if '_source' in hit:
hit['_source']['_id'] = hit['_id']
def _fix_hits_in_results(self, results):
try:
hits = results['hits']['hits']
except KeyError:
return results
for hit in hits:
self._fix_hit(hit)
total = results['hits']['total']
if isinstance(total, dict):
results['hits']['total'] = total.get('value', 0)
class ElasticsearchInterfaceDefault(AbstractElasticsearchInterface):
pass
class ElasticsearchInterface7(AbstractElasticsearchInterface):
def get_aliases(self):
return self.es.indices.get_alias()
def search(self, index_alias=None, doc_type=None, body=None, params=None, verify_alias=True, **kwargs):
if verify_alias:
self._verify_is_alias(index_alias)
results = self.es.search(index=index_alias, body=body, params=params or {}, **kwargs)
self._fix_hits_in_results(results)
return results
def put_mapping(self, doc_type, mapping, index):
mapping = transform_for_es7(mapping)
return self.es.indices.put_mapping(mapping, index=index)
def doc_exists(self, index_alias, doc_id, doc_type):
return self.es.exists(index_alias, doc_id)
def _get_source(self, index_alias, doc_type, doc_id, source_includes=None):
kwargs = {"_source_includes": source_includes} if source_includes else {}
return self.es.get_source(index_alias, doc_id, **kwargs)
def _mget(self, index_alias, body, doc_type):
return self.es.mget(
index=index_alias, body=body, _source=True)
def index_doc(self, index_alias, doc_type, doc_id, doc, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
params = params or {}
params.pop('retry_on_conflict', None)
self.es.index(index_alias, body=self._without_id_field(doc), id=doc_id,
params=params)
def update_doc_fields(self, index_alias, doc_type, doc_id, fields, params=None, verify_alias=True):
if verify_alias:
self._verify_is_alias(index_alias)
self.es.update(index_alias, doc_id, body={"doc": self._without_id_field(fields)},
params=params or {})
def delete_doc(self, index_alias, doc_type, doc_id):
self.es.delete(index_alias, doc_id)
def count(self, index_alias, doc_type, query):
query = self._prepare_count_query(query)
return self.es.count(index=index_alias, body=query).get('count')
def scan(self, index_alias, query, doc_type):
query["sort"] = "_doc"
return scan(self.es, query=query, index=index_alias)
ElasticsearchInterface = {
1: ElasticsearchInterfaceDefault,
2: ElasticsearchInterfaceDefault,
7: ElasticsearchInterface7,
}[settings.ELASTICSEARCH_MAJOR_VERSION]
| true | true |
f72125bd49bf7f1f45aab75707173700a233a682 | 2,262 | py | Python | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 572 | 2018-10-25T10:52:21.000Z | 2022-03-09T18:02:20.000Z | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 50 | 2018-11-06T08:53:27.000Z | 2022-01-04T17:00:37.000Z | brave/overlays/effect.py | datagutt/brave | 5b4de55146645f96870ffc544859e6f2bb9ec735 | [
"Apache-2.0"
] | 130 | 2018-11-01T14:50:46.000Z | 2022-03-10T20:31:41.000Z | from brave.overlays.overlay import Overlay
from gi.repository import Gst
class EffectOverlay(Overlay):
    '''
    Overlay that applies a GStreamer video effect to its source.
    '''
    def permitted_props(self):
        '''
        Extends the base overlay props with the selectable effect name and a
        visibility flag (defaults to hidden).
        '''
        return {
            **super().permitted_props(),
            'effect_name': {
                'type': 'str',
                'default': 'edgetv',
                'permitted_values': {
                    'agingtv': 'AgingTV effect',
                    'burn': 'Burn',
                    'chromium': 'Chromium',
                    'dicetv': 'DiceTV effect',
                    'dilate': 'Dilate',
                    'dodge': 'Dodge',
                    'edgetv': 'EdgeTV effect',
                    'exclusion': 'Exclusion',
                    'optv': 'OpTV effect',
                    'radioactv': 'RadioacTV effect',
                    'revtv': 'RevTV effect',
                    'rippletv': 'RippleTV effect',
                    'solarize': 'Solarize',
                    'streaktv': 'StreakTV effect',
                    'vertigotv': 'VertigoTV effect',
                    'warptv': 'WarpTV effect'
                    # Note: quarktv and shagadelictv are removed as they were unreliable in testing
                }
            },
            'visible': {
                'type': 'bool',
                'default': False
            }
        }
    def create_elements(self):
        '''
        Builds the effect bin and attaches it to the source's pipeline.
        '''
        # The effects filters can mess with the alpha channel.
        # The best solution I've found is to allow it to move into RGBx, then force a detour via RGB
        # to remove the alpha channel, before moving back to our default RGBA.
        # This is done in a 'bin' so that the overlay can be manipulated as one thing.
        desc = ('videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
                'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name
        self.element = Gst.parse_bin_from_description(desc, True)
        self.element.set_name('%s_bin' % self.uid)
        # Attach the bin next to the source's final video tee.
        place_to_add_elements = getattr(self.source, 'final_video_tee').parent
        if not place_to_add_elements.add(self.element):
            self.logger.warning('Unable to add effect overlay bin to the source pipeline')
| 41.888889 | 100 | 0.525199 | from brave.overlays.overlay import Overlay
from gi.repository import Gst
class EffectOverlay(Overlay):
def permitted_props(self):
return {
**super().permitted_props(),
'effect_name': {
'type': 'str',
'default': 'edgetv',
'permitted_values': {
'agingtv': 'AgingTV effect',
'burn': 'Burn',
'chromium': 'Chromium',
'dicetv': 'DiceTV effect',
'dilate': 'Dilate',
'dodge': 'Dodge',
'edgetv': 'EdgeTV effect',
'exclusion': 'Exclusion',
'optv': 'OpTV effect',
'radioactv': 'RadioacTV effect',
'revtv': 'RevTV effect',
'rippletv': 'RippleTV effect',
'solarize': 'Solarize',
'streaktv': 'StreakTV effect',
'vertigotv': 'VertigoTV effect',
'warptv': 'WarpTV effect'
}
},
'visible': {
'type': 'bool',
'default': False
}
}
def create_elements(self):
# to remove the alpha channel, before moving back to our default RGBA.
# This is done in a 'bin' so that the overlay can be manipulated as one thing.
desc = ('videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"') % self.effect_name
self.element = Gst.parse_bin_from_description(desc, True)
self.element.set_name('%s_bin' % self.uid)
place_to_add_elements = getattr(self.source, 'final_video_tee').parent
if not place_to_add_elements.add(self.element):
self.logger.warning('Unable to add effect overlay bin to the source pipeline')
| true | true |
f7212703196fd6c35cdef4b889edc2bf6b134e91 | 7,399 | py | Python | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | pytest_testrail/conftest.py | harmonm/pytest-testrail | cfd667b33cc857dd65c8531823859cd871aff525 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import sys
if sys.version_info.major == 2:
# python2
import ConfigParser as configparser
else:
# python3
import configparser
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
def pytest_addoption(parser):
    """Register the TestRail command line options with pytest.

    Fix: the last three ``group.addoption(...)`` calls were previously joined
    by stray trailing commas into a single tuple expression; they are now
    separate statements. Registered options are unchanged.
    """
    group = parser.getgroup('testrail')
    group.addoption(
        '--testrail',
        action='store_true',
        help='Create and update testruns with TestRail')
    group.addoption(
        '--tr-config',
        action='store',
        default='testrail.cfg',
        help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
    group.addoption(
        '--tr-url',
        action='store',
        help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
    group.addoption(
        '--tr-email',
        action='store',
        help='Email for the account on the TestRail server (config file: email in API section)')
    group.addoption(
        '--tr-password',
        action='store',
        help='Password for the account on the TestRail server (config file: password in API section)')
    group.addoption(
        '--tr-testrun-assignedto-id',
        action='store',
        help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-project-id',
        action='store',
        help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-id',
        action='store',
        help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
    group.addoption(
        '--tr-testrun-suite-include-all',
        action='store_true',
        default=None,
        help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
    group.addoption(
        '--tr-testrun-name',
        action='store',
        default=None,
        help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
    group.addoption(
        '--tr-run-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-plan-id',
        action='store',
        default=0,
        required=False,
        help='Identifier of testplan, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    group.addoption(
        '--tr-version',
        action='store',
        default='',
        required=False,
        help='Indicate a version in Test Case result')
    group.addoption(
        '--tr-no-ssl-cert-check',
        action='store_false',
        default=None,
        help='Do not check for valid SSL certificate on TestRail host')
    group.addoption(
        '--tr-close-on-complete',
        action='store_true',
        default=False,
        required=False,
        help='Close a test run on completion')
    group.addoption(
        '--tr-dont-publish-blocked',
        action='store_false',
        required=False,
        help='Determine if results of "blocked" testcases (in TestRail) are published or not')
    group.addoption(
        '--tr-skip-missing',
        action='store_true',
        required=False,
        help='Skip test cases that are not present in testrun')
    group.addoption(
        "--tr-add-passes",
        action="store",
        default=None,
        required=False,
        help="Add passing results, default is False"
    )
    group.addoption(
        '--tr-testrun-milestone-id',
        action='store',
        help='Identifier for milestone, that appears in TestRail. If provided, testrun will be associated with milestone'
    )
def pytest_configure(config):
    """When --testrail is given, build the API client and register the plugin.

    Option values are resolved via ConfigManager, so command line flags take
    precedence over entries in the config file.
    """
    if config.getoption('--testrail'):
        cfg_file_path = config.getoption('--tr-config')
        config_manager = ConfigManager(cfg_file_path, config)
        # Server credentials may come from the CLI or the [API] config section.
        client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
                           config_manager.getoption('tr-email', 'email', 'API'),
                           config_manager.getoption('tr-password', 'password', 'API'))

        config.pluginmanager.register(
            PyTestRailPlugin(
                client=client,
                assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
                project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
                suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
                include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN', is_bool=True, default=False),
                cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True, default=True),
                tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
                milestone_id=config_manager.getoption('tr-testrun-milestone-id', 'milestone_id', 'TESTRUN'),
                run_id=config.getoption('--tr-run-id'),
                plan_id=config.getoption('--tr-plan-id'),
                version=config.getoption('--tr-version'),
                close_on_complete=config.getoption('--tr-close-on-complete'),
                publish_blocked=config.getoption('--tr-dont-publish-blocked'),
                skip_missing=config.getoption('--tr-skip-missing'),
                add_passes=config_manager.getoption("tr-add-passes", "add_passes", "TESTRUN", is_bool=True, default=None)
            ),
            # Name of plugin instance (allow to be used by other plugins)
            name="pytest-testrail-instance"
        )
class ConfigManager(object):
    """Resolves configuration values from CLI flags and an optional cfg file.

    Values supplied on the command line take precedence over values from the
    config file, which in turn take precedence over defaults.
    """
    def __init__(self, cfg_file_path, config):
        '''
        :param cfg_file_path: Path to the config file containing information
            about the TestRail server (the file may not exist).
        :type cfg_file_path: str or None
        :param config: Config object containing commandline flag options.
        :type config: _pytest.config.Config
        '''
        self.config = config
        self.cfg_file = None
        path_usable = os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path)
        if path_usable:
            file_parser = configparser.ConfigParser()
            file_parser.read(cfg_file_path)
            self.cfg_file = file_parser

    def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
        '''
        Return the value for an option, resolved in priority order:
        CLI flag, then config file entry, then ``default``.
        '''
        cli_value = self.config.getoption('--{}'.format(flag))
        if cli_value is not None:
            return cli_value
        file_usable = section is not None and self.cfg_file is not None
        if not file_usable or not self.cfg_file.has_option(section, cfg_name):
            return default
        getter = self.cfg_file.getboolean if is_bool else self.cfg_file.get
        return getter(section, cfg_name)
| 41.105556 | 140 | 0.626571 |
import os
import sys
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
def pytest_addoption(parser):
    """Register all pytest-testrail command line options in the 'testrail' group."""
    group = parser.getgroup('testrail')
    add = group.addoption
    # Activation and config-file location
    add('--testrail', action='store_true',
        help='Create and update testruns with TestRail')
    add('--tr-config', action='store', default='testrail.cfg',
        help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
    # Server connection / credentials
    add('--tr-url', action='store',
        help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
    add('--tr-email', action='store',
        help='Email for the account on the TestRail server (config file: email in API section)')
    add('--tr-password', action='store',
        help='Password for the account on the TestRail server (config file: password in API section)')
    # Test run description
    add('--tr-testrun-assignedto-id', action='store',
        help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
    add('--tr-testrun-project-id', action='store',
        help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
    add('--tr-testrun-suite-id', action='store',
        help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
    add('--tr-testrun-suite-include-all', action='store_true', default=None,
        help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
    add('--tr-testrun-name', action='store', default=None,
        help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
    add('--tr-run-id', action='store', default=0, required=False,
        help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    add('--tr-plan-id', action='store', default=0, required=False,
        help='Identifier of testplan, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
    add('--tr-version', action='store', default='', required=False,
        help='Indicate a version in Test Case result')
    # Behaviour switches
    add('--tr-no-ssl-cert-check', action='store_false', default=None,
        help='Do not check for valid SSL certificate on TestRail host')
    add('--tr-close-on-complete', action='store_true', default=False, required=False,
        help='Close a test run on completion')
    add('--tr-dont-publish-blocked', action='store_false', required=False,
        help='Determine if results of "blocked" testcases (in TestRail) are published or not')
    add('--tr-skip-missing', action='store_true', required=False,
        help='Skip test cases that are not present in testrun')
    add("--tr-add-passes", action="store", default=None, required=False,
        help="Add passing results, default is False")
    add('--tr-testrun-milestone-id', action='store',
        help='Identifier for milestone, that appears in TestRail. If provided, testrun will be associated with milestone')
def pytest_configure(config):
    """When ``--testrail`` is given, build the TestRail API client and
    register the reporting plugin with pytest's plugin manager."""
    if not config.getoption('--testrail'):
        return
    options = ConfigManager(config.getoption('--tr-config'), config)
    client = APIClient(
        options.getoption('tr-url', 'url', 'API'),
        options.getoption('tr-email', 'email', 'API'),
        options.getoption('tr-password', 'password', 'API'),
    )
    plugin = PyTestRailPlugin(
        client=client,
        assign_user_id=options.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
        project_id=options.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
        suite_id=options.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
        include_all=options.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN', is_bool=True, default=False),
        cert_check=options.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True, default=True),
        tr_name=options.getoption('tr-testrun-name', 'name', 'TESTRUN'),
        milestone_id=options.getoption('tr-testrun-milestone-id', 'milestone_id', 'TESTRUN'),
        run_id=config.getoption('--tr-run-id'),
        plan_id=config.getoption('--tr-plan-id'),
        version=config.getoption('--tr-version'),
        close_on_complete=config.getoption('--tr-close-on-complete'),
        publish_blocked=config.getoption('--tr-dont-publish-blocked'),
        skip_missing=config.getoption('--tr-skip-missing'),
        add_passes=options.getoption("tr-add-passes", "add_passes", "TESTRUN", is_bool=True, default=None),
    )
    # Named registration lets other plugins look this instance up.
    config.pluginmanager.register(plugin, name="pytest-testrail-instance")
class ConfigManager(object):
    """Resolve option values, preferring command-line flags over values from
    the TestRail config file, with a caller-supplied default as last resort.
    """
    def __init__(self, cfg_file_path, config):
        """
        :param cfg_file_path: Path to the config file containing information
            about the TestRail server. Ignored when the file does not exist.
        :type cfg_file_path: str or None
        :param config: Config object containing commandline flag options.
        :type config: _pytest.config.Config
        """
        self.cfg_file = None
        # Only parse the config file if it actually exists (or is a symlink).
        if os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path):
            self.cfg_file = configparser.ConfigParser()
            self.cfg_file.read(cfg_file_path)
        self.config = config
    def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
        """Return the option value with priority: CLI flag > config file > default."""
        # 1. A command-line flag that was explicitly set always wins.
        value = self.config.getoption('--{}'.format(flag))
        if value is not None:
            return value
        # 2. Fall back to `default` when no section was given or no config file was parsed.
        if section is None or self.cfg_file is None:
            return default
        if self.cfg_file.has_option(section, cfg_name):
            # 3. Value from the config file (coerced to bool when requested).
            return self.cfg_file.getboolean(section, cfg_name) if is_bool else self.cfg_file.get(section, cfg_name)
        else:
            # 4. Entry absent from the config file.
            return default
| true | true |
f72128027575513090564f54bc3c085deb980059 | 666 | py | Python | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | 2 | 2021-11-20T04:04:47.000Z | 2022-01-06T07:44:23.000Z | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | lldb/test/API/lang/swift/po/sys_types/TestSwiftPOSysTypes.py | LaudateCorpus1/llvm-project-staging | cc926dc3a87af7023aa9b6c392347a0a8ed6949b | [
"Apache-2.0"
] | null | null | null | # TestSwiftPOSysTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
import lldbsuite.test.lldbinline as lldbinline
from lldbsuite.test.decorators import *

# Build an LLDB inline test from this file's annotations; gated by the
# swiftTest decorator and skipped on Windows.
lldbinline.MakeInlineTest(__file__, globals(),
                          decorators=[swiftTest,skipIf(oslist=['windows'])])
| 39.176471 | 80 | 0.657658 |
import lldbsuite.test.lldbinline as lldbinline
from lldbsuite.test.decorators import *
lldbinline.MakeInlineTest(__file__, globals(),
decorators=[swiftTest,skipIf(oslist=['windows'])])
| true | true |
f721282c9bbbd6198fca4dcb39f852eed304a1be | 16,248 | py | Python | brian2/codegen/generators/numpy_generator.py | SimonAltrogge/brian2 | 6463c368a8277041051bf5ae4816f0dd5b6e057c | [
"BSD-2-Clause"
] | 674 | 2015-01-14T11:05:39.000Z | 2022-03-29T04:53:50.000Z | brian2/codegen/generators/numpy_generator.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 937 | 2015-01-05T13:24:22.000Z | 2022-03-25T13:10:13.000Z | brian2/codegen/generators/numpy_generator.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 237 | 2015-01-05T13:54:16.000Z | 2022-03-15T22:16:32.000Z |
import itertools
import numpy as np
from brian2.parsing.bast import brian_dtype_from_dtype
from brian2.parsing.rendering import NumpyNodeRenderer
from brian2.core.functions import DEFAULT_FUNCTIONS, timestep
from brian2.core.variables import ArrayVariable
from brian2.utils.stringtools import get_identifiers, word_substitute, indent
from brian2.utils.logger import get_logger
from .base import CodeGenerator
__all__ = ['NumpyCodeGenerator']
logger = get_logger(__name__)
class VectorisationError(Exception):
    """Raised when a statement cannot be translated into vectorised numpy code,
    triggering the fallback to an explicit Python loop."""
    pass
class NumpyCodeGenerator(CodeGenerator):
    """
    Numpy language

    Essentially Python but vectorised.
    """
    class_name = 'numpy'
    _use_ufunc_at_vectorisation = True # allow this to be off for testing only
    def translate_expression(self, expr):
        """Render an abstract-code expression as a numpy-flavoured Python expression."""
        expr = word_substitute(expr, self.func_name_replacements)
        return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()
    def translate_statement(self, statement):
        """Translate a single abstract-code statement into one line of Python code.

        A float-valued statement that depends on exactly one boolean variable is
        rewritten as ``_numpy.where(boolvar, expr_if_true, expr_if_false)`` when
        the simplified branches are cheaper than the full expression.
        """
        # TODO: optimisation, translate arithmetic to a sequence of inplace
        #       operations like a=b+c -> add(b, c, a)
        var, op, expr, comment = (statement.var, statement.op,
                                  statement.expr, statement.comment)
        if op == ':=':
            op = '='
        # For numpy we replace complex expressions involving a single boolean variable into a
        # where(boolvar, expr_if_true, expr_if_false)
        if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1
                and brian_dtype_from_dtype(statement.dtype)=='float'
                and statement.complexity_std>sum(statement.complexities.values())):
            used_boolvars = statement.used_boolean_variables
            bool_simp = statement.boolean_simplified_expressions
            boolvar = used_boolvars[0]
            for bool_assigns, simp_expr in bool_simp.items():
                _, boolval = bool_assigns[0]
                if boolval:
                    expr_true = simp_expr
                else:
                    expr_false = simp_expr
            code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'
        else:
            code = f"{var} {op} {self.translate_expression(expr)}"
        if len(comment):
            code += f" # {comment}"
        return code
    def ufunc_at_vectorisation(self, statement, variables, indices,
                               conditional_write_vars, created_vars, used_variables):
        """Translate one statement with ``numpy.ufunc.at`` so that in-place updates
        through repeated indices accumulate correctly.

        Raises `VectorisationError` whenever the statement cannot safely be
        expressed this way (feature disabled, unsupported operator, or the
        target variable is read through a non-``_idx`` index elsewhere).
        """
        if not self._use_ufunc_at_vectorisation:
            raise VectorisationError()
        # Avoids circular import
        from brian2.devices.device import device
        # See https://github.com/brian-team/brian2/pull/531 for explanation
        used = set(get_identifiers(statement.expr))
        used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')
        used_variables.update(used)
        if statement.var in used_variables:
            raise VectorisationError()
        expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)
        if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:
            if statement.op == ':=':
                op = '='
            else:
                op = statement.op
            line = f'{statement.var} {op} {expr}'
        elif statement.inplace:
            if statement.op == '+=':
                ufunc_name = '_numpy.add'
            elif statement.op == '*=':
                ufunc_name = '_numpy.multiply'
            elif statement.op == '/=':
                ufunc_name = '_numpy.divide'
            elif statement.op == '-=':
                ufunc_name = '_numpy.subtract'
            else:
                raise VectorisationError()
            array_name = device.get_array_name(variables[statement.var])
            idx = indices[statement.var]
            line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'
            line = self.conditional_write(line, statement, variables,
                                          conditional_write_vars=conditional_write_vars,
                                          created_vars=created_vars)
        else:
            raise VectorisationError()
        if len(statement.comment):
            line += f" # {statement.comment}"
        return line
    def vectorise_code(self, statements, variables, variable_indices, index='_idx'):
        """Generate vectorised code for `statements` via `ufunc_at_vectorisation`;
        if any statement cannot be vectorised, fall back to an explicit (slow)
        Python loop over ``_idx``."""
        created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
        try:
            lines = []
            used_variables = set()
            for statement in statements:
                lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')
                # We treat every statement individually with its own read and write code
                # to be on the safe side
                read, write, indices, conditional_write_vars = self.arrays_helper([statement])
                # We make sure that we only add code to `lines` after it went
                # through completely
                ufunc_lines = []
                # No need to load a variable if it is only in read because of
                # the in-place operation
                if (statement.inplace and
                        variable_indices[statement.var] != '_idx' and
                        statement.var not in get_identifiers(statement.expr)):
                    read = read - {statement.var}
                ufunc_lines.extend(self.read_arrays(read, write, indices,
                                                    variables, variable_indices))
                ufunc_lines.append(self.ufunc_at_vectorisation(statement,
                                                               variables,
                                                               variable_indices,
                                                               conditional_write_vars,
                                                               created_vars,
                                                               used_variables,
                                                               ))
                # Do not write back such values, the ufuncs have modified the
                # underlying array already
                if statement.inplace and variable_indices[statement.var] != '_idx':
                    write = write - {statement.var}
                ufunc_lines.extend(self.write_arrays([statement], read, write,
                                                     variables,
                                                     variable_indices))
                lines.extend(ufunc_lines)
        except VectorisationError:
            if self._use_ufunc_at_vectorisation:
                logger.info("Failed to vectorise code, falling back on Python loop: note that "
                            "this will be very slow! Switch to another code generation target for "
                            "best performance (e.g. cython). First line is: "+str(statements[0]),
                            once=True)
            lines = []
            lines.extend(['_full_idx = _idx',
                          'for _idx in _full_idx:',
                          '    _vectorisation_idx = _idx'
                          ])
            read, write, indices, conditional_write_vars = self.arrays_helper(statements)
            lines.extend(indent(code) for code in
                         self.read_arrays(read, write, indices,
                                          variables, variable_indices))
            for statement in statements:
                line = self.translate_statement(statement)
                if statement.var in conditional_write_vars:
                    lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))
                    lines.append(indent(line, 2))
                else:
                    lines.append(indent(line))
            lines.extend(indent(code) for code in
                         self.write_arrays(statements, read, write,
                                           variables, variable_indices))
        return lines
    def read_arrays(self, read, write, indices, variables, variable_indices):
        """Generate the lines that load index arrays and read variables from
        their underlying arrays into local names."""
        # index and read arrays (index arrays first)
        lines = []
        for varname in itertools.chain(indices, read):
            var = variables[varname]
            index = variable_indices[varname]
            # if index in iterate_all:
            #     line = '{varname} = {array_name}'
            # else:
            #     line = '{varname} = {array_name}.take({index})'
            # line = line.format(varname=varname, array_name=self.get_array_name(var), index=index)
            line = f"{varname} = {self.get_array_name(var)}"
            if not index in self.iterate_all:
                line += f"[{index}]"
            elif varname in write:
                # avoid potential issues with aliased variables, see github #259
                line += '.copy()'
            lines.append(line)
        return lines
    def write_arrays(self, statements, read, write, variables, variable_indices):
        """Generate the lines that write local variables back into the arrays;
        skipped when all operations were in-place on the whole vector."""
        # write arrays
        lines = []
        for varname in write:
            var = variables[varname]
            index_var = variable_indices[varname]
            # check if all operations were inplace and we're operating on the
            # whole vector, if so we don't need to write the array back
            if index_var not in self.iterate_all or varname in read:
                all_inplace = False
            else:
                all_inplace = True
                for stmt in statements:
                    if stmt.var == varname and not stmt.inplace:
                        all_inplace = False
                        break
            if not all_inplace:
                line = self.get_array_name(var)
                if index_var in self.iterate_all:
                    line = f"{line}[:]"
                else:
                    line = f"{line}[{index_var}]"
                line = f"{line} = {varname}"
                lines.append(line)
        return lines
    def conditional_write(self, line, stmt, variables, conditional_write_vars,
                          created_vars):
        """Index every array access in `line` with the statement's conditional
        write variable (e.g. ``not_refractory``) so the assignment only touches
        the elements where that condition holds."""
        if stmt.var in conditional_write_vars:
            subs = {}
            index = conditional_write_vars[stmt.var]
            # we replace all var with var[index], but actually we use this repl_string first because
            # we don't want to end up with lines like x[not_refractory[not_refractory]] when
            # multiple substitution passes are invoked
            repl_string = '#$(@#&$@$*U#@)$@(#' # this string shouldn't occur anywhere I hope! :)
            for varname, var in list(variables.items()):
                if isinstance(var, ArrayVariable) and not var.scalar:
                    subs[varname] = f"{varname}[{repl_string}]"
            # all newly created vars are arrays and will need indexing
            for varname in created_vars:
                subs[varname] = f"{varname}[{repl_string}]"
            # Also index _vectorisation_idx so that e.g. rand() works correctly
            subs['_vectorisation_idx'] = f"_vectorisation_idx[{repl_string}]"
            line = word_substitute(line, subs)
            line = line.replace(repl_string, index)
        return line
    def translate_one_statement_sequence(self, statements, scalar=False):
        """Translate a sequence of statements, using the straightforward path
        when no index is repeated and `vectorise_code` otherwise."""
        variables = self.variables
        variable_indices = self.variable_indices
        read, write, indices, conditional_write_vars = self.arrays_helper(statements)
        lines = []
        all_unique = not self.has_repeated_indices(statements)
        if scalar or all_unique:
            # Simple translation
            lines.extend(self.read_arrays(read, write, indices, variables,
                                          variable_indices))
            created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
            for stmt in statements:
                line = self.translate_statement(stmt)
                line = self.conditional_write(line, stmt, variables,
                                              conditional_write_vars,
                                              created_vars)
                lines.append(line)
            lines.extend(self.write_arrays(statements, read, write, variables,
                                           variable_indices))
        else:
            # More complex translation to deal with repeated indices
            lines.extend(self.vectorise_code(statements, variables,
                                             variable_indices))
        return lines
    def determine_keywords(self):
        """Expose to the code template whether scipy can be imported."""
        try:
            import scipy
            scipy_available = True
        except ImportError:
            scipy_available = False
        return {'_scipy_available': scipy_available}
################################################################################
# Implement functions
################################################################################
# Functions that exist under the same name in numpy
# (the numpy ufunc itself is registered as the implementation, so calls are
# dispatched straight to numpy at runtime)
for func_name, func in [('sin', np.sin), ('cos', np.cos), ('tan', np.tan),
                        ('sinh', np.sinh), ('cosh', np.cosh), ('tanh', np.tanh),
                        ('exp', np.exp), ('log', np.log), ('log10', np.log10),
                        ('sqrt', np.sqrt), ('arcsin', np.arcsin),
                        ('arccos', np.arccos), ('arctan', np.arctan),
                        ('abs', np.abs), ('sign', np.sign)]:
    DEFAULT_FUNCTIONS[func_name].implementations.add_implementation(NumpyCodeGenerator,
                                                                    code=func)
# Functions that are implemented in a somewhat special way
def randn_func(vectorisation_idx):
    """Draw standard-normal samples: one per element of `vectorisation_idx`,
    or a single scalar sample when the index itself is a scalar."""
    try:
        n = len(vectorisation_idx)
    except TypeError:
        # scalar _vectorisation_idx -> a single sample
        return np.random.randn()
    return np.random.randn(n)
def rand_func(vectorisation_idx):
    """Draw uniform [0, 1) samples: one per element of `vectorisation_idx`,
    or a single scalar sample when the index itself is a scalar."""
    try:
        n = len(vectorisation_idx)
    except TypeError:
        # scalar _vectorisation_idx -> a single sample
        return np.random.rand()
    return np.random.rand(n)
def poisson_func(lam, vectorisation_idx):
    """Draw Poisson(`lam`) samples: one per element of `vectorisation_idx`,
    or a single scalar sample when the index itself is a scalar."""
    try:
        n = len(vectorisation_idx)
    except TypeError:
        # scalar _vectorisation_idx -> a single sample
        return np.random.poisson(lam)
    return np.random.poisson(lam, size=n)
# Register the special-cased callables above as the numpy implementations of
# the corresponding Brian functions.
DEFAULT_FUNCTIONS['randn'].implementations.add_implementation(NumpyCodeGenerator,
                                                              code=randn_func)
DEFAULT_FUNCTIONS['rand'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=rand_func)
DEFAULT_FUNCTIONS['poisson'].implementations.add_implementation(NumpyCodeGenerator,
                                                                code=poisson_func)


def clip_func(array, a_min, a_max):
    """Element-wise clip of `array` to the closed interval [a_min, a_max]."""
    return np.clip(array, a_min, a_max)


DEFAULT_FUNCTIONS['clip'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=clip_func)


def int_func(value):
    """Cast `value` to a 32-bit integer (truncating floats like a C cast)."""
    return np.int32(value)


DEFAULT_FUNCTIONS['int'].implementations.add_implementation(NumpyCodeGenerator,
                                                            code=int_func)


def ceil_func(value):
    """Ceiling of `value`, returned as a 32-bit integer."""
    return np.int32(np.ceil(value))


DEFAULT_FUNCTIONS['ceil'].implementations.add_implementation(NumpyCodeGenerator,
                                                             code=ceil_func)


def floor_func(value):
    """Floor of `value`, returned as a 32-bit integer."""
    return np.int32(np.floor(value))


DEFAULT_FUNCTIONS['floor'].implementations.add_implementation(NumpyCodeGenerator,
                                                              code=floor_func)

# We need to explicitly add an implementation for the timestep function,
# otherwise Brian would *add* units during simulation, thinking that the
# timestep function would not work correctly otherwise. This would slow the
# function down significantly.
DEFAULT_FUNCTIONS['timestep'].implementations.add_implementation(NumpyCodeGenerator,
                                                                 code=timestep)
| 46.959538 | 110 | 0.551576 |
import itertools
import numpy as np
from brian2.parsing.bast import brian_dtype_from_dtype
from brian2.parsing.rendering import NumpyNodeRenderer
from brian2.core.functions import DEFAULT_FUNCTIONS, timestep
from brian2.core.variables import ArrayVariable
from brian2.utils.stringtools import get_identifiers, word_substitute, indent
from brian2.utils.logger import get_logger
from .base import CodeGenerator
__all__ = ['NumpyCodeGenerator']
logger = get_logger(__name__)
class VectorisationError(Exception):
pass
class NumpyCodeGenerator(CodeGenerator):
class_name = 'numpy'
_use_ufunc_at_vectorisation = True
def translate_expression(self, expr):
expr = word_substitute(expr, self.func_name_replacements)
return NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(expr, self.variables).strip()
def translate_statement(self, statement):
var, op, expr, comment = (statement.var, statement.op,
statement.expr, statement.comment)
if op == ':=':
op = '='
if (statement.used_boolean_variables is not None and len(statement.used_boolean_variables)==1
and brian_dtype_from_dtype(statement.dtype)=='float'
and statement.complexity_std>sum(statement.complexities.values())):
used_boolvars = statement.used_boolean_variables
bool_simp = statement.boolean_simplified_expressions
boolvar = used_boolvars[0]
for bool_assigns, simp_expr in bool_simp.items():
_, boolval = bool_assigns[0]
if boolval:
expr_true = simp_expr
else:
expr_false = simp_expr
code = f'{var} {op} _numpy.where({boolvar}, {expr_true}, {expr_false})'
else:
code = f"{var} {op} {self.translate_expression(expr)}"
if len(comment):
code += f" # {comment}"
return code
def ufunc_at_vectorisation(self, statement, variables, indices,
conditional_write_vars, created_vars, used_variables):
if not self._use_ufunc_at_vectorisation:
raise VectorisationError()
from brian2.devices.device import device
used = set(get_identifiers(statement.expr))
used = used.intersection(k for k in list(variables.keys()) if k in indices and indices[k]!='_idx')
used_variables.update(used)
if statement.var in used_variables:
raise VectorisationError()
expr = NumpyNodeRenderer(auto_vectorise=self.auto_vectorise).render_expr(statement.expr)
if statement.op == ':=' or indices[statement.var] == '_idx' or not statement.inplace:
if statement.op == ':=':
op = '='
else:
op = statement.op
line = f'{statement.var} {op} {expr}'
elif statement.inplace:
if statement.op == '+=':
ufunc_name = '_numpy.add'
elif statement.op == '*=':
ufunc_name = '_numpy.multiply'
elif statement.op == '/=':
ufunc_name = '_numpy.divide'
elif statement.op == '-=':
ufunc_name = '_numpy.subtract'
else:
raise VectorisationError()
array_name = device.get_array_name(variables[statement.var])
idx = indices[statement.var]
line = f'{ufunc_name}.at({array_name}, {idx}, {expr})'
line = self.conditional_write(line, statement, variables,
conditional_write_vars=conditional_write_vars,
created_vars=created_vars)
else:
raise VectorisationError()
if len(statement.comment):
line += f" # {statement.comment}"
return line
def vectorise_code(self, statements, variables, variable_indices, index='_idx'):
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
try:
lines = []
used_variables = set()
for statement in statements:
lines.append(f'# Abstract code: {statement.var} {statement.op} {statement.expr}')
read, write, indices, conditional_write_vars = self.arrays_helper([statement])
ufunc_lines = []
if (statement.inplace and
variable_indices[statement.var] != '_idx' and
statement.var not in get_identifiers(statement.expr)):
read = read - {statement.var}
ufunc_lines.extend(self.read_arrays(read, write, indices,
variables, variable_indices))
ufunc_lines.append(self.ufunc_at_vectorisation(statement,
variables,
variable_indices,
conditional_write_vars,
created_vars,
used_variables,
))
if statement.inplace and variable_indices[statement.var] != '_idx':
write = write - {statement.var}
ufunc_lines.extend(self.write_arrays([statement], read, write,
variables,
variable_indices))
lines.extend(ufunc_lines)
except VectorisationError:
if self._use_ufunc_at_vectorisation:
logger.info("Failed to vectorise code, falling back on Python loop: note that "
"this will be very slow! Switch to another code generation target for "
"best performance (e.g. cython). First line is: "+str(statements[0]),
once=True)
lines = []
lines.extend(['_full_idx = _idx',
'for _idx in _full_idx:',
' _vectorisation_idx = _idx'
])
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines.extend(indent(code) for code in
self.read_arrays(read, write, indices,
variables, variable_indices))
for statement in statements:
line = self.translate_statement(statement)
if statement.var in conditional_write_vars:
lines.append(indent(f'if {conditional_write_vars[statement.var]}:'))
lines.append(indent(line, 2))
else:
lines.append(indent(line))
lines.extend(indent(code) for code in
self.write_arrays(statements, read, write,
variables, variable_indices))
return lines
def read_arrays(self, read, write, indices, variables, variable_indices):
lines = []
for varname in itertools.chain(indices, read):
var = variables[varname]
index = variable_indices[varname]
line = f"{varname} = {self.get_array_name(var)}"
if not index in self.iterate_all:
line += f"[{index}]"
elif varname in write:
line += '.copy()'
lines.append(line)
return lines
def write_arrays(self, statements, read, write, variables, variable_indices):
lines = []
for varname in write:
var = variables[varname]
index_var = variable_indices[varname]
# whole vector, if so we don't need to write the array back
if index_var not in self.iterate_all or varname in read:
all_inplace = False
else:
all_inplace = True
for stmt in statements:
if stmt.var == varname and not stmt.inplace:
all_inplace = False
break
if not all_inplace:
line = self.get_array_name(var)
if index_var in self.iterate_all:
line = f"{line}[:]"
else:
line = f"{line}[{index_var}]"
line = f"{line} = {varname}"
lines.append(line)
return lines
def conditional_write(self, line, stmt, variables, conditional_write_vars,
created_vars):
if stmt.var in conditional_write_vars:
subs = {}
index = conditional_write_vars[stmt.var]
# multiple substitution passes are invoked
repl_string = 'epl_string}]"
for varname in created_vars:
subs[varname] = f"{varname}[{repl_string}]"
subs['_vectorisation_idx'] = f"_vectorisation_idx[{repl_string}]"
line = word_substitute(line, subs)
line = line.replace(repl_string, index)
return line
def translate_one_statement_sequence(self, statements, scalar=False):
variables = self.variables
variable_indices = self.variable_indices
read, write, indices, conditional_write_vars = self.arrays_helper(statements)
lines = []
all_unique = not self.has_repeated_indices(statements)
if scalar or all_unique:
lines.extend(self.read_arrays(read, write, indices, variables,
variable_indices))
created_vars = {stmt.var for stmt in statements if stmt.op == ':='}
for stmt in statements:
line = self.translate_statement(stmt)
line = self.conditional_write(line, stmt, variables,
conditional_write_vars,
created_vars)
lines.append(line)
lines.extend(self.write_arrays(statements, read, write, variables,
variable_indices))
else:
lines.extend(self.vectorise_code(statements, variables,
variable_indices))
return lines
def determine_keywords(self):
try:
import scipy
scipy_available = True
except ImportError:
scipy_available = False
return {'_scipy_available': scipy_available}
| true | true |
f721282f6a2dc461afc65e2af7a6340bab2f41d6 | 7,584 | py | Python | cfripper/rules/wildcard_principals.py | claytonbrown/cfripper | 869eb5861da3fcfaa5e2f5e877fa9c30f60cfce9 | [
"Apache-2.0"
] | 360 | 2018-08-08T12:34:58.000Z | 2022-03-25T17:01:41.000Z | cfripper/rules/wildcard_principals.py | Skyscanner/cfripper | 1bc3ff483ac9c126037f796950ebe52cf463ac17 | [
"Apache-2.0"
] | 40 | 2018-11-26T07:08:15.000Z | 2022-03-02T09:10:45.000Z | cfripper/rules/wildcard_principals.py | claytonbrown/cfripper | 869eb5861da3fcfaa5e2f5e877fa9c30f60cfce9 | [
"Apache-2.0"
] | 51 | 2018-11-09T11:46:32.000Z | 2022-03-28T08:47:28.000Z | __all__ = ["GenericWildcardPrincipalRule", "PartialWildcardPrincipalRule", "FullWildcardPrincipalRule"]
import logging
import re
from typing import Dict, Optional
from pycfmodel.model.cf_model import CFModel
from pycfmodel.model.resources.iam_managed_policy import IAMManagedPolicy
from pycfmodel.model.resources.iam_policy import IAMPolicy
from pycfmodel.model.resources.iam_role import IAMRole
from pycfmodel.model.resources.iam_user import IAMUser
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.s3_bucket_policy import S3BucketPolicy
from pycfmodel.model.resources.sns_topic_policy import SNSTopicPolicy
from pycfmodel.model.resources.sqs_queue_policy import SQSQueuePolicy
from cfripper.config.regex import REGEX_FULL_WILDCARD_PRINCIPAL, REGEX_PARTIAL_WILDCARD_PRINCIPAL
from cfripper.model.enums import RuleGranularity, RuleRisk
from cfripper.model.result import Result
from cfripper.rules.base_rules import PrincipalCheckingRule
logger = logging.getLogger(__file__)
class GenericWildcardPrincipalRule(PrincipalCheckingRule):
    """Base rule that flags ``Allow`` statements whose principals match a
    wildcard pattern.

    Subclasses choose the pattern via ``FULL_REGEX`` (full vs. partial
    wildcards) and set the corresponding risk level.
    """
    REASON_WILDCARD_PRINCIPAL = "{} should not allow wildcards in principals (principal: '{}')"
    GRANULARITY = RuleGranularity.RESOURCE
    # Bare 12-digit AWS account ID.
    AWS_ACCOUNT_ID_PATTERN = re.compile(r"^(\d{12})$")
    # IAM ARN; captures the account ID part (digits or '*').
    IAM_PATTERN = re.compile(r"arn:aws:iam::(\d*|\*):.*")
    FULL_REGEX = REGEX_FULL_WILDCARD_PRINCIPAL
    def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:
        """Scan every policy-bearing resource in the template and collect failures."""
        result = Result()
        for logical_id, resource in cfmodel.Resources.items():
            if isinstance(resource, (IAMManagedPolicy, IAMPolicy, S3BucketPolicy, SNSTopicPolicy, SQSQueuePolicy)):
                self.check_for_wildcards(result, logical_id, resource.Properties.PolicyDocument, extras)
            elif isinstance(resource, (IAMRole, IAMUser)):
                if isinstance(resource, IAMRole):
                    # Roles additionally have a trust policy that can contain wildcards.
                    self.check_for_wildcards(result, logical_id, resource.Properties.AssumeRolePolicyDocument, extras)
                if resource.Properties and resource.Properties.Policies:
                    for policy in resource.Properties.Policies:
                        self.check_for_wildcards(result, logical_id, policy.PolicyDocument, extras)
        return result
    def check_for_wildcards(
        self, result: Result, logical_id: str, resource: PolicyDocument, extras: Optional[Dict] = None
    ):
        """Add a failure for each ``Allow`` statement whose principal matches
        ``FULL_REGEX``, unless the account is in the allowed list or the
        statement is guarded by a ``Condition`` (then only a warning is logged)."""
        for statement in resource._statement_as_list():
            if statement.Effect == "Allow" and statement.principals_with(self.FULL_REGEX):
                for principal in statement.get_principal_list():
                    account_id_match = self.IAM_PATTERN.match(principal) or self.AWS_ACCOUNT_ID_PATTERN.match(principal)
                    account_id = account_id_match.group(1) if account_id_match else None
                    # Check if account ID is allowed. `self._get_allowed_from_config()` used here
                    # to reduce number of false negatives and only allow exemptions for accounts
                    # which belong to AWS Services (such as ELB and ElastiCache).
                    if account_id in self._get_allowed_from_config():
                        continue
                    if statement.Condition and statement.Condition.dict():
                        logger.warning(
                            f"Not adding {type(self).__name__} failure in {logical_id} because there are conditions: "
                            f"{statement.Condition}"
                        )
                    else:
                        self.add_failure_to_result(
                            result,
                            self.REASON_WILDCARD_PRINCIPAL.format(logical_id, principal),
                            resource_ids={logical_id},
                            context={
                                "config": self._config,
                                "extras": extras,
                                "logical_id": logical_id,
                                "resource": resource,
                                "statement": statement,
                                "principal": principal,
                                "account_id": account_id,
                            },
                        )
class PartialWildcardPrincipalRule(GenericWildcardPrincipalRule):
    """
    Checks for any wildcard or account-wide principals defined in any statements. This rule will flag
    as non-compliant any principals where `root` or `*` are included at the end of the value, for
    example, `arn:aws:iam:12345:12345*`.

    Risk:
        It might allow other AWS identities or the root access of the account to escalate privileges.

    Fix:
        Where possible, restrict the access to only the required resources.
        For example, instead of `Principal: "*"`, include a list of the roles that need access.

    Filters context:
        | Parameter   | Type               | Description                                                    |
        |:-----------:|:------------------:|:--------------------------------------------------------------:|
        |`config`     | str                | `config` variable available inside the rule                    |
        |`extras`     | str                | `extras` variable available inside the rule                    |
        |`logical_id` | str                | ID used in Cloudformation to refer the resource being analysed |
        |`resource`   | `S3BucketPolicy`   | Resource that is being addressed                               |
        |`statement`  | `Statement`        | Statement being checked found in the Resource                  |
        |`principal`  | str                | AWS Principal being checked found in the statement             |
        |`account_id` | str                | Account ID found in the principal                              |
    """
    REASON_WILDCARD_PRINCIPAL = (
        "{} should not allow wildcard in principals or account-wide principals (principal: '{}')"
    )
    # Medium risk: the principal is broad but still scoped more than a full "*".
    RISK_VALUE = RuleRisk.MEDIUM
    # Overrides the base pattern to also match partial/account-wide wildcards.
    FULL_REGEX = REGEX_PARTIAL_WILDCARD_PRINCIPAL
class FullWildcardPrincipalRule(GenericWildcardPrincipalRule):
"""
Checks for any wildcard principals defined in any statements.
Risk:
It might allow other AWS identities to escalate privileges.
Fix:
Where possible, restrict the access to only the required resources.
For example, instead of `Principal: "*"`, include a list of the roles that need access.
Filters context:
| Parameter | Type | Description |
|:-----------:|:------------------:|:--------------------------------------------------------------:|
|`config` | str | `config` variable available inside the rule |
|`extras` | str | `extras` variable available inside the rule |
|`logical_id` | str | ID used in Cloudformation to refer the resource being analysed |
|`resource` | `S3BucketPolicy` | Resource that is being addressed |
|`statement` | `Statement` | Statement being checked found in the Resource |
|`principal` | str | AWS Principal being checked found in the statement |
|`account_id` | str | Account ID found in the principal |
"""
RISK_VALUE = RuleRisk.HIGH
| 54.956522 | 120 | 0.587421 | __all__ = ["GenericWildcardPrincipalRule", "PartialWildcardPrincipalRule", "FullWildcardPrincipalRule"]
import logging
import re
from typing import Dict, Optional
from pycfmodel.model.cf_model import CFModel
from pycfmodel.model.resources.iam_managed_policy import IAMManagedPolicy
from pycfmodel.model.resources.iam_policy import IAMPolicy
from pycfmodel.model.resources.iam_role import IAMRole
from pycfmodel.model.resources.iam_user import IAMUser
from pycfmodel.model.resources.properties.policy_document import PolicyDocument
from pycfmodel.model.resources.s3_bucket_policy import S3BucketPolicy
from pycfmodel.model.resources.sns_topic_policy import SNSTopicPolicy
from pycfmodel.model.resources.sqs_queue_policy import SQSQueuePolicy
from cfripper.config.regex import REGEX_FULL_WILDCARD_PRINCIPAL, REGEX_PARTIAL_WILDCARD_PRINCIPAL
from cfripper.model.enums import RuleGranularity, RuleRisk
from cfripper.model.result import Result
from cfripper.rules.base_rules import PrincipalCheckingRule
logger = logging.getLogger(__file__)
class GenericWildcardPrincipalRule(PrincipalCheckingRule):
REASON_WILDCARD_PRINCIPAL = "{} should not allow wildcards in principals (principal: '{}')"
GRANULARITY = RuleGranularity.RESOURCE
AWS_ACCOUNT_ID_PATTERN = re.compile(r"^(\d{12})$")
IAM_PATTERN = re.compile(r"arn:aws:iam::(\d*|\*):.*")
FULL_REGEX = REGEX_FULL_WILDCARD_PRINCIPAL
def invoke(self, cfmodel: CFModel, extras: Optional[Dict] = None) -> Result:
result = Result()
for logical_id, resource in cfmodel.Resources.items():
if isinstance(resource, (IAMManagedPolicy, IAMPolicy, S3BucketPolicy, SNSTopicPolicy, SQSQueuePolicy)):
self.check_for_wildcards(result, logical_id, resource.Properties.PolicyDocument, extras)
elif isinstance(resource, (IAMRole, IAMUser)):
if isinstance(resource, IAMRole):
self.check_for_wildcards(result, logical_id, resource.Properties.AssumeRolePolicyDocument, extras)
if resource.Properties and resource.Properties.Policies:
for policy in resource.Properties.Policies:
self.check_for_wildcards(result, logical_id, policy.PolicyDocument, extras)
return result
def check_for_wildcards(
self, result: Result, logical_id: str, resource: PolicyDocument, extras: Optional[Dict] = None
):
for statement in resource._statement_as_list():
if statement.Effect == "Allow" and statement.principals_with(self.FULL_REGEX):
for principal in statement.get_principal_list():
account_id_match = self.IAM_PATTERN.match(principal) or self.AWS_ACCOUNT_ID_PATTERN.match(principal)
account_id = account_id_match.group(1) if account_id_match else None
if account_id in self._get_allowed_from_config():
continue
if statement.Condition and statement.Condition.dict():
logger.warning(
f"Not adding {type(self).__name__} failure in {logical_id} because there are conditions: "
f"{statement.Condition}"
)
else:
self.add_failure_to_result(
result,
self.REASON_WILDCARD_PRINCIPAL.format(logical_id, principal),
resource_ids={logical_id},
context={
"config": self._config,
"extras": extras,
"logical_id": logical_id,
"resource": resource,
"statement": statement,
"principal": principal,
"account_id": account_id,
},
)
class PartialWildcardPrincipalRule(GenericWildcardPrincipalRule):
REASON_WILDCARD_PRINCIPAL = (
"{} should not allow wildcard in principals or account-wide principals (principal: '{}')"
)
RISK_VALUE = RuleRisk.MEDIUM
FULL_REGEX = REGEX_PARTIAL_WILDCARD_PRINCIPAL
class FullWildcardPrincipalRule(GenericWildcardPrincipalRule):
RISK_VALUE = RuleRisk.HIGH
| true | true |
f72128c8178af94dd8a0a21b6e9bdb1ebcf3a076 | 2,575 | py | Python | graphs/karger.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | graphs/karger.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | graphs/karger.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | """
An implementation of Karger's Algorithm for partitioning a graph.
"""
from __future__ import annotations
import random
# Adjacency list representation of this graph:
# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
TEST_GRAPH = {
"1": ["2", "3", "4", "5"],
"2": ["1", "3", "4", "5"],
"3": ["1", "2", "4", "5", "10"],
"4": ["1", "2", "3", "5", "6"],
"5": ["1", "2", "3", "4", "7"],
"6": ["7", "8", "9", "10", "4"],
"7": ["6", "8", "9", "10", "5"],
"8": ["6", "7", "9", "10"],
"9": ["6", "7", "8", "10"],
"10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
"""
Partitions a graph using Karger's Algorithm. Implemented from
pseudocode found here:
https://en.wikipedia.org/wiki/Karger%27s_algorithm.
This function involves random choices, meaning it will not give
consistent outputs.
Args:
graph: A dictionary containing adacency lists for the graph.
Nodes must be strings.
Returns:
The cutset of the cut found by Karger's Algorithm.
>>> graph = {'0':['1'], '1':['0']}
>>> partition_graph(graph)
{('0', '1')}
"""
# Dict that maps contracted nodes to a list of all the nodes it "contains."
contracted_nodes = {node: {node} for node in graph}
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
# Choose a random edge.
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u])
# Contract edge (u, v) to new node uv
uv = u + v
uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
uv_neighbors.remove(u)
uv_neighbors.remove(v)
graph_copy[uv] = uv_neighbors
for neighbor in uv_neighbors:
graph_copy[neighbor].append(uv)
contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
# Remove nodes u and v.
del graph_copy[u]
del graph_copy[v]
for neighbor in uv_neighbors:
if u in graph_copy[neighbor]:
graph_copy[neighbor].remove(u)
if v in graph_copy[neighbor]:
graph_copy[neighbor].remove(v)
# Find cutset.
groups = [contracted_nodes[node] for node in graph_copy]
return {
(node, neighbor)
for node in groups[0]
for neighbor in graph[node]
if neighbor in groups[1]
}
if __name__ == "__main__":
print(partition_graph(TEST_GRAPH))
| 29.597701 | 88 | 0.572816 |
from __future__ import annotations
import random
TEST_GRAPH = {
"1": ["2", "3", "4", "5"],
"2": ["1", "3", "4", "5"],
"3": ["1", "2", "4", "5", "10"],
"4": ["1", "2", "3", "5", "6"],
"5": ["1", "2", "3", "4", "7"],
"6": ["7", "8", "9", "10", "4"],
"7": ["6", "8", "9", "10", "5"],
"8": ["6", "7", "9", "10"],
"9": ["6", "7", "8", "10"],
"10": ["6", "7", "8", "9", "3"],
}
def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
contracted_nodes = {node: {node} for node in graph}
graph_copy = {node: graph[node][:] for node in graph}
while len(graph_copy) > 2:
u = random.choice(list(graph_copy.keys()))
v = random.choice(graph_copy[u])
uv = u + v
uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
uv_neighbors.remove(u)
uv_neighbors.remove(v)
graph_copy[uv] = uv_neighbors
for neighbor in uv_neighbors:
graph_copy[neighbor].append(uv)
contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))
del graph_copy[u]
del graph_copy[v]
for neighbor in uv_neighbors:
if u in graph_copy[neighbor]:
graph_copy[neighbor].remove(u)
if v in graph_copy[neighbor]:
graph_copy[neighbor].remove(v)
groups = [contracted_nodes[node] for node in graph_copy]
return {
(node, neighbor)
for node in groups[0]
for neighbor in graph[node]
if neighbor in groups[1]
}
if __name__ == "__main__":
print(partition_graph(TEST_GRAPH))
| true | true |
f721291ec9a303b02a2cdf00dbe42788ad4a0a98 | 1,661 | py | Python | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 176 | 2019-11-16T19:44:08.000Z | 2021-09-10T22:16:04.000Z | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 12 | 2019-11-21T02:02:07.000Z | 2020-02-17T21:45:57.000Z | setup.py | chermed/assembly | 4c993d19bc9d33c1641323e03231e9ecad711b38 | [
"MIT"
] | 12 | 2019-11-20T08:07:11.000Z | 2021-02-27T09:52:06.000Z | """
Assembly
"""
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
__about__ = {}
with open(os.path.join(base_dir, "assembly", "about.py")) as f:
exec(f.read(), __about__)
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
with open("README.md", "r") as f:
long_description = f.read()
setup(
name=__about__["__title__"],
version=__about__["__version__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
description=__about__["__summary__"],
url=__about__["__uri__"],
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=['assembly'],
entry_points=dict(console_scripts=[
'asm=assembly.scripts:cmd',
]),
include_package_data=True,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requires,
keywords=['flask',
'assembly',
'templates',
'views',
'classy',
'framework',
"mvc",
"blueprint"],
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False
)
| 27.683333 | 81 | 0.615292 |
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
__about__ = {}
with open(os.path.join(base_dir, "assembly", "about.py")) as f:
exec(f.read(), __about__)
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
with open("README.md", "r") as f:
long_description = f.read()
setup(
name=__about__["__title__"],
version=__about__["__version__"],
license=__about__["__license__"],
author=__about__["__author__"],
author_email=__about__["__email__"],
description=__about__["__summary__"],
url=__about__["__uri__"],
long_description=long_description,
long_description_content_type="text/markdown",
py_modules=['assembly'],
entry_points=dict(console_scripts=[
'asm=assembly.scripts:cmd',
]),
include_package_data=True,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requires,
keywords=['flask',
'assembly',
'templates',
'views',
'classy',
'framework',
"mvc",
"blueprint"],
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
zip_safe=False
)
| true | true |
f7212933bf16d8d621d170fadea4f1c611eeef47 | 2,438 | py | Python | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | 2 | 2020-10-21T22:14:15.000Z | 2020-10-21T22:14:16.000Z | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | .history/postImages/index_20201006214330.py | Lambda-School-Labs/Labs27-C-Bridges-To-Prosperity-BE | 9a8289d8550115362c46dea3ed8570b789c09a10 | [
"MIT"
] | null | null | null | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
for i in finCsv:
x = i.split(',')
id = x[6]
finalObj[id]= {}
if x[6] in obj:
finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
finalObj[x[6]]['after_img'] = obj[x[6]]['after_img'][0:-1]
# finalObj[x[6]]['district'] = x[1]
# finalObj[x[6]]['sector'] = x[2]
# finalObj[x[6]]['cell'] = x[3]
# finalObj[x[6]]['bridge_site'] = x[4]
# finalObj[x[6]]['stage'] = x[5]
# finalObj[x[6]]['id'] = int(x[6])
# finalObj[x[6]]['type'] = x[7]
# finalObj[x[6]]['latt'] = float(x[8])
# finalObj[x[6]]['long'] = float(x[9])
# try:
# serv = float(x[10])
# except:
# serv = x[10]
# sv = x[13].split(' ')[2]
# finalObj[x[6]]['served'] = serv
# finalObj[x[6]]['community_served'] = x[14]
# try:
# pId = int(x[15])
# except :
# pId = x[15]
# finalObj[x[6]]['provId'] = pId
# finalObj[x[6]]['districtId'] = int(x[16])
# finalObj[x[6]]['sectorId'] = int(x[17])
# finalObj[x[6]]['cellId'] = int(x[18][0:-1])
# print(id)
# Map the header row of final.csv onto the last-parsed data row and attach
# the values to `finalObj`.
#
# NOTE(review): `x` and `id` are leftovers from the `for i in finCsv` loop
# above, so only the values of the LAST csv row are applied here --
# presumably this block was meant to run inside that loop; confirm intent.
row = fin[0].split(',')
for i in range(len(row)):
    key = row[i].replace(' ', "_")
    key = key.strip()
    # Columns 8/9 are renamed; column 11 is skipped entirely.
    if i == 8:
        key = 'latitude'
    if i == 9:
        key = 'longitude'
    if i == 11:
        continue
    # Convert numeric fields to float, falling back to the raw string.
    # (The original had an `else: val = x[i]` clause after the try, which
    # re-assigned the raw string on success and silently discarded every
    # float conversion.)
    try:
        val = float(x[i])
    except ValueError:
        val = x[i]
    finalObj[id][key.lower()] = val
print(finalObj['1013351'])
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
| 30.475 | 97 | 0.454471 | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
finalCsv = df[1:]
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
finalObj = {}
for i in finCsv:
x = i.split(',')
id = x[6]
finalObj[id]= {}
if x[6] in obj:
finalObj[x[6]]['before_img'] = obj[x[6]]['before_img']
finalObj[x[6]]['after_img'] = obj[x[6]]['after_img'][0:-1]
row = fin[0].split(',')
for i in range(len(row)):
key = row[i].replace(' ',"_")
key = key.strip()
if i == 8:
key = 'latitude'
if i == 9:
key = 'longitude'
if i == 11:
continue
try:
val = float(x[i])
except ValueError :
val = x[i]
else:
val = x[i]
finalObj[id][key.lower()] = val
print(finalObj['1013351'])
| true | true |
f7212939f549dcbfa9bf8b9ff6fb82924b643d38 | 1,241 | py | Python | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 1,793 | 2015-07-20T14:14:18.000Z | 2022-03-29T13:00:16.000Z | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 183 | 2015-09-04T14:00:57.000Z | 2022-03-19T15:52:13.000Z | harnesses/dict_conv.py | ujjwalsh/oniguruma | 604ea58f210b301d572b9fcf89d6e03fbb1df1f9 | [
"BSD-2-Clause"
] | 339 | 2015-09-03T11:13:46.000Z | 2022-03-20T08:21:15.000Z | # -*- coding: utf-8 -*-
# dict_conv.py (Python3 script)
import sys
ENC_UTF16_BE = 1
ENC_UTF16_LE = 2
def add_char(enc, s, c):
if enc == ENC_UTF16_BE:
s += "\\x00"
s += c
if enc == ENC_UTF16_LE:
s += "\\x00"
return s
def conv(enc, s):
n = len(s)
r = ""
i = 0
while i < n:
c = s[i]
if c == '\\':
c = s[i+1]
if c == '\\' or c == '"':
r = add_char(enc, r, "\\" + c)
i += 2
continue
else:
raise("Unknown escape {0}".format(s))
r = add_char(enc, r, c)
i += 1
return r
def main(enc):
print("# This file was generated by dict_conv.py.")
for line in sys.stdin:
s = line.strip()
if s[0] == '#':
print(s)
continue
if s[0] == '"' and s[-1] == '"':
s = conv(enc, s[1:-1])
print("\"{0}\"".format(s))
else:
raise("Invalid format {0}".format(s))
def usage(argv):
raise RuntimeError("Usage: python {0} utf16_be/utf16_le".format(argv[0]))
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
if argc >= 2:
s = argv[1]
if s == 'utf16_be':
enc = ENC_UTF16_BE
elif s == 'utf16_le':
enc = ENC_UTF16_LE
else:
usage(argv)
else:
usage(argv)
main(enc)
| 17 | 75 | 0.498791 |
import sys
ENC_UTF16_BE = 1
ENC_UTF16_LE = 2
def add_char(enc, s, c):
if enc == ENC_UTF16_BE:
s += "\\x00"
s += c
if enc == ENC_UTF16_LE:
s += "\\x00"
return s
def conv(enc, s):
n = len(s)
r = ""
i = 0
while i < n:
c = s[i]
if c == '\\':
c = s[i+1]
if c == '\\' or c == '"':
r = add_char(enc, r, "\\" + c)
i += 2
continue
else:
raise("Unknown escape {0}".format(s))
r = add_char(enc, r, c)
i += 1
return r
def main(enc):
print("
for line in sys.stdin:
s = line.strip()
if s[0] == '#':
print(s)
continue
if s[0] == '"' and s[-1] == '"':
s = conv(enc, s[1:-1])
print("\"{0}\"".format(s))
else:
raise("Invalid format {0}".format(s))
def usage(argv):
raise RuntimeError("Usage: python {0} utf16_be/utf16_le".format(argv[0]))
if __name__ == "__main__":
argv = sys.argv
argc = len(argv)
if argc >= 2:
s = argv[1]
if s == 'utf16_be':
enc = ENC_UTF16_BE
elif s == 'utf16_le':
enc = ENC_UTF16_LE
else:
usage(argv)
else:
usage(argv)
main(enc)
| true | true |
f721296a022523474df762cfe7ddb9431b342931 | 1,300 | py | Python | src/relaxed/infer/hypothesis_test.py | gradhep/smooth | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 4 | 2020-05-18T17:43:07.000Z | 2020-07-13T12:05:10.000Z | src/relaxed/infer/hypothesis_test.py | gradhep/relaxed | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 13 | 2021-05-13T20:59:55.000Z | 2022-03-25T12:04:44.000Z | src/relaxed/infer/hypothesis_test.py | gradhep/relaxed | 9ed6f1d622fb5346c46f1b9f62aed886b73fe09a | [
"BSD-3-Clause"
] | 3 | 2020-05-21T13:24:10.000Z | 2021-04-22T12:36:33.000Z | """Calculate expected CLs values with hypothesis tests."""
from __future__ import annotations
__all__ = ("hypotest",)
from functools import partial
import jax.numpy as jnp
import pyhf
from chex import Array
from jax import jit
from ..mle import fit, fixed_poi_fit
@partial(jit, static_argnames=["model", "return_mle_pars"]) # forward pass
def hypotest(
test_poi: float,
data: Array,
model: pyhf.Model,
lr: float,
return_mle_pars: bool = False,
) -> tuple[Array, Array] | Array:
# hard-code 1 as inits for now
# TODO: need to parse different inits for constrained and global fits
init_pars = jnp.ones_like(jnp.asarray(model.config.suggested_init()))
conditional_pars = fixed_poi_fit(
data, model, poi_condition=test_poi, init_pars=init_pars[:-1], lr=lr
)
mle_pars = fit(data, model, init_pars=init_pars, lr=lr)
profile_likelihood = -2 * (
model.logpdf(conditional_pars, data)[0] - model.logpdf(mle_pars, data)[0]
)
poi_hat = mle_pars[model.config.poi_index]
qmu = jnp.where(poi_hat < test_poi, profile_likelihood, 0.0)
CLsb = 1 - pyhf.tensorlib.normal_cdf(jnp.sqrt(qmu))
altval = 0.0
CLb = 1 - pyhf.tensorlib.normal_cdf(altval)
CLs = CLsb / CLb
return (CLs, mle_pars) if return_mle_pars else CLs
| 30.232558 | 81 | 0.698462 | from __future__ import annotations
__all__ = ("hypotest",)
from functools import partial
import jax.numpy as jnp
import pyhf
from chex import Array
from jax import jit
from ..mle import fit, fixed_poi_fit
@partial(jit, static_argnames=["model", "return_mle_pars"])
def hypotest(
test_poi: float,
data: Array,
model: pyhf.Model,
lr: float,
return_mle_pars: bool = False,
) -> tuple[Array, Array] | Array:
init_pars = jnp.ones_like(jnp.asarray(model.config.suggested_init()))
conditional_pars = fixed_poi_fit(
data, model, poi_condition=test_poi, init_pars=init_pars[:-1], lr=lr
)
mle_pars = fit(data, model, init_pars=init_pars, lr=lr)
profile_likelihood = -2 * (
model.logpdf(conditional_pars, data)[0] - model.logpdf(mle_pars, data)[0]
)
poi_hat = mle_pars[model.config.poi_index]
qmu = jnp.where(poi_hat < test_poi, profile_likelihood, 0.0)
CLsb = 1 - pyhf.tensorlib.normal_cdf(jnp.sqrt(qmu))
altval = 0.0
CLb = 1 - pyhf.tensorlib.normal_cdf(altval)
CLs = CLsb / CLb
return (CLs, mle_pars) if return_mle_pars else CLs
| true | true |
f7212c9ffcc95acbd3066fc3058f89d2b03ec98b | 1,750 | py | Python | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | cheeseprism/event.py | msabramo/CheesePrism | 3880528fb5a83fc650860d41e77729853081d404 | [
"BSD-2-Clause"
] | null | null | null | from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
class IIndexEvent(Interface):
"""
An lower level event involving the index
"""
class IIndexUpdate(Interface):
"""
An low level event involving the index
"""
class IPackageEvent(IIndexEvent):
"""
An event involving a package
"""
path = Attribute('Path to package')
class IPackageAdded(IPackageEvent):
"""
A package is added to the repository
"""
class IPackageRemoved(IPackageEvent):
"""
A package is removed to the repository
"""
class IndexEvent(object):
implements(IIndexEvent)
def __init__(self, datafile, index, pkgdatas=None):
self.index = index
self.datafile = datafile
self.pkgdatas = pkgdatas
class IndexUpdate(IndexEvent):
implements(IIndexUpdate)
class PackageEvent(object):
"""
Baseclass for pacakage events
"""
implements(IPackageEvent)
def __init__(self, index_manager, path=None, name=None, version=None):
self.name = name
self.version = version
self.im = index_manager
self.path = path
if self.name is None and self.path:
info = self.im.pkginfo_from_file(path, self.im.move_on_error)
self.name = info.name
self.version = info.version
class PackageAdded(PackageEvent):
implements(IPackageAdded)
def __init__(self, index_manager, path=None, name=None, version=None, rebuild_leaf=True):
super(PackageAdded, self).__init__(index_manager, path, name, version)
self.rebuild_leaf = rebuild_leaf
class PackageRemoved(PackageEvent):
implements(IPackageRemoved)
| 22.727273 | 93 | 0.668571 | from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
class IIndexEvent(Interface):
class IIndexUpdate(Interface):
class IPackageEvent(IIndexEvent):
path = Attribute('Path to package')
class IPackageAdded(IPackageEvent):
class IPackageRemoved(IPackageEvent):
class IndexEvent(object):
implements(IIndexEvent)
def __init__(self, datafile, index, pkgdatas=None):
self.index = index
self.datafile = datafile
self.pkgdatas = pkgdatas
class IndexUpdate(IndexEvent):
implements(IIndexUpdate)
class PackageEvent(object):
implements(IPackageEvent)
def __init__(self, index_manager, path=None, name=None, version=None):
self.name = name
self.version = version
self.im = index_manager
self.path = path
if self.name is None and self.path:
info = self.im.pkginfo_from_file(path, self.im.move_on_error)
self.name = info.name
self.version = info.version
class PackageAdded(PackageEvent):
implements(IPackageAdded)
def __init__(self, index_manager, path=None, name=None, version=None, rebuild_leaf=True):
super(PackageAdded, self).__init__(index_manager, path, name, version)
self.rebuild_leaf = rebuild_leaf
class PackageRemoved(PackageEvent):
implements(IPackageRemoved)
| true | true |
f7212d29fbadd9f4d8375d99f948230868b7821d | 520 | py | Python | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 6 | 2018-05-30T10:41:56.000Z | 2020-08-05T16:47:54.000Z | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 39 | 2018-07-11T14:44:01.000Z | 2019-08-06T12:27:43.000Z | tests/test_parallel.py | seznam/flexp | 84043150a80474809d066a06db02cbbd858f349e | [
"BSD-3-Clause"
] | 3 | 2018-07-11T14:54:39.000Z | 2019-04-07T04:47:29.000Z | from __future__ import print_function
import time
import unittest
from flexp.flow.parallel import parallelize
def add_two(x):
return x + 2
class TestParallel(unittest.TestCase):
def test_parallel(self):
count = 50
data = range(0, count)
start = time.clock()
res = list(parallelize(add_two, data, 25))
end = time.clock()
print("Time to process {}".format(end - start))
assert len(res) == count
assert sum(res) == (2 + count + 1) * count / 2
| 20.8 | 55 | 0.617308 | from __future__ import print_function
import time
import unittest
from flexp.flow.parallel import parallelize
def add_two(x):
return x + 2
class TestParallel(unittest.TestCase):
def test_parallel(self):
count = 50
data = range(0, count)
start = time.clock()
res = list(parallelize(add_two, data, 25))
end = time.clock()
print("Time to process {}".format(end - start))
assert len(res) == count
assert sum(res) == (2 + count + 1) * count / 2
| true | true |
f7212eed7b4fc035239879fd2bda4106c25fb513 | 2,399 | py | Python | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | null | null | null | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | null | null | null | CNN/extract.py | skywolf829/CSE5559_Final_Project | c7b29e6fc0cbfd81252edbadaa0d733a0c24bee7 | [
"MIT"
] | 1 | 2020-05-02T05:58:55.000Z | 2020-05-02T05:58:55.000Z | ## Basic Python libraries
import os
from PIL import Image
## Deep learning and array processing libraries
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
## Inner-project imports
from model import EncoderCNN, DecoderRNN
##### Code begins #####
# Path to config file
image_directory = './CNN/images/'
network_directory = './CNN/models/'
# Setting up other necessary paths
encoder_path = f'{network_directory}encoder-5-3000.pkl'
# Define the compute device (either GPU or CPU)
if torch.cuda.is_available():
compute_device = torch.device('cuda:0')
else:
compute_device = torch.device('cpu')
print(f'Using device: {compute_device}')
# Create the data transforms for evaluating
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Configure network
network = EncoderCNN(embed_size=256)
network = network.eval()
network.load_state_dict(torch.load(encoder_path, map_location='cpu'))
network = network.to(compute_device)
def get_visual_features(img):
"""
Extracts the visual features from an input image. Converts input
into PIL Image, normalizes the image, then feeds it through a CNN.
The features returned from the CNN are then pooled into a 1x512x1x1
and finally squeezed to produce our [512] array output.
Input
img :: 3D NumPy array
Takes a [x, y, 3] NumPy array to be converted into a PIL Image
Output
features :: 1D NumPy array
Returns a [512] NumPy array of the visual features from the CNN
"""
# Convert to PIL Image and perform transformation
img = Image.fromarray(img).convert('RGB')
img = img.resize([224, 224], Image.LANCZOS)
img = transform(img)
# Add a 4th dimension and send to compute device (GPU or CPU)
img = img.unsqueeze(0)
img = img.to(compute_device)
# Feed input through CNN
features = network(img)
# Squeeze into a [512] vector
features = features.squeeze()
# Convert to NumPy
features = features.cpu().detach().numpy()
return features
# Below is only there for testing, commented out for now
"""
if __name__ == '__main__':
# Inference
img = Image.open(f'{image_directory}input/1.png')
img = np.asarray(img)
features = get_visual_features(img)
print('End')
""" | 28.903614 | 132 | 0.709462 | rt Image
.nn.functional as F
import torchvision
import torchvision.transforms as transforms
derCNN, DecoderRNN
ork_directory}encoder-5-3000.pkl'
if torch.cuda.is_available():
compute_device = torch.device('cuda:0')
else:
compute_device = torch.device('cpu')
print(f'Using device: {compute_device}')
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
network = EncoderCNN(embed_size=256)
network = network.eval()
network.load_state_dict(torch.load(encoder_path, map_location='cpu'))
network = network.to(compute_device)
def get_visual_features(img):
img = Image.fromarray(img).convert('RGB')
img = img.resize([224, 224], Image.LANCZOS)
img = transform(img)
img = img.unsqueeze(0)
img = img.to(compute_device)
features = network(img)
features = features.squeeze()
features = features.cpu().detach().numpy()
return features
| true | true |
f72130741418bf63d0a3ae4aa5d952c8d92a5ac0 | 72 | py | Python | python/dcf-tools/dcf2dev/__init__.py | tshu/lely-core | fd0ceff2db726af6d2a766039a0b5a6d33d056e8 | [
"Apache-2.0"
] | 4 | 2020-12-27T11:31:57.000Z | 2022-02-09T11:32:08.000Z | python/dcf-tools/dcf2dev/__init__.py | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | null | null | null | python/dcf-tools/dcf2dev/__init__.py | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | 1 | 2022-01-03T01:41:59.000Z | 2022-01-03T01:41:59.000Z | from .cdevice import CDevice, CObject, CSubObject, CValue # noqa: F401
| 36 | 71 | 0.763889 | from .cdevice import CDevice, CObject, CSubObject, CValue
| true | true |
f72130c7c2fc80d8513e06cfe21833534c1bec7a | 1,022 | py | Python | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | 1 | 2021-11-11T01:49:28.000Z | 2021-11-11T01:49:28.000Z | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | null | null | null | tools/grafana/daemon/lib/parse/sen_bin_parse.py | Flowm/move-on-helium-sensors | 3794d38671e1976c9801bcb8a9639465cdddb731 | [
"Apache-2.0"
] | null | null | null | import logging
from subprocess import Popen, PIPE, STDOUT
from lib.parse.sen_ascii_parse import SenAsciiParse
class SenBinParse:
def __init__(self):
self.ascii_parser = SenAsciiParse()
def parse_packet(self, packet, with_header=True):
if len(packet) < 10:
return
length = len(packet)
status = 0
data = packet
invalid_chunks = 0
if with_header:
length, status = packet[:2]
data = packet[2:-1]
invalid_chunks = packet[-1]
logging.debug("BIN IN: CHK=%d" % invalid_chunks)
#if invalid_chunks != 0:
# return
parser = Popen(['moveon-sen-parser', str(invalid_chunks)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout = parser.communicate(input=data)[0]
logging.debug("BIN OUT: %s" % stdout.strip().decode())
for line in stdout.decode().splitlines():
for data in self.ascii_parser.parse_packet(line, "com"):
yield data
| 28.388889 | 106 | 0.596869 | import logging
from subprocess import Popen, PIPE, STDOUT
from lib.parse.sen_ascii_parse import SenAsciiParse
class SenBinParse:
def __init__(self):
self.ascii_parser = SenAsciiParse()
def parse_packet(self, packet, with_header=True):
if len(packet) < 10:
return
length = len(packet)
status = 0
data = packet
invalid_chunks = 0
if with_header:
length, status = packet[:2]
data = packet[2:-1]
invalid_chunks = packet[-1]
logging.debug("BIN IN: CHK=%d" % invalid_chunks)
parser = Popen(['moveon-sen-parser', str(invalid_chunks)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout = parser.communicate(input=data)[0]
logging.debug("BIN OUT: %s" % stdout.strip().decode())
for line in stdout.decode().splitlines():
for data in self.ascii_parser.parse_packet(line, "com"):
yield data
| true | true |
f72130ca917b168f52b2f7177e043fcb23096456 | 1,897 | py | Python | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | 28 | 2015-01-28T11:17:04.000Z | 2022-02-07T12:48:22.000Z | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | null | null | null | tools/concatlibs.py | ZECTBynmo/tacnode | 344d0a10b3766c47538e4917e0ef4d59e07f9b9e | [
"BSD-2-Clause"
] | 5 | 2015-01-29T19:34:45.000Z | 2019-03-17T11:15:26.000Z | import os
import sys
##############
# NOTE: You will need to build boost
# On windows, use the following command from the visual studio command
# prompt (after running boostrap.bat)
#
# bjam --build-dir=c:\boost --build-type=complete --toolset=msvc-9.0 address-model=64 architecture=x86 --with-system
##############
# Root of the build tree and the active build configuration for all paths.
currentPath = os.getcwd()
config = "Debug"

# Boost build location and the statically linked boost_system library name
# matching the configuration ("s" = static release, "sgd" = static debug).
boostLocation = "C:/boost"
boostLibString = ("libboost_system-vc90-s-1_52.lib" if config == "Release"
                  else "libboost_system-vc90-sgd-1_52.lib")

# Main list of libraries to be concatenated into the final library.
# NOTE: For non-windows environments the .lib suffix is swapped for .o below.
#
# Remove any libraries from this list that you would rather link manually
# (separately).
inputLibs = [
    # Main node.js library
    currentPath + '/' + config + "/lib/node.lib",

    # v8
    currentPath + '/build/' + config + "/lib/v8_base.lib",
    currentPath + '/build/' + config + "/lib/v8_nosnapshot.lib",
    currentPath + '/build/' + config + "/lib/v8_snapshot.lib",

    # libuv
    currentPath + '/' + config + "/lib/libuv.lib",

    # Other direct dependencies of node
    currentPath + '/' + config + "/lib/cares.lib",
    currentPath + '/' + config + "/lib/http_parser.lib",
    currentPath + '/' + config + "/lib/openssl.lib",
    currentPath + '/' + config + "/lib/zlib.lib",

    # Boost
    #boostLocation+"/boost/bin.v2/libs/system/build/msvc-9.0/"+config+"/address-model-64/architecture-x86/link-static/runtime-link-static/" + boostLibString
]

# Space-separated argument string for the Windows librarian (.lib inputs).
inputLibString = "".join(lib + " " for lib in inputLibs)
# Equivalent string for non-Windows archivers.  BUGFIX: str.replace returns a
# new string (strings are immutable); the old code discarded the result, so
# .lib names leaked into the .o list.  Collect the replaced names explicitly.
inputOString = "".join(lib.replace(".lib", ".o") + " " for lib in inputLibs)
# Concatenate everything into one library using the platform's archiver.
if sys.platform == 'win32':
    out_path = currentPath + '/' + config + '/node.lib'
    os.system('lib.exe /OUT:' + out_path + ' ' + inputLibString)
else:
    out_path = currentPath + '/' + config + '/node.a'
    os.system('ar rcs ' + out_path + ' ' + inputOString)
| 30.596774 | 153 | 0.698471 | import os
import sys
gd-1_52.lib";
inputLibs = [
currentPath+'/'+config+"/lib/node.lib",
currentPath+'/build/'+config+"/lib/v8_base.lib",
currentPath+'/build/'+config+"/lib/v8_nosnapshot.lib",
currentPath+'/build/'+config+"/lib/v8_snapshot.lib",
currentPath+'/'+config+"/lib/libuv.lib",
currentPath+'/'+config+"/lib/cares.lib",
currentPath+'/'+config+"/lib/http_parser.lib",
currentPath+'/'+config+"/lib/openssl.lib",
currentPath+'/'+config+"/lib/zlib.lib",
]
inputLibString = ""
inputOString = ""
for lib in inputLibs:
inputLibString += lib + " "
for lib in inputLibs:
lib.replace(".lib", ".o")
inputOString += lib + " "
if sys.platform == 'win32':
os.system('lib.exe /OUT:'+currentPath+'/'+config+'/node.lib ' + inputLibString)
else:
os.system('ar rcs '+currentPath+'/'+config+'/node.a ' + inputOString)
| true | true |
f72130ecddae709848f93c0001cf6167db8fb692 | 3,950 | py | Python | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/enums/mobile_app_vendor_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\024MobileAppVendorProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\n;google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"q\n\x13MobileAppVendorEnum\"Z\n\x0fMobileAppVendor\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x13\n\x0f\x41PPLE_APP_STORE\x10\x02\x12\x14\n\x10GOOGLE_APP_STORE\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v4.enumsB\x14MobileAppVendorProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR = _descriptor.EnumDescriptor(
name='MobileAppVendor',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum.MobileAppVendor',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPLE_APP_STORE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_APP_STORE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=147,
serialized_end=237,
)
_sym_db.RegisterEnumDescriptor(_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR)
_MOBILEAPPVENDORENUM = _descriptor.Descriptor(
name='MobileAppVendorEnum',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=237,
)
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR.containing_type = _MOBILEAPPVENDORENUM
DESCRIPTOR.message_types_by_name['MobileAppVendorEnum'] = _MOBILEAPPVENDORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MobileAppVendorEnum = _reflection.GeneratedProtocolMessageType('MobileAppVendorEnum', (_message.Message,), dict(
DESCRIPTOR = _MOBILEAPPVENDORENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.mobile_app_vendor_pb2'
,
__doc__ = """Container for enum describing different types of mobile app vendors.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.enums.MobileAppVendorEnum)
))
_sym_db.RegisterMessage(MobileAppVendorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 38.72549 | 649 | 0.786076 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto',
package='google.ads.googleads.v4.enums',
syntax='proto3',
serialized_options=_b('\n!com.google.ads.googleads.v4.enumsB\024MobileAppVendorProtoP\001ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\242\002\003GAA\252\002\035Google.Ads.GoogleAds.V4.Enums\312\002\035Google\\Ads\\GoogleAds\\V4\\Enums\352\002!Google::Ads::GoogleAds::V4::Enums'),
serialized_pb=_b('\n;google/ads/googleads_v4/proto/enums/mobile_app_vendor.proto\x12\x1dgoogle.ads.googleads.v4.enums\x1a\x1cgoogle/api/annotations.proto\"q\n\x13MobileAppVendorEnum\"Z\n\x0fMobileAppVendor\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x13\n\x0f\x41PPLE_APP_STORE\x10\x02\x12\x14\n\x10GOOGLE_APP_STORE\x10\x03\x42\xe9\x01\n!com.google.ads.googleads.v4.enumsB\x14MobileAppVendorProtoP\x01ZBgoogle.golang.org/genproto/googleapis/ads/googleads/v4/enums;enums\xa2\x02\x03GAA\xaa\x02\x1dGoogle.Ads.GoogleAds.V4.Enums\xca\x02\x1dGoogle\\Ads\\GoogleAds\\V4\\Enums\xea\x02!Google::Ads::GoogleAds::V4::Enumsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR = _descriptor.EnumDescriptor(
name='MobileAppVendor',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum.MobileAppVendor',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APPLE_APP_STORE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GOOGLE_APP_STORE', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=147,
serialized_end=237,
)
_sym_db.RegisterEnumDescriptor(_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR)
_MOBILEAPPVENDORENUM = _descriptor.Descriptor(
name='MobileAppVendorEnum',
full_name='google.ads.googleads.v4.enums.MobileAppVendorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=237,
)
_MOBILEAPPVENDORENUM_MOBILEAPPVENDOR.containing_type = _MOBILEAPPVENDORENUM
DESCRIPTOR.message_types_by_name['MobileAppVendorEnum'] = _MOBILEAPPVENDORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MobileAppVendorEnum = _reflection.GeneratedProtocolMessageType('MobileAppVendorEnum', (_message.Message,), dict(
DESCRIPTOR = _MOBILEAPPVENDORENUM,
__module__ = 'google.ads.googleads_v4.proto.enums.mobile_app_vendor_pb2'
,
__doc__ = """Container for enum describing different types of mobile app vendors.
""",
))
_sym_db.RegisterMessage(MobileAppVendorEnum)
DESCRIPTOR._options = None
| true | true |
f72131ada34d81d06455b0c8be1ca2dd4d1e24ee | 5,159 | py | Python | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | checkpoints/sum/train/hotel_mask/batch_size_16-notes_new_subword/code_snapshot/generate_from_lm.py | Saibo-creator/Text-Summrize-Project | d5ce54193110452a18cc0b223360c2bd004b4b28 | [
"Apache-2.0"
] | null | null | null | # generate_from_lm.py
"""
Load a trained language model and generate text
Example usage:
PYTHONPATH=. python generate_from_lm.py \
--init="Although the food" --tau=0.5 \
--sample_method=gumbel --g_eps=1e-5 \
--load_model='checkpoints/lm/mlstm/hotel/batch_size_64/lm_e9_2.93.pt' \
--dataset='hotel' --cpu=1 --sample_method=greedy
"""
import pdb
import torch
import torch.nn as nn
from models.custom_parallel import DataParallelModel
from models.mlstm import StackedLSTMEncoderDecoder
from models.nn_utils import move_to_cuda, setup_gpus, logits_to_prob, prob_to_vocab_id
from project_settings import HParams, PAD_ID, DatasetConfig
from utils import load_file, create_argparse_and_update_hp
#######################################
#
# Setup
#
#######################################
hp = HParams()
hp, run_name, parser = create_argparse_and_update_hp(hp)
parser.add_argument('--dataset', default='yelp',
help='yelp,amazon; will determine which subwordenc to use')
parser.add_argument('--init', default='The meaning of life is ',
help="Initial text ")
parser.add_argument('--load_model', default=None,
help="Path to model to load")
parser.add_argument('--seq_len', type=int, default=50,
help="Maximum sequence length")
parser.add_argument('--softmax_method', type=str, default='softmax',
help="softmax or gumbel")
parser.add_argument('--sample_method', type=str, default='sample',
help="sample or greedy")
parser.add_argument('--gumbel_hard', type=bool, default=False,
help="whether to produce one-hot from Gumbel softmax")
parser.add_argument('--beam_size', type=int, default=1,
help="Width for beam search")
parser.add_argument('--len_norm_factor', type=float, default=0.0,
help="Normalization factor")
parser.add_argument('--len_norm_const', type=float, default=5.0,
help="Normalization constant")
parser.add_argument('--gpus', default='0',
help="CUDA visible devices, e.g. 2,3")
# Parse the flag's value instead of keeping the raw argparse string: a raw
# string such as "False" is truthy, so '--cpu=False' would wrongly enable
# CPU mode.  ''/'0'/'false'/'no' (case-insensitive) now disable it, while
# the documented '--cpu=True' / '--cpu=1' keep working unchanged.
parser.add_argument('--cpu', default=False,
                    type=lambda v: v.strip().lower() not in ('', '0', 'false', 'no'),
                    help="if want to run on cpu, set --cpu=True")
# Parse CLI overrides, then pin visible GPUs and the RNG seed up front.
opt = parser.parse_args()
setup_gpus(opt.gpus, hp.seed)
# Dataset-specific paths (subword encoder, default LM checkpoint location).
ds_conf = DatasetConfig(opt.dataset)
if opt.load_model is None:
    # Fall back to the dataset's default language-model checkpoint.
    opt.load_model = ds_conf.lm_path
#######################################
#
# Run
#
#######################################
def batchify(data, batch_size):
    """Reshape a 1D tensor into ``batch_size`` rows.

    Trailing elements that do not fill a complete row are dropped.

    Args:
        data: 1D Tensor
        batch_size: int

    Returns:
        Contiguous Tensor of shape (batch_size, -1)

    Example (non-negative integers, batch_size = 4):
        [[ 0  1  2  3  4  5  6]
         [ 7  8  9 10 11 12 13]
         [14 15 16 17 18 19 20]
         [21 22 23 24 25 26 27]]

    Note: no longer used for reading data in from a text file.
    """
    usable = (data.size(0) // batch_size) * batch_size
    trimmed = data.narrow(0, 0, usable)  # drop the ragged tail
    return trimmed.view(batch_size, -1).contiguous()
#
# Prepare initial input text
#
# Encode each '|'-separated init string into subword ids and right-pad the
# batch so every sequence has the same length.
subwordenc = load_file(ds_conf.subwordenc_path)
init_texts = [init for init in opt.init.split('|')]
init_tokens = [subwordenc.encode(init) for init in init_texts]
init_lens = [len(init) for init in init_tokens]
max_len = max(init_lens)
# Pad shorter sequences with PAD_ID up to max_len.
init_tokens_padded = [tokens + [PAD_ID for _ in range(max_len - len(tokens))] for tokens in init_tokens]
init_tensor = [batchify(torch.LongTensor(init), 1) for init in init_tokens_padded]
init_tensor = torch.cat(init_tensor, dim=0)  # [batch, len]
init_tensor = move_to_cuda(init_tensor)
batch_size = init_tensor.size(0)
#
# Load and set up model
#
# Load the checkpoint.  Fall back to CPU mapping whenever CUDA is not
# available: previously `checkpoint` was left undefined in that case
# (neither branch taken), causing a NameError on the next line.
if opt.cpu or not torch.cuda.is_available():
    checkpoint = torch.load(opt.load_model, map_location='cpu')
else:
    checkpoint = torch.load(opt.load_model)  # StackedLSTMEncoder
# Unwrap DataParallel so we can reach the underlying module's attributes.
model = checkpoint['model']
if isinstance(model, nn.DataParallel):
    model = model.module
# Number of GPUs requested, e.g. --gpus='2,3' -> 2.
ngpus = 1 if len(opt.gpus) == 1 else len(opt.gpus.split(','))
#
# Generate
# #
if 'mlstm' in opt.load_model:
    # Set up encoder decoder from the trained LM's embedding + RNN stack.
    embed, rnn = model.embed, model.rnn
    enc_dec = StackedLSTMEncoderDecoder(embed, rnn)
    if torch.cuda.is_available():
        enc_dec.cuda()
    # Wrap for multi-GPU decoding only when more than one GPU was requested.
    enc_dec = DataParallelModel(enc_dec) if ngpus > 1 else enc_dec
    enc_dec.eval()
    # Generate continuations for every initial sequence in the batch.
    result = enc_dec(init_tensor,
                     dec_kwargs={'seq_len': opt.seq_len,
                                 'softmax_method': opt.softmax_method,
                                 'sample_method': opt.sample_method,
                                 'tau': hp.tau,
                                 'gumbel_hard': opt.gumbel_hard,
                                 'k': opt.beam_size,
                                 'subwordenc': subwordenc})
    probs, ids, texts, extra = zip(*result) if ngpus > 1 else result
    if ngpus > 1:  # flatten: each gpu returns lists of texts
        texts = [batch_text for gpu_texts in texts for batch_text in gpu_texts]
    # Print each prompt followed by its generated continuation.
    for i in range(batch_size):
        print(init_texts[i] + texts[i])
        print('-' * 100)
| 33.070513 | 104 | 0.629192 |
import pdb
import torch
import torch.nn as nn
from models.custom_parallel import DataParallelModel
from models.mlstm import StackedLSTMEncoderDecoder
from models.nn_utils import move_to_cuda, setup_gpus, logits_to_prob, prob_to_vocab_id
from project_settings import HParams, PAD_ID, DatasetConfig
from utils import load_file, create_argparse_and_update_hp
.gpus, hp.seed)
ds_conf = DatasetConfig(opt.dataset)
if opt.load_model is None:
opt.load_model = ds_conf.lm_path
opt.softmax_method,
'sample_method': opt.sample_method,
'tau': hp.tau,
'gumbel_hard': opt.gumbel_hard,
'k': opt.beam_size,
'subwordenc': subwordenc})
probs, ids, texts, extra = zip(*result) if ngpus > 1 else result
if ngpus > 1:
texts = [batch_text for gpu_texts in texts for batch_text in gpu_texts]
for i in range(batch_size):
print(init_texts[i] + texts[i])
print('-' * 100)
| true | true |
f721321d98af2205ce169d3d88af0b431e7731ea | 1,235 | py | Python | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | null | null | null | sysinv/sysinv/sysinv/sysinv/objects/interface_base.py | etaivan/stx-config | 281e1f110973f96e077645fb01f67b646fc253cc | [
"Apache-2.0"
] | 1 | 2021-01-05T16:24:58.000Z | 2021-01-05T16:24:58.000Z | #
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
#
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
def _get_interface_name_list(field, db_object):
ifnames = []
for i in db_object[field]:
ifnames.append(i['ifname'])
return ifnames
class InterfaceBase(base.SysinvObject):
    """Base sysinv object representing a host network interface."""

    # Shared database API handle used by all interface objects.
    dbapi = db_api.get_instance()

    # Field name -> converter applied when loading attribute values
    # (the `utils` helpers coerce to the named type or None).
    fields = {
        'id': int,
        'uuid': utils.str_or_none,
        'forihostid': utils.int_or_none,
        'iftype': utils.str_or_none,
        'ifname': utils.str_or_none,
        'networktype': utils.str_or_none,
        'ifcapabilities': utils.dict_or_none,
        'farend': utils.dict_or_none,
        'uses': utils.list_of_strings_or_none,
        'used_by': utils.list_of_strings_or_none,
        'sriov_numvfs': utils.int_or_none,
    }

    # Fields materialized from related DB records rather than plain
    # columns; both resolve to lists of interface names.
    _foreign_fields = {
        'uses': _get_interface_name_list,
        'used_by': _get_interface_name_list,
    }

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Fetch an interface object by its UUID via the DB API."""
        return cls.dbapi.interface_get(uuid)
| 25.204082 | 53 | 0.637247 |
from sysinv.db import api as db_api
from sysinv.objects import base
from sysinv.objects import utils
def _get_interface_name_list(field, db_object):
ifnames = []
for i in db_object[field]:
ifnames.append(i['ifname'])
return ifnames
class InterfaceBase(base.SysinvObject):
dbapi = db_api.get_instance()
fields = {
'id': int,
'uuid': utils.str_or_none,
'forihostid': utils.int_or_none,
'iftype': utils.str_or_none,
'ifname': utils.str_or_none,
'networktype': utils.str_or_none,
'ifcapabilities': utils.dict_or_none,
'farend': utils.dict_or_none,
'uses': utils.list_of_strings_or_none,
'used_by': utils.list_of_strings_or_none,
'sriov_numvfs': utils.int_or_none
}
_foreign_fields = {
'uses': _get_interface_name_list,
'used_by': _get_interface_name_list,
}
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
return cls.dbapi.interface_get(uuid)
| true | true |
f72133aff214d90410fb19b8ccb50eafa1390f3b | 12,732 | py | Python | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2021-06-11T19:54:19.000Z | 2021-06-11T19:54:19.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | 1 | 2019-08-30T14:45:33.000Z | 2019-08-30T14:45:33.000Z | datalad/customremotes/tests/test_archives.py | christinerogers/datalad | 8b91f3767b45371e213aa7ade146a290a13c00f2 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Tests for customremotes archives providing dl+archive URLs handling"""
from unittest.mock import patch
import os
import os.path as op
import sys
import re
import logging
import glob
from time import sleep
from ..archives import (
ArchiveAnnexCustomRemote,
link_file_load,
)
from ..base import AnnexExchangeProtocol
from ...support.annexrepo import AnnexRepo
from ...consts import ARCHIVES_SPECIAL_REMOTE
from .test_base import (
BASE_INTERACTION_SCENARIOS,
check_interaction_scenario,
)
from ...tests.utils import (
abspath,
assert_equal,
assert_false,
assert_is_instance,
assert_not_in,
assert_true,
chpwd,
eq_,
get_most_obscure_supported_name,
in_,
known_failure_githubci_win,
ok_,
ok_file_has_content,
serve_path_via_http,
swallow_logs,
swallow_outputs,
with_tempfile,
with_tree,
)
from ...cmd import Runner, GitRunner
from ...utils import (
_path_,
on_linux,
on_osx,
unlink,
)
from . import _get_custom_runner
from ...tests.test_archives import (
fn_archive_obscure,
fn_archive_obscure_ext,
fn_in_archive_obscure,
)
#import line_profiler
#prof = line_profiler.LineProfiler()
# TODO: with_tree ATM for archives creates this nested top directory
# matching archive name, so it will be a/d/test.dat ... we don't want that probably
@known_failure_githubci_win
@with_tree(
    tree=(('a.tar.gz', {'d': {fn_in_archive_obscure: '123'}}),
          ('simple.txt', '123'),
          (fn_archive_obscure_ext, (('d', ((fn_in_archive_obscure, '123'),)),)),
          (fn_archive_obscure, '123')))
@with_tempfile()
def test_basic_scenario(d, d2):
    """End-to-end check of the dl+archive special remote.

    Covers URL construction/parsing helpers, registering a dl+archive URL
    for an annexed file, get/drop round-trips through the special remote,
    and retrieval from a clone (which transitively fetches the tarball).
    """
    fn_archive, fn_extracted = fn_archive_obscure_ext, fn_archive_obscure
    annex = AnnexRepo(d, runner=_get_custom_runner(d))
    annex.init_remote(
        ARCHIVES_SPECIAL_REMOTE,
        ['encryption=none', 'type=external', 'externaltype=%s' % ARCHIVES_SPECIAL_REMOTE,
         'autoenable=true'
         ])
    assert annex.is_special_annex_remote(ARCHIVES_SPECIAL_REMOTE)
    # We want two maximally obscure names, which are also different
    assert(fn_extracted != fn_in_archive_obscure)
    annex.add(fn_archive)
    annex.commit(msg="Added tarball")
    annex.add(fn_extracted)
    annex.commit(msg="Added the load file")
    # Operations with archive remote URL
    annexcr = ArchiveAnnexCustomRemote(path=d)
    # few quick tests for get_file_url
    eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat"), "dl+archive:xyz#path=a.dat")
    eq_(annexcr.get_file_url(archive_key="xyz", file="a.dat", size=999), "dl+archive:xyz#path=a.dat&size=999")
    # see https://github.com/datalad/datalad/issues/441#issuecomment-223376906
    # old style
    eq_(annexcr._parse_url("dl+archive:xyz/a.dat#size=999"), ("xyz", "a.dat", {'size': 999}))
    eq_(annexcr._parse_url("dl+archive:xyz/a.dat"), ("xyz", "a.dat", {}))  # old format without size
    # new style
    eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat&size=999"), ("xyz", "a.dat", {'size': 999}))
    eq_(annexcr._parse_url("dl+archive:xyz#path=a.dat"), ("xyz", "a.dat", {}))  # old format without size
    file_url = annexcr.get_file_url(
        archive_file=fn_archive,
        file=fn_archive.replace('.tar.gz', '') + '/d/' + fn_in_archive_obscure)
    annex.add_url_to_file(fn_extracted, file_url, ['--relaxed'])
    annex.drop(fn_extracted)
    # The special remote must be listed as a location for the file.
    list_of_remotes = annex.whereis(fn_extracted, output='descriptions')
    in_('[%s]' % ARCHIVES_SPECIAL_REMOTE, list_of_remotes)
    assert_false(annex.file_has_content(fn_extracted))
    annex.get(fn_extracted)
    assert_true(annex.file_has_content(fn_extracted))
    # Without the URL the file has no remaining copies, so drop must fail.
    annex.rm_url(fn_extracted, file_url)
    assert_false(annex.drop(fn_extracted)['success'])
    annex.add_url_to_file(fn_extracted, file_url)
    annex.drop(fn_extracted)
    annex.get(fn_extracted)
    annex.drop(fn_extracted)  # so we don't get from this one next
    # Let's create a clone and verify chain of getting file through the tarball
    cloned_annex = AnnexRepo.clone(d, d2, runner=_get_custom_runner(d2))
    # we still need to enable manually atm that special remote for archives
    # cloned_annex.enable_remote('annexed-archives')
    assert_false(cloned_annex.file_has_content(fn_archive))
    assert_false(cloned_annex.file_has_content(fn_extracted))
    cloned_annex.get(fn_extracted)
    assert_true(cloned_annex.file_has_content(fn_extracted))
    # as a result it would also fetch tarball
    assert_true(cloned_annex.file_has_content(fn_archive))
    # Check if protocol was collected (only when the env flag enables the
    # recording AnnexExchangeProtocol wrapper).
    if os.environ.get('DATALAD_TESTS_PROTOCOLREMOTE'):
        assert_is_instance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol)
        protocol_file = _path_(annex.path,
                               '.git/bin/git-annex-remote-datalad-archive')
        ok_file_has_content(protocol_file, "VERSION 1", re_=True, match=False)
        ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
        ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
    else:
        assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
    tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir):
    """Drop/get of an archive-backed file must work from a subdirectory."""
    from datalad.api import add_archive_content
    annex = AnnexRepo(topdir, init=True)
    annex.add('a.tar.gz')
    annex.commit()
    # Extract the tarball's content into the annex, removing the tarball file.
    add_archive_content('a.tar.gz', annex=annex, delete=True)
    fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
    with chpwd(op.join(topdir, 'a', 'd')):
        runner = Runner()
        runner(['git', 'annex', 'drop', '--', fn_in_archive_obscure])  # run git annex drop
        assert_false(annex.file_has_content(fpath))             # and verify if file deleted from directory
        runner(['git', 'annex', 'get', '--', fn_in_archive_obscure])   # run git annex get
        assert_true(annex.file_has_content(fpath))              # and verify if file got into directory
@known_failure_githubci_win
def test_get_git_environ_adjusted():
    """GitRunner must absolutize GIT_* paths and leave other vars intact."""
    runner = GitRunner()
    env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
    adjusted = runner.get_git_environ_adjusted(env)
    # Relative GIT_* paths come back as absolute paths ...
    assert_equal(adjusted["GIT_DIR"], abspath(env["GIT_DIR"]))
    assert_equal(adjusted["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
    # ... while unrelated variables pass through unchanged.
    assert_equal(adjusted["TEST_VAR"], env["TEST_VAR"])
    # Without an explicit env, the current process environment is imported.
    sys_env = runner.get_git_environ_adjusted()
    assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
    """Importing the archives remote must not pull in rdflib.

    Run a fresh interpreter that imports the module and prints any loaded
    rdflib modules; the list must stay empty (importing rdflib adds ~300ms
    startup delay for no immediate use).
    """
    # rely on rdflib polluting stdout to see that it is not loaded whenever we load this remote
    # since that adds 300ms delay for no immediate use
    from ...cmd import Runner
    runner = Runner()
    with swallow_outputs() as cmo:
        runner.run(
            [sys.executable,
             '-c',
             'import datalad.customremotes.archives, sys; '
             'print([k for k in sys.modules if k.startswith("rdflib")])'],
            log_stdout=False,
            log_stderr=False)
        # print cmo.out
        assert_not_in("rdflib", cmo.out)
        assert_not_in("rdflib", cmo.err)
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
    """Exercise scripted special-remote protocol exchanges.

    Each scenario is a list of (message-to-remote, expected-reply) pairs;
    a reply of None means no response is expected, and a compiled regex is
    matched against the actual reply.
    """
    # Just a placeholder since constructor expects a repo
    repo = AnnexRepo(tdir, create=True, init=True)
    repo.add('archive.tar.gz')
    repo.commit('added')
    for scenario in BASE_INTERACTION_SCENARIOS + [
        [
            ('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
        ],
        [
            # by default we do not require any fancy init
            # no urls supported by default
            ('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
            # we know that is just a single option, url, is expected so full
            # one would be passed
            ('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
        ],
        # basic interaction failing to fetch content from archive
        [
            ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
            ('VALUE dl+archive://somekey2#path', None),
            ('VALUE dl+archive://somekey3#path', None),
            ('VALUE',
             re.compile(
                 'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
                 'archive containing somekey. Tried: \[\]')
             )
        ],
        # # incorrect response received from annex -- something isn't right but ... later
        # [
        #     ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
        #     # We reply with UNSUPPORTED-REQUEST in these cases
        #     ('GETCOST', 'UNSUPPORTED-REQUEST'),
        # ],
    ]:
        check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
@with_tree(tree=
    {'1.tar.gz':
         {
             'bu.dat': '52055957098986598349795121365535' * 10000,
             'bu3.dat': '8236397048205454767887168342849275422' * 10000
         },
     '2.tar.gz':
         {
             'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
         },
    }
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
    """Manual helper (not auto-collected): build a repo with archives.

    WARNING: ends in an infinite loop on purpose so the repository can be
    inspected / tqdm progress observed interactively -- never run in CI.
    """
    # just a helper to enable/use when want quickly to get some
    # repository with archives and observe tqdm
    from datalad.api import add_archive_content
    from datalad.api import create
    ds = create(outdir)
    for f in '1.tar.gz', '2.tar.gz':
        with chpwd(outdir):
            ds.repo.add_url_to_file(f, topurl + f)
            ds.save(f)
            add_archive_content(f, delete=True, drop_after=True)
    files = glob.glob(op.join(outdir, '*'))
    ds.drop(files)  # will not drop tarballs
    ds.repo.drop([], options=['--all', '--fast'])
    ds.get(files)
    ds.repo.drop([], options=['--all', '--fast'])
    # now loop so we could play with it outside
    print(outdir)
    # import pdb; pdb.set_trace()
    while True:
        sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile):
    """link_file_load should hardlink where possible and fall back to copy.

    On Linux/OSX the first call must produce a hardlink (same inode); with
    os.link mocked to fail, it must log a warning and copy instead, with
    content and basic stat fields preserved.
    """
    tempfile2 = tempfile + '_'
    with open(tempfile, 'w') as f:
        f.write("LOAD")
    link_file_load(tempfile, tempfile2)  # this should work in general
    ok_(os.path.exists(tempfile2))
    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")
    def inode(fname):
        # Inode of an open handle -- identical inodes mean a hardlink.
        with open(fname) as fd:
            return os.fstat(fd.fileno()).st_ino
    def stats(fname, times=True):
        """Return stats on the file which should have been preserved"""
        with open(fname) as fd:
            st = os.fstat(fd.fileno())
            stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
            if times:
                return stats + (st.st_atime, st.st_mtime)
            else:
                return stats
            # despite copystat mtime is not copied. TODO
            #        st.st_mtime)
    if on_linux or on_osx:
        # above call should result in the hardlink
        assert_equal(inode(tempfile), inode(tempfile2))
        assert_equal(stats(tempfile), stats(tempfile2))
        # and if we mock absence of .link
        def raise_AttributeError(*args):
            raise AttributeError("TEST")
        with patch('os.link', raise_AttributeError):
            with swallow_logs(logging.WARNING) as cm:
                link_file_load(tempfile, tempfile2)  # should still work
                ok_("failed (TEST), copying file" in cm.out)
    # should be a copy (either originally for windows, or after mocked call)
    ok_(inode(tempfile) != inode(tempfile2))
    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")
    assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
    unlink(tempfile2)  # TODO: next two with_tempfile
| 37.011628 | 116 | 0.654807 |
"VERSION 1", re_=True, match=False)
ok_file_has_content(protocol_file, "GETAVAILABILITY", re_=True, match=False)
ok_file_has_content(protocol_file, "#!/bin/bash", re_=True, match=False)
else:
assert_false(isinstance(annex.cmd_call_wrapper.protocol, AnnexExchangeProtocol))
# verify that we can drop if original archive gets dropped but available online:
# -- done as part of the test_add_archive_content.py
# verify that we can't drop a file if archive key was dropped and online archive was removed or changed size! ;)
@known_failure_githubci_win
@with_tree(
tree={'a.tar.gz': {'d': {fn_in_archive_obscure: '123'}}}
)
def test_annex_get_from_subdir(topdir):
from datalad.api import add_archive_content
annex = AnnexRepo(topdir, init=True)
annex.add('a.tar.gz')
annex.commit()
add_archive_content('a.tar.gz', annex=annex, delete=True)
fpath = op.join(topdir, 'a', 'd', fn_in_archive_obscure)
with chpwd(op.join(topdir, 'a', 'd')):
runner = Runner()
runner(['git', 'annex', 'drop', '--', fn_in_archive_obscure])
assert_false(annex.file_has_content(fpath))
runner(['git', 'annex', 'get', '--', fn_in_archive_obscure])
assert_true(annex.file_has_content(fpath))
@known_failure_githubci_win
def test_get_git_environ_adjusted():
    # get_git_environ_adjusted() must absolutize GIT_DIR / GIT_WORK_TREE and
    # pass any unrelated variable through unchanged.
    gitrunner = GitRunner()
    env = {"GIT_DIR": "../../.git", "GIT_WORK_TREE": "../../", "TEST_VAR": "Exists"}
    adj_env = gitrunner.get_git_environ_adjusted(env)
    assert_equal(adj_env["GIT_DIR"], abspath(env["GIT_DIR"]))
    assert_equal(adj_env["GIT_WORK_TREE"], abspath(env["GIT_WORK_TREE"]))
    assert_equal(adj_env["TEST_VAR"], env["TEST_VAR"])
    # with no explicit env the current process environment is adjusted
    sys_env = gitrunner.get_git_environ_adjusted()
    assert_equal(sys_env["PWD"], os.environ.get("PWD"))
def test_no_rdflib_loaded():
    # Importing the archives custom-remote module must not pull in rdflib as
    # a side effect.  A fresh interpreter is probed so modules already loaded
    # by the test process cannot mask a regression.
    from ...cmd import Runner
    runner = Runner()
    with swallow_outputs() as cmo:
        runner.run(
            [sys.executable,
             '-c',
             'import datalad.customremotes.archives, sys; '
             'print([k for k in sys.modules if k.startswith("rdflib")])'],
            log_stdout=False,
            log_stderr=False)
        # rdflib must appear in neither output stream of the probe process
        assert_not_in("rdflib", cmo.out)
        assert_not_in("rdflib", cmo.err)
@with_tree(tree={'archive.tar.gz': {'f1.txt': 'content'}})
def test_interactions(tdir):
    # Drive the archive special remote through canned protocol scenarios;
    # each scenario is a list of (command sent, expected reply) pairs.
    repo = AnnexRepo(tdir, create=True, init=True)
    repo.add('archive.tar.gz')
    repo.commit('added')
    for scenario in BASE_INTERACTION_SCENARIOS + [
        [
            ('GETCOST', 'COST %d' % ArchiveAnnexCustomRemote.COST),
        ],
        [
            # CLAIMURL is always refused, with or without extra arguments
            ('CLAIMURL http://example.com', 'CLAIMURL-FAILURE'),
            ('CLAIMURL http://example.com roguearg', 'CLAIMURL-FAILURE'),
        ],
        [
            ('TRANSFER RETRIEVE somekey somefile', 'GETURLS somekey dl+archive:'),
            ('VALUE dl+archive://somekey2#path', None),
            ('VALUE dl+archive://somekey3#path', None),
            # a bare VALUE terminates the URL list; with no fetchable archive
            # the transfer must fail with the message matched below
            ('VALUE',
             re.compile(
                 'TRANSFER-FAILURE RETRIEVE somekey Failed to fetch any '
                 'archive containing somekey. Tried: \[\]')
             )
        ],
        # NOTE(review): the next line appears to be a truncated leftover of a
        # (partially commented-out) scenario entry -- as written it is not
        # valid Python; confirm against the original upstream source.
        key dl+archive:'),
        # # We reply with UNSUPPORTED-REQUEST in these cases
        # ('GETCOST', 'UNSUPPORTED-REQUEST'),
        # ],
    ]:
        check_interaction_scenario(ArchiveAnnexCustomRemote, tdir, scenario)
@with_tree(tree=
    {'1.tar.gz':
         {
             'bu.dat': '52055957098986598349795121365535' * 10000,
             'bu3.dat': '8236397048205454767887168342849275422' * 10000
         },
     '2.tar.gz':
         {
             'bu2.dat': '17470674346319559612580175475351973007892815102' * 10000
         },
    }
)
@serve_path_via_http()
@with_tempfile
def check_observe_tqdm(topdir, topurl, outdir):
    # just a helper to enable/use when want quickly to get some
    # repository with archives and observe tqdm
    from datalad.api import add_archive_content
    from datalad.api import create
    ds = create(outdir)
    for f in '1.tar.gz', '2.tar.gz':
        with chpwd(outdir):
            # register the served tarball by URL, save it, then extract it
            ds.repo.add_url_to_file(f, topurl + f)
            ds.save(f)
            add_archive_content(f, delete=True, drop_after=True)
    files = glob.glob(op.join(outdir, '*'))
    ds.drop(files)  # will not drop tarballs
    ds.repo.drop([], options=['--all', '--fast'])
    ds.get(files)
    ds.repo.drop([], options=['--all', '--fast'])
    # now loop so we could play with it outside
    print(outdir)
    # import pdb; pdb.set_trace()
    # NOTE: intentionally never returns -- this is a manual helper (name
    # starts with "check_", so the test runner does not collect it).
    while True:
        sleep(0.1)
@known_failure_githubci_win
@with_tempfile
def test_link_file_load(tempfile):
    # link_file_load() should hardlink where the platform allows it and fall
    # back to copying otherwise; either way the content must be identical.
    tempfile2 = tempfile + '_'
    with open(tempfile, 'w') as f:
        f.write("LOAD")
    link_file_load(tempfile, tempfile2)  # this should work in general
    ok_(os.path.exists(tempfile2))
    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")

    def inode(fname):
        # inode numbers identify hardlinks: equal inode => same file
        with open(fname) as fd:
            return os.fstat(fd.fileno()).st_ino

    def stats(fname, times=True):
        # comparable metadata tuple; timestamps can be excluded for copies
        with open(fname) as fd:
            st = os.fstat(fd.fileno())
            stats = (st.st_mode, st.st_uid, st.st_gid, st.st_size)
            if times:
                return stats + (st.st_atime, st.st_mtime)
            else:
                return stats
            # despite copystat mtime is not copied. TODO
            #        st.st_mtime)

    if on_linux or on_osx:
        # above call should result in the hardlink
        assert_equal(inode(tempfile), inode(tempfile2))
        assert_equal(stats(tempfile), stats(tempfile2))
        # and if we mock absence of .link
        def raise_AttributeError(*args):
            raise AttributeError("TEST")
        with patch('os.link', raise_AttributeError):
            with swallow_logs(logging.WARNING) as cm:
                link_file_load(tempfile, tempfile2)  # should still work
                ok_("failed (TEST), copying file" in cm.out)
    # should be a copy (either originally for windows, or after mocked call)
    ok_(inode(tempfile) != inode(tempfile2))
    with open(tempfile2, 'r') as f:
        assert_equal(f.read(), "LOAD")
    assert_equal(stats(tempfile, times=False), stats(tempfile2, times=False))
    unlink(tempfile2)  # TODO: next two with_tempfile
| true | true |
f7213680738375f96a2084c20c14a5024e5d194e | 5,679 | py | Python | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | salt/states/postgres_extension.py | trebuchet-deploy/salt | dcdf1148248912a4592f0f48d2303903588729cc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Management of PostgreSQL extensions (e.g.: postgis)
===================================================
The postgres_extensions module is used to create and manage Postgres extensions.
.. code-block:: yaml
adminpack:
postgres_extension.present
'''
# Import Python libs
import logging
# Import salt libs
from salt.modules import postgres
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this state module only when the postgres execution module
    (and thus ``postgres.create_extension``) is available.
    '''
    required_fun = 'postgres.create_extension'
    return required_fun in __salt__
def present(name,
            if_not_exists=None,
            schema=None,
            ext_version=None,
            from_version=None,
            user=None,
            maintenance_db=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Ensure that the named extension is present with the specified privileges

    name
        The name of the extension to manage

    if_not_exists
        Add a if_not_exists switch to the ddl statement

    schema
        Schema to install the extension into

    from_version
        Old extension version if already installed

    ext_version
        version to install

    user
        System user all operations should be performed on behalf of

    maintenance_db
        Database to act on

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           # bug fix: message previously misspelled as "Extention"
           'comment': 'Extension {0} is already present'.format(name)}
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # check what (if anything) needs doing for this extension
    mode = 'create'
    mtdata = __salt__['postgres.create_metadata'](
        name,
        schema=schema,
        ext_version=ext_version,
        **db_args)
    # The extension is not present at all -> install it
    toinstall = postgres._EXTENSION_NOT_INSTALLED in mtdata
    if toinstall:
        mode = 'install'
    # The extension is installed but needs a schema move and/or a version
    # upgrade -> upgrade it
    toupgrade = False
    if postgres._EXTENSION_INSTALLED in mtdata:
        for flag in [
            postgres._EXTENSION_TO_MOVE,
            postgres._EXTENSION_TO_UPGRADE
        ]:
            if flag in mtdata:
                toupgrade = True
                mode = 'upgrade'
    if __opts__['test']:
        ret['result'] = None
        if mode:
            # "{mode}ed" with a doubled 'e' collapsed:
            # create -> created, install -> installed, upgrade -> upgraded
            ret['comment'] = 'Extension {0} is set to be {1}ed'.format(
                name, mode).replace('eed', 'ed')
        return ret
    cret = None
    if toinstall or toupgrade:
        cret = __salt__['postgres.create_extension'](
            name=name,
            if_not_exists=if_not_exists,
            schema=schema,
            ext_version=ext_version,
            from_version=from_version,
            **db_args)
    if cret:
        ret['comment'] = 'The extension {0} has been {1}ed'.format(name, mode)
    elif cret is not None:
        ret['comment'] = 'Failed to {1} extension {0}'.format(name, mode)
        ret['result'] = False
    else:
        # nothing needed doing; the initial "already present" comment stands
        ret['result'] = True
    return ret
def absent(name,
           if_exists=None,
           restrict=None,
           cascade=None,
           user=None,
           maintenance_db=None,
           db_password=None,
           db_host=None,
           db_port=None,
           db_user=None):
    '''
    Ensure that the named extension is absent

    name
        Extension name of the extension to remove

    cascade
        Drop on cascade

    if_exists
        Add if exist slug

    restrict
        Add restrict slug

    maintenance_db
        Database to act on

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # guard clause: nothing to do when the extension is not installed
    if not __salt__['postgres.is_installed_extension'](name, **db_args):
        ret['comment'] = 'Extension {0} is not present, so it cannot ' \
                         'be removed'.format(name)
        return ret
    # dry run: report the pending removal without touching the database
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Extension {0} is set to be removed'.format(name)
        return ret
    dropped = __salt__['postgres.drop_extension'](name,
                                                  if_exists=if_exists,
                                                  restrict=restrict,
                                                  cascade=cascade,
                                                  **db_args)
    if dropped:
        ret['comment'] = 'Extension {0} has been removed'.format(name)
        ret['changes'][name] = 'Absent'
    else:
        ret['result'] = False
        ret['comment'] = 'Extension {0} failed to be removed'.format(name)
    return ret
| 26.537383 | 80 | 0.559606 |
import logging
from salt.modules import postgres
log = logging.getLogger(__name__)
def __virtual__():
return 'postgres.create_extension' in __salt__
def present(name,
if_not_exists=None,
schema=None,
ext_version=None,
from_version=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Extention {0} is already present'.format(name)}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
mode = 'create'
mtdata = __salt__['postgres.create_metadata'](
name,
schema=schema,
ext_version=ext_version,
**db_args)
toinstall = postgres._EXTENSION_NOT_INSTALLED in mtdata
if toinstall:
mode = 'install'
toupgrade = False
if postgres._EXTENSION_INSTALLED in mtdata:
for flag in [
postgres._EXTENSION_TO_MOVE,
postgres._EXTENSION_TO_UPGRADE
]:
if flag in mtdata:
toupgrade = True
mode = 'upgrade'
if __opts__['test']:
ret['result'] = None
if mode:
ret['comment'] = 'Extension {0} is set to be {1}ed'.format(
name, mode).replace('eed', 'ed')
return ret
cret = None
if toinstall or toupgrade:
cret = __salt__['postgres.create_extension'](
name=name,
if_not_exists=if_not_exists,
schema=schema,
ext_version=ext_version,
from_version=from_version,
**db_args)
if cret:
ret['comment'] = 'The extension {0} has been {1}ed'.format(name, mode)
elif cret is not None:
ret['comment'] = 'Failed to {1} extension {0}'.format(name, mode)
ret['result'] = False
else:
ret['result'] = True
return ret
def absent(name,
if_exists=None,
restrict=None,
cascade=None,
user=None,
maintenance_db=None,
db_password=None,
db_host=None,
db_port=None,
db_user=None):
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
db_args = {
'maintenance_db': maintenance_db,
'runas': user,
'host': db_host,
'user': db_user,
'port': db_port,
'password': db_password,
}
exists = __salt__['postgres.is_installed_extension'](name, **db_args)
if exists:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Extension {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.drop_extension'](name,
if_exists=if_exists,
restrict=restrict,
cascade=cascade,
**db_args):
ret['comment'] = 'Extension {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['result'] = False
ret['comment'] = 'Extension {0} failed to be removed'.format(name)
return ret
else:
ret['comment'] = 'Extension {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret
| true | true |
f72136d4758f64cffd134ac0eafb88595992711c | 10,305 | py | Python | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | fairseq/progress_bar.py | jxhe/fairseq | 214e3fed5619733efa4f1f82c61db58e5ce08ad8 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
from collections import OrderedDict
from contextlib import contextmanager
import json
import logging
from numbers import Number
import os
import sys
import torch
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
    """Instantiate the progress bar implementation selected by ``args.log_format``,
    optionally wrapping it with a TensorBoard logger."""
    if args.log_format is None:
        args.log_format = no_progress_bar if args.no_progress_bar else default
    # tqdm needs a terminal to render; degrade to plain line logging otherwise
    if args.log_format == 'tqdm' and not sys.stderr.isatty():
        args.log_format = 'simple'

    fmt = args.log_format
    if fmt == 'none':
        bar = noop_progress_bar(iterator, epoch, prefix)
    elif fmt == 'tqdm':
        bar = tqdm_progress_bar(iterator, epoch, prefix)
    elif fmt in ('json', 'simple'):
        bar_cls = json_progress_bar if fmt == 'json' else simple_progress_bar
        bar = bar_cls(iterator, epoch, prefix, args.log_interval)
    else:
        raise ValueError('Unknown log format: {}'.format(fmt))

    if args.tensorboard_logdir and distributed_utils.is_master(args):
        try:
            # [FB only] custom wrapper for TensorBoard
            import palaas  # noqa
            from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
            bar = fb_tbmf_wrapper(bar, args, args.log_interval)
        except ImportError:
            # fall back to the generic tensorboardX-based wrapper
            bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)
    return bar
def format_stat(stat):
    """Render one stat value as a plain builtin suitable for logging.

    Numbers use '%g' formatting, meter objects expose their aggregate value,
    tensors become (nested) lists; anything else passes through unchanged.
    """
    if isinstance(stat, Number):
        return '{:g}'.format(stat)
    if isinstance(stat, AverageMeter):
        return '{:.3f}'.format(stat.avg)
    if isinstance(stat, TimeMeter):
        return '{:g}'.format(round(stat.avg))
    if isinstance(stat, StopwatchMeter):
        return '{:g}'.format(round(stat.sum))
    if torch.is_tensor(stat):
        return stat.tolist()
    return stat
class progress_bar(object):
    """Abstract base class defining the progress-bar / logging interface.

    Subclasses must implement __iter__, log() and print(); this base keeps
    the shared bookkeeping (epoch/prefix header, offset) and stat formatting.
    """

    def __init__(self, iterable, epoch=None, prefix=None):
        self.iterable = iterable
        # resumable iterators expose .offset; plain iterables start at 0
        self.offset = getattr(iterable, 'offset', 0)
        self.epoch = epoch
        header = ''
        if epoch is not None:
            header += 'epoch {:03d}'.format(epoch)
        if prefix is not None:
            header += ' | {}'.format(prefix)
        self.prefix = header

    def __len__(self):
        return len(self.iterable)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # never suppress exceptions
        return False

    def __iter__(self):
        raise NotImplementedError

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats according to log_interval."""
        raise NotImplementedError

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        raise NotImplementedError

    def _str_commas(self, stats):
        # "k1=v1, k2=v2" rendering of already-formatted stats
        return ', '.join('{}={}'.format(k, v.strip()) for k, v in stats.items())

    def _str_pipes(self, stats):
        # "k1 v1 | k2 v2" rendering of already-formatted stats
        return ' | '.join('{} {}'.format(k, v.strip()) for k, v in stats.items())

    def _format_stats(self, stats):
        # stringify every value while preserving key order
        return OrderedDict((k, str(format_stat(v))) for k, v in stats.items())
@contextmanager
def rename_logger(logger, new_name):
    """Temporarily rename ``logger`` to ``new_name`` for the duration of the
    ``with`` block (no-op when ``new_name`` is None).

    Bug fix: the original restored the old name only on normal exit; if the
    body raised, the logger kept the temporary name.  The restore now runs
    in a ``finally`` clause so the rename can never leak.
    """
    old_name = logger.name
    if new_name is not None:
        logger.name = new_name
    try:
        yield logger
    finally:
        logger.name = old_name
class json_progress_bar(progress_bar):
    """Log output in JSON format."""

    def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
        super().__init__(iterable, epoch, prefix)
        self.log_interval = log_interval  # emit one JSON line every N steps
        self.stats = None  # most recent stats handed to log()
        self.tag = None  # logger name to emit under (e.g. 'train'/'valid')

    def __iter__(self):
        size = float(len(self.iterable))
        for i, obj in enumerate(self.iterable, start=self.offset):
            yield obj
            # emit only after log() has been called at least once, and only
            # on every log_interval-th step
            if (
                self.stats is not None
                and i > 0
                and self.log_interval is not None
                and (i + 1) % self.log_interval == 0
            ):
                # fractional epoch progress, e.g. 1.25 = 25% into epoch 2
                update = (
                    self.epoch - 1 + float(i / size)
                    if self.epoch is not None
                    else None
                )
                stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
                with rename_logger(logger, self.tag):
                    logger.info(json.dumps(stats))

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats according to log_interval."""
        # stats are stashed here; actual emission happens inside __iter__
        self.stats = stats
        self.tag = tag

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        self.stats = stats
        if tag is not None:
            # namespace the keys with the tag, e.g. 'valid_loss'
            self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
        stats = self._format_stats(self.stats, epoch=self.epoch)
        with rename_logger(logger, tag):
            logger.info(json.dumps(stats))

    def _format_stats(self, stats, epoch=None, update=None):
        postfix = OrderedDict()
        # epoch/update come first so every JSON line starts with progress info
        if epoch is not None:
            postfix['epoch'] = epoch
        if update is not None:
            postfix['update'] = round(update, 3)
        # Preprocess stats according to datatype
        for key in stats.keys():
            postfix[key] = format_stat(stats[key])
        return postfix
class noop_progress_bar(progress_bar):
    """Progress bar that swallows all logging output."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super(noop_progress_bar, self).__init__(iterable, epoch, prefix)

    def __iter__(self):
        # no bookkeeping needed; hand the wrapped iterable straight through
        return iter(self.iterable)

    def log(self, stats, tag=None, step=None):
        """Intentionally a no-op."""

    def print(self, stats, tag=None, step=None):
        """Intentionally a no-op."""
class simple_progress_bar(progress_bar):
    """A minimal logger for non-TTY environments."""

    def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
        super().__init__(iterable, epoch, prefix)
        self.log_interval = log_interval  # print one line every N steps
        self.stats = None  # last (already formatted) stats from log()
        self.tag = None  # logger name to emit under

    def __iter__(self):
        size = len(self.iterable)
        for i, obj in enumerate(self.iterable, start=self.offset):
            yield obj
            # emit only once log() has been called, on every log_interval-th step
            if (
                self.stats is not None
                and i > 0
                and self.log_interval is not None
                and (i + 1) % self.log_interval == 0
            ):
                postfix = self._str_commas(self.stats)
                with rename_logger(logger, self.tag):
                    logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats according to log_interval."""
        # format eagerly so __iter__ only has to join strings
        self.stats = self._format_stats(stats)
        self.tag = tag

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        postfix = self._str_pipes(self._format_stats(stats))
        with rename_logger(logger, tag):
            logger.info('{} | {}'.format(self.prefix, postfix))
class tqdm_progress_bar(progress_bar):
    """Progress bar backed by the tqdm library."""

    def __init__(self, iterable, epoch=None, prefix=None):
        super(tqdm_progress_bar, self).__init__(iterable, epoch, prefix)
        # imported lazily so tqdm is only required when this bar is selected
        from tqdm import tqdm
        self.tqdm = tqdm(iterable, self.prefix, leave=False)

    def __iter__(self):
        return iter(self.tqdm)

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats according to log_interval."""
        # refresh=False: let tqdm redraw on its own schedule
        self.tqdm.set_postfix(self._format_stats(stats), refresh=False)

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        line = self._str_pipes(self._format_stats(stats))
        # tqdm.write prints without corrupting the progress display
        self.tqdm.write('{} | {}'.format(self.tqdm.desc, line))
# tensorboardX is an optional dependency; when missing, SummaryWriter is set
# to None and tensorboard_log_wrapper degrades to a warning-only no-op.
try:
    from tensorboardX import SummaryWriter

    # module-level cache: one SummaryWriter per tag / log sub-directory
    _tensorboard_writers = {}
except ImportError:
    SummaryWriter = None
class tensorboard_log_wrapper(progress_bar):
    """Log to tensorboard."""

    def __init__(self, wrapped_bar, tensorboard_logdir, args):
        self.wrapped_bar = wrapped_bar  # inner bar; all calls are forwarded to it
        self.tensorboard_logdir = tensorboard_logdir
        self.args = args
        if SummaryWriter is None:
            logger.warning(
                "tensorboard or required dependencies not found, please see README "
                "for using tensorboard. (e.g. pip install tensorboardX)"
            )

    def _writer(self, key):
        # Lazily create (and cache at module level) one SummaryWriter per tag;
        # returns None when tensorboardX is unavailable.
        # NOTE(review): writers are never closed/flushed explicitly -- this
        # relies on process exit; confirm that is acceptable here.
        if SummaryWriter is None:
            return None
        _writers = _tensorboard_writers
        if key not in _writers:
            _writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
            # record run configuration once per writer
            _writers[key].add_text('args', str(vars(self.args)))
            _writers[key].add_text('sys.argv', " ".join(sys.argv))
        return _writers[key]

    def __iter__(self):
        return iter(self.wrapped_bar)

    def log(self, stats, tag=None, step=None):
        """Log intermediate stats to tensorboard."""
        self._log_to_tensorboard(stats, tag, step)
        self.wrapped_bar.log(stats, tag=tag, step=step)

    def print(self, stats, tag=None, step=None):
        """Print end-of-epoch stats."""
        self._log_to_tensorboard(stats, tag, step)
        self.wrapped_bar.print(stats, tag=tag, step=step)

    def _log_to_tensorboard(self, stats, tag=None, step=None):
        writer = self._writer(tag or '')
        if writer is None:
            return
        if step is None:
            # NOTE(review): assumes 'num_updates' is present in stats whenever
            # no explicit step is given -- raises KeyError otherwise; confirm.
            step = stats['num_updates']
        # scalar-valued stats only; 'num_updates' itself is the x-axis
        for key in stats.keys() - {'num_updates'}:
            if isinstance(stats[key], AverageMeter):
                writer.add_scalar(key, stats[key].val, step)
            elif isinstance(stats[key], Number):
                writer.add_scalar(key, stats[key], step)
| 33.028846 | 104 | 0.612033 |
from collections import OrderedDict
from contextlib import contextmanager
import json
import logging
from numbers import Number
import os
import sys
import torch
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
logger = logging.getLogger(__name__)
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
if args.log_format is None:
args.log_format = no_progress_bar if args.no_progress_bar else default
if args.log_format == 'tqdm' and not sys.stderr.isatty():
args.log_format = 'simple'
if args.log_format == 'json':
bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'none':
bar = noop_progress_bar(iterator, epoch, prefix)
elif args.log_format == 'simple':
bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'tqdm':
bar = tqdm_progress_bar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(args.log_format))
if args.tensorboard_logdir and distributed_utils.is_master(args):
try:
import palaas
from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
bar = fb_tbmf_wrapper(bar, args, args.log_interval)
except ImportError:
bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)
return bar
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
elif torch.is_tensor(stat):
stat = stat.tolist()
return stat
class progress_bar(object):
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.offset = getattr(iterable, 'offset', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += 'epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag=None, step=None):
raise NotImplementedError
def print(self, stats, tag=None, step=None):
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
@contextmanager
def rename_logger(logger, new_name):
old_name = logger.name
if new_name is not None:
logger.name = new_name
yield logger
logger.name = old_name
class json_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = float(len(self.iterable))
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
update = (
self.epoch - 1 + float(i / size)
if self.epoch is not None
else None
)
stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
with rename_logger(logger, self.tag):
logger.info(json.dumps(stats))
def log(self, stats, tag=None, step=None):
self.stats = stats
self.tag = tag
def print(self, stats, tag=None, step=None):
self.stats = stats
if tag is not None:
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
with rename_logger(logger, tag):
logger.info(json.dumps(stats))
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class noop_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag=None, step=None):
pass
def print(self, stats, tag=None, step=None):
pass
class simple_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
self.tag = None
def __iter__(self):
size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if (
self.stats is not None
and i > 0
and self.log_interval is not None
and (i + 1) % self.log_interval == 0
):
postfix = self._str_commas(self.stats)
with rename_logger(logger, self.tag):
logger.info('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix))
def log(self, stats, tag=None, step=None):
self.stats = self._format_stats(stats)
self.tag = tag
def print(self, stats, tag=None, step=None):
postfix = self._str_pipes(self._format_stats(stats))
with rename_logger(logger, tag):
logger.info('{} | {}'.format(self.prefix, postfix))
class tqdm_progress_bar(progress_bar):
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag=None, step=None):
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag=None, step=None):
postfix = self._str_pipes(self._format_stats(stats))
self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
try:
from tensorboardX import SummaryWriter
_tensorboard_writers = {}
except ImportError:
SummaryWriter = None
class tensorboard_log_wrapper(progress_bar):
def __init__(self, wrapped_bar, tensorboard_logdir, args):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
self.args = args
if SummaryWriter is None:
logger.warning(
"tensorboard or required dependencies not found, please see README "
"for using tensorboard. (e.g. pip install tensorboardX)"
)
def _writer(self, key):
if SummaryWriter is None:
return None
_writers = _tensorboard_writers
if key not in _writers:
_writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key))
_writers[key].add_text('args', str(vars(self.args)))
_writers[key].add_text('sys.argv', " ".join(sys.argv))
return _writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag=None, step=None):
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag=None, step=None):
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def _log_to_tensorboard(self, stats, tag=None, step=None):
writer = self._writer(tag or '')
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
| true | true |
f72137bf78f559bfe97edbab30e7988e13d94b58 | 327 | py | Python | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | truck_microservice/truck_microservice/urls.py | getnosleep/VirtualUnjam | bae08eec9756c963dab409c6e4e7397ef019cc8a | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
# DRF router: no viewsets are registered here yet, but it is kept so that
# router.urls (and the browsable API root) exist at ''.
router = routers.DefaultRouter()

urlpatterns = [
    path('', include(router.urls)),
    path('api-auth/', include('rest_framework.urls')),  # browsable-API login/logout
    path('admin/', admin.site.urls),
    path('api/', include('truck.urls')),  # app-specific endpoints
]
| 27.25 | 54 | 0.700306 | from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls')),
path('admin/', admin.site.urls),
path('api/', include('truck.urls')),
]
| true | true |
f72137f9085fd5053b6df852024426c114db9e8c | 7,174 | py | Python | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | 1 | 2021-12-16T11:53:20.000Z | 2021-12-16T11:53:20.000Z | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | null | null | null | cogdl/models/emb/netsmf.py | wsyCUHK/cogdl | 7a0e36326fc653d85378e3845ec14ebd9425a9b6 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import scipy.sparse as sp
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from multiprocessing import Pool
from tqdm import tqdm
import time
from cogdl.utils import alias_draw, alias_setup
from .. import BaseModel
class NetSMF(BaseModel):
r"""The NetSMF model from the `"NetSMF: Large-Scale Network Embedding as Sparse Matrix Factorization"
<http://arxiv.org/abs/1710.02971>`_ paper.
Args:
hidden_size (int) : The dimension of node representation.
window_size (int) : The actual context size which is considered in language model.
negative (int) : The number of nagative samples in negative sampling.
num_round (int) : The number of round in NetSMF.
worker (int) : The number of workers for NetSMF.
"""
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument("--window-size", type=int, default=10,
                            help="Window size of approximate matrix. Default is 10.")
        parser.add_argument("--negative", type=int, default=1,
                            help="Number of negative node in sampling. Default is 1.")
        parser.add_argument("--num-round", type=int, default=100,
                            help="Number of round in NetSMF. Default is 100.")
        parser.add_argument("--worker", type=int, default=10,
                            help="Number of parallel workers. Default is 10.")
        # hidden-size: dimensionality of the learned node embeddings
        parser.add_argument("--hidden-size", type=int, default=128)
        # fmt: on
    @classmethod
    def build_model_from_args(cls, args):
        """Construct a NetSMF instance from parsed command-line arguments
        (see add_args for the corresponding options and defaults)."""
        return cls(
            args.hidden_size,
            args.window_size,
            args.negative,
            args.num_round,
            args.worker,
        )
    def __init__(self, dimension, window_size, negative, num_round, worker):
        """Store the NetSMF hyper-parameters; no computation happens here."""
        super(NetSMF, self).__init__()
        self.dimension = dimension  # embedding size
        self.window_size = window_size  # context window of the approximated matrix
        self.negative = negative  # number of negative samples
        self.worker = worker  # number of parallel processes
        self.num_round = num_round  # number of sampling rounds
    def train(self, graph, return_dict=False):
        # thin alias: all of the work happens in forward()
        return self.forward(graph, return_dict)
def forward(self, graph, return_dict=False):
self.G = graph.to_networkx()
node2id = dict([(node, vid) for vid, node in enumerate(self.G.nodes())])
self.is_directed = nx.is_directed(self.G)
self.num_node = self.G.number_of_nodes()
self.num_edge = self.G.number_of_edges()
self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]
id2node = dict(zip(node2id.values(), node2id.keys()))
self.num_neigh = np.asarray([len(list(self.G.neighbors(id2node[i]))) for i in range(self.num_node)])
self.neighbors = [[node2id[v] for v in self.G.neighbors(id2node[i])] for i in range(self.num_node)]
s = time.time()
self.alias_nodes = {}
self.node_weight = {}
for i in range(self.num_node):
unnormalized_probs = [self.G[id2node[i]][nbr].get("weight", 1.0) for nbr in self.G.neighbors(id2node[i])]
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
self.alias_nodes[i] = alias_setup(normalized_probs)
self.node_weight[i] = dict(
zip(
[node2id[nbr] for nbr in self.G.neighbors(id2node[i])],
unnormalized_probs,
)
)
t = time.time()
print("alias_nodes", t - s)
# run netsmf algorithm with multiprocessing and apply randomized svd
print("number of sample edges ", self.num_round * self.num_edge * self.window_size)
print("random walk start...")
t0 = time.time()
results = []
pool = Pool(processes=self.worker)
for i in range(self.worker):
results.append(pool.apply_async(func=self._random_walk_matrix, args=(i,)))
pool.close()
pool.join()
print("random walk time", time.time() - t0)
matrix = sp.csr_matrix((self.num_node, self.num_node))
A = sp.csr_matrix(nx.adjacency_matrix(self.G))
degree = sp.diags(np.array(A.sum(axis=0))[0], format="csr")
degree_inv = degree.power(-1)
t1 = time.time()
for res in results:
matrix += res.get()
t2 = time.time()
print("construct random walk matrix time", time.time() - t1)
L = sp.csgraph.laplacian(matrix, normed=False, return_diag=False)
M = degree_inv.dot(degree - L).dot(degree_inv)
M = M * A.sum() / self.negative
M.data[M.data <= 1] = 1
M.data = np.log(M.data)
M.eliminate_zeros()
print("number of nzz", M.nnz)
print("construct matrix sparsifier time", time.time() - t2)
embeddings = self._get_embedding_rand(M)
if return_dict:
features_matrix = dict()
for vid, node in enumerate(self.G.nodes()):
features_matrix[node] = embeddings[vid]
else:
features_matrix = np.zeros((graph.num_nodes, embeddings.shape[1]))
nx_nodes = self.G.nodes()
features_matrix[nx_nodes] = embeddings[np.arange(graph.num_nodes)]
return features_matrix
def _get_embedding_rand(self, matrix):
# Sparse randomized tSVD for fast embedding
t1 = time.time()
l = matrix.shape[0] # noqa E741
smat = sp.csc_matrix(matrix)
print("svd sparse", smat.data.shape[0] * 1.0 / l ** 2)
U, Sigma, VT = randomized_svd(smat, n_components=self.dimension, n_iter=5, random_state=None)
U = U * np.sqrt(Sigma)
U = preprocessing.normalize(U, "l2")
print("sparsesvd time", time.time() - t1)
return U
def _path_sampling(self, u, v, r):
# sample a r-length path from edge(u, v) and return path end node
k = np.random.randint(r) + 1
zp, rand_u, rand_v = 2.0 / self.node_weight[u][v], k - 1, r - k
for i in range(rand_u):
new_u = self.neighbors[u][alias_draw(self.alias_nodes[u][0], self.alias_nodes[u][1])]
zp += 2.0 / self.node_weight[u][new_u]
u = new_u
for j in range(rand_v):
new_v = self.neighbors[v][alias_draw(self.alias_nodes[v][0], self.alias_nodes[v][1])]
zp += 2.0 / self.node_weight[v][new_v]
v = new_v
return u, v, zp
def _random_walk_matrix(self, pid):
# construct matrix based on random walk
np.random.seed(pid)
matrix = sp.lil_matrix((self.num_node, self.num_node))
for i in tqdm(range(self.num_edge * self.num_round // self.worker)):
u, v = self.edges[i % self.num_edge]
if not self.is_directed and np.random.rand() > 0.5:
v, u = u, v
for r in range(1, self.window_size + 1):
u_, v_, zp = self._path_sampling(u, v, r)
matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp
return matrix.tocsr()
| 41.229885 | 117 | 0.598411 | import numpy as np
import networkx as nx
import scipy.sparse as sp
from sklearn import preprocessing
from sklearn.utils.extmath import randomized_svd
from multiprocessing import Pool
from tqdm import tqdm
import time
from cogdl.utils import alias_draw, alias_setup
from .. import BaseModel
class NetSMF(BaseModel):
    """NetSMF: Large-Scale Network Embedding as Sparse Matrix Factorization
    (http://arxiv.org/abs/1710.02971).
    """
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument("--window-size", type=int, default=10,
                            help="Window size of approximate matrix. Default is 10.")
        parser.add_argument("--negative", type=int, default=1,
                            help="Number of negative node in sampling. Default is 1.")
        parser.add_argument("--num-round", type=int, default=100,
                            help="Number of round in NetSMF. Default is 100.")
        parser.add_argument("--worker", type=int, default=10,
                            help="Number of parallel workers. Default is 10.")
        parser.add_argument("--hidden-size", type=int, default=128)
    @classmethod
    def build_model_from_args(cls, args):
        """Construct a NetSMF instance from parsed command-line arguments."""
        return cls(
            args.hidden_size,
            args.window_size,
            args.negative,
            args.num_round,
            args.worker,
        )
    def __init__(self, dimension, window_size, negative, num_round, worker):
        super(NetSMF, self).__init__()
        self.dimension = dimension
        self.window_size = window_size
        self.negative = negative
        self.worker = worker
        self.num_round = num_round
    def train(self, graph, return_dict=False):
        # Alias for forward(); kept for API compatibility.
        return self.forward(graph, return_dict)
    def forward(self, graph, return_dict=False):
        """Compute node embeddings for ``graph`` via sparse matrix factorization."""
        self.G = graph.to_networkx()
        # Re-index nodes to contiguous integer ids for matrix indexing.
        node2id = dict([(node, vid) for vid, node in enumerate(self.G.nodes())])
        self.is_directed = nx.is_directed(self.G)
        self.num_node = self.G.number_of_nodes()
        self.num_edge = self.G.number_of_edges()
        self.edges = [[node2id[e[0]], node2id[e[1]]] for e in self.G.edges()]
        id2node = dict(zip(node2id.values(), node2id.keys()))
        self.num_neigh = np.asarray([len(list(self.G.neighbors(id2node[i]))) for i in range(self.num_node)])
        self.neighbors = [[node2id[v] for v in self.G.neighbors(id2node[i])] for i in range(self.num_node)]
        s = time.time()
        # Alias tables give O(1) weighted neighbor sampling per node.
        self.alias_nodes = {}
        self.node_weight = {}
        for i in range(self.num_node):
            unnormalized_probs = [self.G[id2node[i]][nbr].get("weight", 1.0) for nbr in self.G.neighbors(id2node[i])]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
            self.alias_nodes[i] = alias_setup(normalized_probs)
            self.node_weight[i] = dict(
                zip(
                    [node2id[nbr] for nbr in self.G.neighbors(id2node[i])],
                    unnormalized_probs,
                )
            )
        t = time.time()
        print("alias_nodes", t - s)
        print("number of sample edges ", self.num_round * self.num_edge * self.window_size)
        print("random walk start...")
        t0 = time.time()
        results = []
        # Workers sample path co-occurrences in parallel; each returns a
        # partial sparse matrix.
        pool = Pool(processes=self.worker)
        for i in range(self.worker):
            results.append(pool.apply_async(func=self._random_walk_matrix, args=(i,)))
        pool.close()
        pool.join()
        print("random walk time", time.time() - t0)
        matrix = sp.csr_matrix((self.num_node, self.num_node))
        A = sp.csr_matrix(nx.adjacency_matrix(self.G))
        degree = sp.diags(np.array(A.sum(axis=0))[0], format="csr")
        degree_inv = degree.power(-1)
        t1 = time.time()
        for res in results:
            matrix += res.get()
        t2 = time.time()
        print("construct random walk matrix time", time.time() - t1)
        # Normalize into the PMI-style matrix, then truncate-log sparsify.
        L = sp.csgraph.laplacian(matrix, normed=False, return_diag=False)
        M = degree_inv.dot(degree - L).dot(degree_inv)
        M = M * A.sum() / self.negative
        M.data[M.data <= 1] = 1
        M.data = np.log(M.data)
        M.eliminate_zeros()
        print("number of nzz", M.nnz)
        print("construct matrix sparsifier time", time.time() - t2)
        embeddings = self._get_embedding_rand(M)
        if return_dict:
            features_matrix = dict()
            for vid, node in enumerate(self.G.nodes()):
                features_matrix[node] = embeddings[vid]
        else:
            features_matrix = np.zeros((graph.num_nodes, embeddings.shape[1]))
            nx_nodes = self.G.nodes()
            # NOTE(review): assumes node labels are ints 0..num_nodes-1 — confirm.
            features_matrix[nx_nodes] = embeddings[np.arange(graph.num_nodes)]
        return features_matrix
    def _get_embedding_rand(self, matrix):
        # Sparse randomized truncated SVD for fast embedding.
        t1 = time.time()
        l = matrix.shape[0]
        smat = sp.csc_matrix(matrix)
        print("svd sparse", smat.data.shape[0] * 1.0 / l ** 2)
        U, Sigma, VT = randomized_svd(smat, n_components=self.dimension, n_iter=5, random_state=None)
        U = U * np.sqrt(Sigma)
        U = preprocessing.normalize(U, "l2")
        print("sparsesvd time", time.time() - t1)
        return U
    def _path_sampling(self, u, v, r):
        # Sample an r-length path through edge (u, v); return end nodes and
        # the accumulated Z value (sum of 2/weight over traversed edges).
        k = np.random.randint(r) + 1
        zp, rand_u, rand_v = 2.0 / self.node_weight[u][v], k - 1, r - k
        for i in range(rand_u):
            new_u = self.neighbors[u][alias_draw(self.alias_nodes[u][0], self.alias_nodes[u][1])]
            zp += 2.0 / self.node_weight[u][new_u]
            u = new_u
        for j in range(rand_v):
            new_v = self.neighbors[v][alias_draw(self.alias_nodes[v][0], self.alias_nodes[v][1])]
            zp += 2.0 / self.node_weight[v][new_v]
            v = new_v
        return u, v, zp
    def _random_walk_matrix(self, pid):
        # Build this worker's partial co-occurrence matrix; pid seeds the RNG
        # so each worker draws an independent sample stream.
        np.random.seed(pid)
        matrix = sp.lil_matrix((self.num_node, self.num_node))
        for i in tqdm(range(self.num_edge * self.num_round // self.worker)):
            u, v = self.edges[i % self.num_edge]
            if not self.is_directed and np.random.rand() > 0.5:
                v, u = u, v
            for r in range(1, self.window_size + 1):
                u_, v_, zp = self._path_sampling(u, v, r)
                matrix[u_, v_] += 2 * r / self.window_size / self.num_round / zp
        return matrix.tocsr()
f7213978ccd0f01659e3efbbcfb25973a4e526b4 | 877 | py | Python | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | CTFd/forms/challenges.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | from wtforms import MultipleFileField, SelectField, StringField
from wtforms.validators import InputRequired
from CTFd.forms import BaseForm
from CTFd.forms.fields import SubmitField
class ChallengeSearchForm(BaseForm):
    """Admin form for searching challenges by a single field/value pair."""
    # Which challenge attribute to match against.
    field = SelectField(
        "Search Field",
        choices=[
            ("name", "Name"),
            ("id", "ID"),
            ("category", "Category"),
            ("type", "Type"),
        ],
        default="name",
        validators=[InputRequired()],
    )
    # The search term itself.
    q = StringField("Parameter", validators=[InputRequired()])
    submit = SubmitField("Search")
class ChallengeFilesUploadForm(BaseForm):
    """Form for attaching one or more files to a challenge."""
    file = MultipleFileField(
        "Upload Files",
        description="Attach multiple files using Control+Click or Cmd+Click.",
        validators=[InputRequired()],
    )
    submit = SubmitField("Upload")
| 28.290323 | 79 | 0.608894 | from wtforms import MultipleFileField, SelectField, StringField
from wtforms.validators import InputRequired
from CTFd.forms import BaseForm
from CTFd.forms.fields import SubmitField
class ChallengeSearchForm(BaseForm):
    """Admin form for searching challenges by a single field/value pair."""
    # Which challenge attribute to match against.
    field = SelectField(
        "Search Field",
        choices=[
            ("name", "Name"),
            ("id", "ID"),
            ("category", "Category"),
            ("type", "Type"),
        ],
        default="name",
        validators=[InputRequired()],
    )
    # The search term itself.
    q = StringField("Parameter", validators=[InputRequired()])
    submit = SubmitField("Search")
class ChallengeFilesUploadForm(BaseForm):
    """Form for attaching one or more files to a challenge."""
    file = MultipleFileField(
        "Upload Files",
        description="Attach multiple files using Control+Click or Cmd+Click.",
        validators=[InputRequired()],
    )
    submit = SubmitField("Upload")
| true | true |
f72139a7d81181702d6d08b0921ed75b1e2aa778 | 2,422 | py | Python | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | 1 | 2021-12-03T02:39:29.000Z | 2021-12-03T02:39:29.000Z | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | staicoin/consensus/block_rewards.py | d00kSI/staicoin-blockchain | 5783a48271c8145c8eea93169df13a9ed32817ad | [
"Apache-2.0"
] | null | null | null | from staicoin.util.ints import uint32, uint64
# 1 stai coin = 1,000,000,000 = 1 billion mojo.
_mojo_per_staicoin = 1000000000
_blocks_per_year = 1681920 # 32 * 6 * 24 * 365
def calculate_pool_reward(height: uint32) -> uint64:
    """
    Return the pool share of the block reward (in mojo) at ``height``.

    The pool earns 4/5 of each block's reward; a solo farmer acts as its own
    pool and therefore collects the full amount. Halvings are scheduled by
    block count (roughly one year of blocks), so their real-world timing
    drifts with difficulty and netspace — they will likely arrive early.
    """
    if height == 0:
        # Genesis block: pool fraction of the pre-farm.
        mojos = (992 / 1000) * 55882000 * _mojo_per_staicoin
    elif height < _blocks_per_year:
        mojos = (4 / 5) * 5 * _mojo_per_staicoin
    elif height < 2 * _blocks_per_year:
        mojos = (2 / 2.5) * 2.5 * _mojo_per_staicoin
    else:
        mojos = (1 / 1.25) * 1.25 * _mojo_per_staicoin
    return uint64(int(mojos))
def calculate_base_farmer_reward(height: uint32) -> uint64:
    """
    Return the farmer's base share of the block reward (in mojo) at ``height``.

    The farmer's fixed share is 1/5 of the total block reward. Halvings follow
    the same block-count schedule as the pool reward, so their real-world
    timing drifts with difficulty and netspace.
    """
    if height == 0:
        # Genesis block: farmer fraction of the pre-farm.
        mojos = (8 / 1000) * 55882000 * _mojo_per_staicoin
    elif height < _blocks_per_year:
        mojos = (1 / 5) * 5 * _mojo_per_staicoin
    elif height < 2 * _blocks_per_year:
        mojos = (1 / 2.5) * 2.5 * _mojo_per_staicoin
    else:
        mojos = (1 / 1.25) * 1.25 * _mojo_per_staicoin
    return uint64(int(mojos))
def calculate_base_officialwallets_reward(height: uint32) -> uint64:
    """
    Return the community (official wallets) reward in mojo at ``height``.

    Each stage's fraction times its total works out to 1 stai per block
    (and 0 at genesis).
    """
    if height == 0:
        mojos = (1 / 6) * 0 * _mojo_per_staicoin
    elif height < _blocks_per_year:
        mojos = (1 / 6) * 6 * _mojo_per_staicoin
    elif height < 2 * _blocks_per_year:
        mojos = (1 / 3) * 3 * _mojo_per_staicoin
    else:
        mojos = (1 / 2) * 2 * _mojo_per_staicoin
    return uint64(int(mojos))
| 42.491228 | 116 | 0.67052 | from staicoin.util.ints import uint32, uint64
_mojo_per_staicoin = 1000000000
_blocks_per_year = 1681920
def calculate_pool_reward(height: uint32) -> uint64:
    """Return the pool share (4/5) of the block reward, in mojo, at ``height``.

    Halvings follow block count (~1 year of blocks), not wall-clock time.
    """
    if height == 0:
        return uint64(int((992 / 1000) * 55882000 * _mojo_per_staicoin))
    elif height < 1 * _blocks_per_year:
        return uint64(int((4 / 5) * 5 * _mojo_per_staicoin))
    elif height < 2 * _blocks_per_year:
        return uint64(int((2 / 2.5) * 2.5 * _mojo_per_staicoin))
    else:
        return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_farmer_reward(height: uint32) -> uint64:
    """Return the farmer's base share (1/5) of the block reward, in mojo.

    Halvings follow block count (~1 year of blocks), not wall-clock time.
    """
    if height == 0:
        return uint64(int((8 / 1000) * 55882000 * _mojo_per_staicoin))
    elif height < 1 * _blocks_per_year:
        return uint64(int((1 / 5) * 5 * _mojo_per_staicoin))
    elif height < 2 * _blocks_per_year:
        return uint64(int((1 / 2.5) * 2.5 * _mojo_per_staicoin))
    else:
        return uint64(int((1 / 1.25) * 1.25 * _mojo_per_staicoin))
def calculate_base_officialwallets_reward(height: uint32) -> uint64:
    """Return the community (official wallets) reward in mojo: 1 stai per
    block at every stage, 0 at genesis.
    """
    if height == 0:
        return uint64(int((1 / 6) * 0 * _mojo_per_staicoin))
    elif height < 1 * _blocks_per_year:
        return uint64(int((1 / 6) * 6 * _mojo_per_staicoin))
    elif height < 2 * _blocks_per_year:
        return uint64(int((1 / 3) * 3 * _mojo_per_staicoin))
    else:
        return uint64(int((1 / 2) * 2 * _mojo_per_staicoin))
| true | true |
f7213d02b27c785ef91695bcb230eaaa02989fb9 | 6,910 | py | Python | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | nova/objects/migrate_data.py | JohnGarbutt/nova | 21f6f7b63af920aa3a5501603c3debbcd5ec5bc5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live-migration data blobs."""

    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize whichever attributes are set into the legacy dict form."""
        legacy = {}
        for attr in ('is_volume_backed', 'migration'):
            if self.obj_attr_is_set(attr):
                legacy[attr] = getattr(self, attr)
        if pre_migration_result:
            # Stub that subclasses fill with driver-specific data.
            legacy['pre_live_migration_result'] = {}
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy dict; missing keys stay unset."""
        for attr in ('is_volume_backed', 'migration'):
            if attr in legacy:
                setattr(self, attr, legacy[attr])
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
    """Per-block-device info exchanged during libvirt live migration."""

    VERSION = '1.0'

    fields = {
        # FIXME(danms): some of these can be enums?
        'serial': fields.StringField(),
        'bus': fields.StringField(),
        'dev': fields.StringField(),
        'type': fields.StringField(),
        'format': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'connection_info_json': fields.StringField(),
    }

    # There is no connection_info object yet; the blob is stored as JSON
    # text and exposed as a dict through this property pair so a future
    # object type can slot in without changing callers.
    @property
    def connection_info(self):
        return jsonutils.loads(self.connection_info_json)

    @connection_info.setter
    def connection_info(self, info):
        self.connection_info_json = jsonutils.dumps(info)

    def as_disk_info(self):
        """Return the legacy libvirt disk_info dict for this device."""
        info = {'dev': self.dev, 'bus': self.bus, 'type': self.type}
        # Optional keys are only emitted when set and meaningful.
        if self.obj_attr_is_set('format') and self.format:
            info['format'] = self.format
        if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
            info['boot_index'] = str(self.boot_index)
        return info
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
    """Libvirt-specific live-migration data, with legacy dict conversion."""

    VERSION = '1.0'

    fields = {
        'filename': fields.StringField(),
        # FIXME: image_type should be enum?
        'image_type': fields.StringField(),
        'block_migration': fields.BooleanField(),
        'disk_over_commit': fields.BooleanField(),
        'disk_available_mb': fields.IntegerField(nullable=True),
        'is_shared_instance_path': fields.BooleanField(),
        'is_shared_block_storage': fields.BooleanField(),
        'instance_relative_path': fields.StringField(),
        'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
        'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
        'serial_listen_addr': fields.StringField(),
        'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
    }

    def _bdms_to_legacy(self, legacy):
        """Add the legacy 'volume' mapping for any attached BDMs."""
        if not self.obj_attr_is_set('bdms'):
            return
        legacy['volume'] = {}
        for bdmi in self.bdms:
            legacy['volume'][bdmi.serial] = {
                'disk_info': bdmi.as_disk_info(),
                'connection_info': bdmi.connection_info}

    def _bdms_from_legacy(self, legacy_pre_result):
        """Rebuild self.bdms from a legacy pre_live_migration_result dict."""
        self.bdms = []
        volume = legacy_pre_result.get('volume', {})
        for serial in volume:
            vol = volume[serial]
            disk_info = vol['disk_info']
            bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
            bdmi.connection_info = vol['connection_info']
            bdmi.bus = disk_info['bus']
            bdmi.dev = disk_info['dev']
            bdmi.type = disk_info['type']
            # Bug fix: 'format' and 'boot_index' live inside disk_info (see
            # as_disk_info/_bdms_to_legacy); the previous check against the
            # outer vol dict never matched, silently dropping both values.
            if 'format' in disk_info:
                bdmi.format = disk_info['format']
            if 'boot_index' in disk_info:
                bdmi.boot_index = int(disk_info['boot_index'])
            self.bdms.append(bdmi)

    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize to the legacy dict format used by older RPC peers."""
        # Lazy %-args: the object is only stringified when DEBUG is enabled.
        LOG.debug('Converting to legacy: %s', self)
        legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
        keys = (set(self.fields.keys()) -
                set(LiveMigrateData.fields.keys()) - {'bdms'})
        legacy.update({k: getattr(self, k) for k in keys
                       if self.obj_attr_is_set(k)})
        graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
        graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
        live_result = {
            'graphics_listen_addrs': {
                'vnc': graphics_vnc and str(graphics_vnc),
                'spice': graphics_spice and str(graphics_spice),
            },
            'serial_listen_addr': legacy.pop('serial_listen_addr', None),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
            self._bdms_to_legacy(live_result)
        LOG.debug('Legacy result: %s', legacy)
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy dict."""
        LOG.debug('Converting legacy dict to obj: %s', legacy)
        super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
        keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
        for k in keys - {'bdms'}:
            if k in legacy:
                setattr(self, k, legacy[k])
        if 'pre_live_migration_result' in legacy:
            pre_result = legacy['pre_live_migration_result']
            self.graphics_listen_addr_vnc = \
                pre_result['graphics_listen_addrs'].get('vnc')
            self.graphics_listen_addr_spice = \
                pre_result['graphics_listen_addrs'].get('spice')
            if 'serial_listen_addr' in pre_result:
                self.serial_listen_addr = pre_result['serial_listen_addr']
            self._bdms_from_legacy(pre_result)
        LOG.debug('Converted object: %s', self)
from oslo_log import log
from oslo_serialization import jsonutils
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
LOG = log.getLogger(__name__)
@obj_base.NovaObjectRegistry.register_if(False)
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live-migration data blobs."""
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
    }
    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize whichever attributes are set into the legacy dict form."""
        legacy = {}
        if self.obj_attr_is_set('is_volume_backed'):
            legacy['is_volume_backed'] = self.is_volume_backed
        if self.obj_attr_is_set('migration'):
            legacy['migration'] = self.migration
        if pre_migration_result:
            # Stub that subclasses fill with driver-specific data.
            legacy['pre_live_migration_result'] = {}
        return legacy
    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy dict; missing keys stay unset."""
        if 'is_volume_backed' in legacy:
            self.is_volume_backed = legacy['is_volume_backed']
        if 'migration' in legacy:
            self.migration = legacy['migration']
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
    """Per-block-device info exchanged during libvirt live migration."""
    VERSION = '1.0'
    fields = {
        'serial': fields.StringField(),
        'bus': fields.StringField(),
        'dev': fields.StringField(),
        'type': fields.StringField(),
        'format': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'connection_info_json': fields.StringField(),
    }
    # There is no connection_info object yet; the blob is stored as JSON
    # text and exposed as a dict through this property pair.
    @property
    def connection_info(self):
        return jsonutils.loads(self.connection_info_json)
    @connection_info.setter
    def connection_info(self, info):
        self.connection_info_json = jsonutils.dumps(info)
    def as_disk_info(self):
        """Return the legacy libvirt disk_info dict for this device."""
        info_dict = {
            'dev': self.dev,
            'bus': self.bus,
            'type': self.type,
        }
        # Optional keys are only emitted when set and meaningful.
        if self.obj_attr_is_set('format') and self.format:
            info_dict['format'] = self.format
        if self.obj_attr_is_set('boot_index') and self.boot_index is not None:
            info_dict['boot_index'] = str(self.boot_index)
        return info_dict
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
    """Libvirt-specific live-migration data, with legacy dict conversion."""
    VERSION = '1.0'
    fields = {
        'filename': fields.StringField(),
        'image_type': fields.StringField(),
        'block_migration': fields.BooleanField(),
        'disk_over_commit': fields.BooleanField(),
        'disk_available_mb': fields.IntegerField(nullable=True),
        'is_shared_instance_path': fields.BooleanField(),
        'is_shared_block_storage': fields.BooleanField(),
        'instance_relative_path': fields.StringField(),
        'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
        'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
        'serial_listen_addr': fields.StringField(),
        'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
    }
    def _bdms_to_legacy(self, legacy):
        # Add the legacy 'volume' mapping for any attached BDMs.
        if not self.obj_attr_is_set('bdms'):
            return
        legacy['volume'] = {}
        for bdmi in self.bdms:
            legacy['volume'][bdmi.serial] = {
                'disk_info': bdmi.as_disk_info(),
                'connection_info': bdmi.connection_info}
    def _bdms_from_legacy(self, legacy_pre_result):
        # Rebuild self.bdms from a legacy pre_live_migration_result dict.
        self.bdms = []
        volume = legacy_pre_result.get('volume', {})
        for serial in volume:
            vol = volume[serial]
            bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
            bdmi.connection_info = vol['connection_info']
            bdmi.bus = vol['disk_info']['bus']
            bdmi.dev = vol['disk_info']['dev']
            bdmi.type = vol['disk_info']['type']
            # NOTE(review): 'format'/'boot_index' are written into
            # vol['disk_info'] by as_disk_info, so these checks against the
            # outer vol dict look like they never match — confirm and fix.
            if 'format' in vol:
                bdmi.format = vol['disk_info']['format']
            if 'boot_index' in vol:
                bdmi.boot_index = int(vol['disk_info']['boot_index'])
            self.bdms.append(bdmi)
    def to_legacy_dict(self, pre_migration_result=False):
        """Serialize to the legacy dict format used by older RPC peers."""
        LOG.debug('Converting to legacy: %s' % self)
        legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
        # Libvirt-specific keys (everything not on the base class, minus bdms).
        keys = (set(self.fields.keys()) -
                set(LiveMigrateData.fields.keys()) - {'bdms'})
        legacy.update({k: getattr(self, k) for k in keys
                       if self.obj_attr_is_set(k)})
        graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
        graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
        live_result = {
            'graphics_listen_addrs': {
                'vnc': graphics_vnc and str(graphics_vnc),
                'spice': graphics_spice and str(graphics_spice),
            },
            'serial_listen_addr': legacy.pop('serial_listen_addr', None),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
            self._bdms_to_legacy(live_result)
        LOG.debug('Legacy result: %s' % legacy)
        return legacy
    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy dict."""
        LOG.debug('Converting legacy dict to obj: %s' % legacy)
        super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
        keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
        for k in keys - {'bdms'}:
            if k in legacy:
                setattr(self, k, legacy[k])
        if 'pre_live_migration_result' in legacy:
            pre_result = legacy['pre_live_migration_result']
            self.graphics_listen_addr_vnc = \
                pre_result['graphics_listen_addrs'].get('vnc')
            self.graphics_listen_addr_spice = \
                pre_result['graphics_listen_addrs'].get('spice')
            if 'serial_listen_addr' in pre_result:
                self.serial_listen_addr = pre_result['serial_listen_addr']
            self._bdms_from_legacy(pre_result)
        LOG.debug('Converted object: %s' % self)
| true | true |
f72141757d175109c5dcb045ecabaf1763ea12c7 | 1,249 | py | Python | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | 1 | 2019-10-07T11:14:33.000Z | 2019-10-07T11:14:33.000Z | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | 15 | 2019-10-07T10:57:58.000Z | 2019-10-13T12:35:19.000Z | backend/verify/migrations/0001_initial.py | dabonthatbih/kkr-rest-api | e469183a99bd650c2ab979c4e420c3673b9ec049 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-06 13:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    """Initial migration: create the Customer table (1:1 with the auth user)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=100)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=200)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('country', django_countries.fields.CountryField(max_length=2)),
                # NOTE(review): 'adress' is misspelled but is the real column
                # name; renaming needs a follow-up migration, not an edit here.
                ('adress', models.CharField(max_length=300)),
                ('zip_code', models.CharField(max_length=20)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 37.848485 | 122 | 0.604484 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_countries.fields
class Migration(migrations.Migration):
    """Initial migration: create the Customer table (1:1 with the auth user)."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=100)),
                ('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=200)),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('country', django_countries.fields.CountryField(max_length=2)),
                # NOTE(review): 'adress' is misspelled but is the real column
                # name; renaming needs a follow-up migration, not an edit here.
                ('adress', models.CharField(max_length=300)),
                ('zip_code', models.CharField(max_length=20)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f7214236590167f0e9c078503c47ef27d6da679f | 19,636 | py | Python | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:47.000Z | 2022-03-18T13:15:17.000Z | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 3 | 2021-11-05T07:56:16.000Z | 2022-03-27T13:27:05.000Z | tests/models/dsettlement/test_acceptance.py | Deltares/GEOLib | 73c3f325ba40a3e0c586e337541d491f4296f50c | [
"MIT"
] | 4 | 2021-10-29T21:30:51.000Z | 2022-01-17T13:20:40.000Z | import logging
import os
import pathlib
from datetime import timedelta
from pathlib import Path
from typing import List
from warnings import warn
import pydantic
import pytest
from pydantic.color import Color
from teamcity import is_running_under_teamcity
import geolib.models.dsettlement.loads as loads
import geolib.soils as soil_external
from geolib.geometry.one import Point
from geolib.models import BaseModel
from geolib.models.dsettlement.dsettlement_model import DSettlementModel
from geolib.models.dsettlement.internal import (
Bool,
Boundaries,
Boundary,
ConsolidationModel,
Curve,
Curves,
Dimension,
DispersionConditionLayerBoundary,
DSeriePoint,
DSettlementStructure,
GeometryData,
Layer,
Layers,
Model,
Points,
PreconPressureWithinLayer,
SoilModel,
StrainType,
StressDistributionLoads,
StressDistributionSoil,
Version,
)
from geolib.models.dsettlement.loads import RectangularLoad
from geolib.soils import (
IsotacheParameters,
Soil,
SoilClassificationParameters,
SoilWeightParameters,
StateType,
)
from tests.utils import TestUtils, only_teamcity
class TestDSettlementAcceptance:
    """System/acceptance tests for building and serializing DSettlement models.

    Each test assembles a model through the public ``DSettlementModel`` API and
    serializes it to an ``.sli`` input file under ``self.outputdir`` (for
    manual inspection / downstream runs).  The shared soil set, point list and
    the frequently reused layered geometry are factored into helpers so the
    individual tests only show what is specific to them.
    """

    def setup_class(self):
        # Soil set used by most tests below (weights in kN/m3 --
        # saturated/unsaturated).  NOTE(review): "Embankement" spelling is
        # kept as-is; the serialized files depend on the material names.
        self.soils = [
            Soil(
                name="Sand",
                soil_weight_parameters=SoilWeightParameters(
                    saturated_weight=19.0, unsaturated_weight=17.0
                ),
            ),
            Soil(
                name="Peat",
                soil_weight_parameters=SoilWeightParameters(
                    saturated_weight=10.0, unsaturated_weight=10.0
                ),
            ),
            Soil(
                name="Clay",
                soil_weight_parameters=SoilWeightParameters(
                    saturated_weight=14.0, unsaturated_weight=14.0
                ),
            ),
            Soil(
                name="Embankement",
                soil_weight_parameters=SoilWeightParameters(
                    saturated_weight=16.0, unsaturated_weight=16.0
                ),
            ),
        ]
        # Shared geometry points; the indices noted below are referenced by
        # the helper methods and the headline test.
        self.points = [
            Point(x=-50, z=0.0),  # 0 top layer
            Point(x=-10, z=0.0),  # 1
            Point(x=0, z=2),  # 2
            Point(x=10, z=2),  # 3
            Point(x=30, z=0.0),  # 4
            Point(x=50, z=0.0),  # 5
            Point(x=-50, z=-5),  # 6 second layer
            Point(x=50, z=-5),  # 7
            Point(x=-50, z=-10),  # 8 third layer
            Point(x=50, z=-10),  # 9
            Point(x=-50, z=-20),  # 10 fourth layer
            Point(x=50, z=-20),  # 11
            Point(x=-50, z=-2),  # 12 phreatic line
            Point(x=50, z=-2),  # 13
            Point(x=-50, z=1),  # 14 headline 1
            Point(x=50, z=1),  # 15
        ]
        # (The original setup also created an unused DSettlementModel here;
        # dropped because every test builds its own model.)
        self.outputdir = Path(
            TestUtils.get_output_test_data_dir("dsettlement/acceptancetest/")
        )
        self.inputfile = Path(
            TestUtils.get_test_data_dir("test_data/dsettlement", "2dgeom_with10.sld")
        )

    # ---- helpers ---------------------------------------------------------

    def _model_with_soils(self):
        """Return a fresh model with all default soils added."""
        dm = DSettlementModel()
        for soil in self.soils:
            dm.add_soil(soil)
        return dm

    def _add_layered_geometry(self, dm):
        """Add the shared phreatic line, five boundaries and four layers.

        This reproduces the geometry that was previously copy-pasted into
        several tests: Sand/Clay/Peat/Embankement layers stacked bottom-up,
        all referencing the phreatic head line.  Returns the phreatic line id.
        """
        pl_id = dm.add_head_line(
            points=[self.points[12], self.points[13]], is_phreatic=True
        )
        b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
        b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
        b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
        b4 = dm.add_boundary(
            points=[self.points[0], self.points[1], self.points[4], self.points[5]]
        )
        b5 = dm.add_boundary(
            points=[
                self.points[0],
                self.points[1],
                self.points[2],
                self.points[3],
                self.points[4],
                self.points[5],
            ]
        )
        # Same order as the original tests: Sand, Clay, Peat, Embankement.
        for material_name, boundary_top, boundary_bottom in (
            ("Sand", b2, b1),
            ("Clay", b3, b2),
            ("Peat", b4, b3),
            ("Embankement", b5, b4),
        ):
            dm.add_layer(
                material_name=material_name,
                head_line_top=pl_id,
                head_line_bottom=pl_id,
                boundary_top=boundary_top,
                boundary_bottom=boundary_bottom,
            )
        return pl_id

    # ---- tests -----------------------------------------------------------

    @pytest.mark.systemtest
    def test_dsettlement_empty(self):
        # An empty model must serialize without error.
        dm = DSettlementModel()
        path = self.outputdir / "test_empty.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_soils(self):
        dm = self._model_with_soils()
        path = self.outputdir / "test_add_soils.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_soil_koppejan(self):
        dm = DSettlementModel()
        # TODO adding soils is too complex; should be something like
        #   Soil(soilcp=100., soilcp1=10., ...)
        soil_input = Soil(name="MyNewSoil")
        soil_input.soil_classification_parameters = SoilClassificationParameters()
        soil_input.soil_weight_parameters = soil_external.SoilWeightParameters()
        soil_input.soil_weight_parameters.saturated_weight = (
            soil_external.StochasticParameter(mean=20)
        )
        soil_input.soil_weight_parameters.unsaturated_weight = (
            soil_external.StochasticParameter(mean=30)
        )
        soil_input.soil_classification_parameters.initial_void_ratio = (
            soil_external.StochasticParameter(mean=0.1)
        )
        soil_input.koppejan_parameters = soil_external.KoppejanParameters(
            precon_koppejan_type=StateType.YIELD_STRESS
        )
        soil_input.soil_state = soil_external.SoilState(
            use_equivalent_age=True, equivalent_age=2
        )
        soil_input.koppejan_parameters.preconsolidation_pressure = (
            soil_external.StochasticParameter(mean=10)
        )
        dm.add_soil(soil_input)
        path = self.outputdir / "test_add_soil_koppejan.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_simple_geometry(self):
        dm = self._model_with_soils()
        self._add_layered_geometry(dm)
        path = self.outputdir / "test_simple_geometry.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_headlines(self):
        # Variant of the layered geometry that mixes a phreatic line, a
        # second (non-phreatic) head line and deliberately non-existent
        # head line ids (99) on some layers.
        dm = self._model_with_soils()
        pl_id = dm.add_head_line(
            points=[self.points[12], self.points[13]], is_phreatic=True
        )
        hl_id = dm.add_head_line(
            points=[self.points[14], self.points[15]], is_phreatic=False
        )
        b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
        b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
        b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
        b4 = dm.add_boundary(
            points=[self.points[0], self.points[1], self.points[4], self.points[5]]
        )
        b5 = dm.add_boundary(
            points=[
                self.points[0],
                self.points[1],
                self.points[2],
                self.points[3],
                self.points[4],
                self.points[5],
            ]
        )
        dm.add_layer(
            material_name="Sand",
            head_line_top=hl_id,
            head_line_bottom=hl_id,
            boundary_top=b2,
            boundary_bottom=b1,
        )
        dm.add_layer(
            material_name="Clay",
            head_line_top=99,
            head_line_bottom=hl_id,
            boundary_top=b3,
            boundary_bottom=b2,
        )
        dm.add_layer(
            material_name="Peat",
            head_line_top=pl_id,
            head_line_bottom=99,
            boundary_top=b4,
            boundary_bottom=b3,
        )
        dm.add_layer(
            material_name="Embankement",
            head_line_top=pl_id,
            head_line_bottom=pl_id,
            boundary_top=b5,
            boundary_bottom=b4,
        )
        path = self.outputdir / "test_headlines.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_load(self):
        dm = self._model_with_soils()
        self._add_layered_geometry(dm)
        dm.add_non_uniform_load(
            "traffic",
            points=[self.points[2], Point(x=1, z=3), Point(x=9, z=3), self.points[3]],
            gamma_wet=25.0,
            gamma_dry=25.0,
            time_start=timedelta(days=0),
            time_end=timedelta(days=1000),
        )
        path = self.outputdir / "test_add_load.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_add_verticals(self):
        dm = self._model_with_soils()
        self._add_layered_geometry(dm)
        dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
        path = self.outputdir / "test_set_verticals.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_set_model(self):
        # koppejan, natural strain, darcy, vertical drains
        dm = DSettlementModel()
        dm.set_model(
            constitutive_model=SoilModel.NEN_KOPPEJAN,
            consolidation_model=ConsolidationModel.DARCY,
            is_vertical_drain=True,
            strain_type=StrainType.NATURAL,
            is_two_dimensional=True,
            is_fit_for_settlement_plate=False,
            is_probabilistic=False,
            is_horizontal_displacements=False,
            is_secondary_swelling=True,  # TODO document this parameter
            is_waspan=False,  # TODO document this parameter
        )
        path = self.outputdir / "test_set_model.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_set_residualtimes(self):
        dm = DSettlementModel()
        dm.set_calculation_times(
            time_steps=[timedelta(days=d) for d in [10, 100, 1000, 2000, 3000, 4000]]
        )
        path = self.outputdir / "test_set_residualtimes.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_layerload(self):
        dm = self._model_with_soils()
        self._add_layered_geometry(dm)
        path = self.outputdir / "test_layerload.sli"
        dm.serialize(path)

    @pytest.mark.systemtest
    def test_other_load(self):
        dm = self._model_with_soils()
        self._add_layered_geometry(dm)
        dm.add_other_load(
            name="rectangle",
            time=timedelta(days=100),
            point=Point(x=5.0, y=10.0, z=2.0),
            other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
        )
        path = self.outputdir / "test_other_load.sli"
        dm.serialize(path)

    @pytest.mark.acceptance
    @pytest.mark.xfail  # Wrong soils for now
    @only_teamcity
    def test_sorting_vertical_layer_boundaries(self):
        """Test sorting boundaries with 2 vertical layer boundaries."""
        points = [
            Point(x=-50, z=-10),  # 0
            Point(x=50, z=-10),  # 1
            Point(x=-50, z=0.0),  # 2
            Point(x=0, z=0.0),  # 3
            Point(x=0.0, z=-10.0),  # 4
            Point(x=-50, z=-20),  # 5
            Point(x=50, z=-20),  # 6
            Point(x=50, z=0.0),  # 7
        ]
        dm = self._model_with_soils()
        pl_id = dm.add_head_line(points=[points[0], points[1]], is_phreatic=True)
        b1 = dm.add_boundary(points=[points[0], points[4], points[1]])
        b2 = dm.add_boundary(points=[points[2], points[3], points[7]])
        b3 = dm.add_boundary(points=[points[0], points[4], points[3], points[7]])
        b4 = dm.add_boundary(points=[points[5], points[6]])
        dm.add_layer(
            material_name="Sand",
            head_line_top=pl_id,
            head_line_bottom=pl_id,
            boundary_top=b1,
            boundary_bottom=b4,
        )
        dm.add_layer(
            material_name="Clay",
            head_line_top=pl_id,
            head_line_bottom=pl_id,
            boundary_top=b3,
            boundary_bottom=b1,
        )
        # NOTE(review): lower-case "peat" kept exactly as in the original
        # (may be the reason for the xfail "wrong soils" above -- confirm).
        dm.add_layer(
            material_name="peat",
            head_line_top=pl_id,
            head_line_bottom=pl_id,
            boundary_top=b2,
            boundary_bottom=b3,
        )
        dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
        dm.add_other_load(
            name="rectangle",
            time=timedelta(days=100),
            point=Point(x=-5.0, y=-5.0, z=0.0),
            other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
        )
        # For manual checks
        path = self.outputdir / "test_sort_vertical_layer_boundaries.sli"
        dm.serialize(path)
        # Verify geometry is correct and we can parse output
        dm.execute()  # will raise on execution error
        assert dm.datastructure
| 30.443411 | 86 | 0.544052 | import logging
import os
import pathlib
from datetime import timedelta
from pathlib import Path
from typing import List
from warnings import warn
import pydantic
import pytest
from pydantic.color import Color
from teamcity import is_running_under_teamcity
import geolib.models.dsettlement.loads as loads
import geolib.soils as soil_external
from geolib.geometry.one import Point
from geolib.models import BaseModel
from geolib.models.dsettlement.dsettlement_model import DSettlementModel
from geolib.models.dsettlement.internal import (
Bool,
Boundaries,
Boundary,
ConsolidationModel,
Curve,
Curves,
Dimension,
DispersionConditionLayerBoundary,
DSeriePoint,
DSettlementStructure,
GeometryData,
Layer,
Layers,
Model,
Points,
PreconPressureWithinLayer,
SoilModel,
StrainType,
StressDistributionLoads,
StressDistributionSoil,
Version,
)
from geolib.models.dsettlement.loads import RectangularLoad
from geolib.soils import (
IsotacheParameters,
Soil,
SoilClassificationParameters,
SoilWeightParameters,
StateType,
)
from tests.utils import TestUtils, only_teamcity
class TestDSettlementAcceptance:
def setup_class(self):
self.soils = [
Soil(
name="Sand",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=19.0, unsaturated_weight=17.0
),
),
Soil(
name="Peat",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=10.0, unsaturated_weight=10.0
),
),
Soil(
name="Clay",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=14.0, unsaturated_weight=14.0
),
),
Soil(
name="Embankement",
soil_weight_parameters=SoilWeightParameters(
saturated_weight=16.0, unsaturated_weight=16.0
),
),
]
self.points = [
Point(x=-50, z=0.0),
Point(x=-10, z=0.0),
Point(x=0, z=2),
Point(x=10, z=2),
Point(x=30, z=0.0),
Point(x=50, z=0.0),
Point(x=-50, z=-5),
Point(x=50, z=-5),
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=-50, z=-20),
Point(x=50, z=-20),
Point(x=-50, z=-2),
Point(x=50, z=-2),
Point(x=-50, z=1),
Point(x=50, z=1),
]
dm = DSettlementModel()
self.outputdir = Path(
TestUtils.get_output_test_data_dir("dsettlement/acceptancetest/")
)
self.inputfile = Path(
TestUtils.get_test_data_dir("test_data/dsettlement", "2dgeom_with10.sld")
)
@pytest.mark.systemtest
def test_dsettlement_empty(self):
dm = DSettlementModel()
path = self.outputdir / "test_empty.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soils(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
path = self.outputdir / "test_add_soils.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_soil_koppejan(self):
dm = DSettlementModel()
soil_input = Soil(name="MyNewSoil")
soil_input.soil_classification_parameters = SoilClassificationParameters()
soil_input.soil_weight_parameters = soil_external.SoilWeightParameters()
soil_input.soil_weight_parameters.saturated_weight = (
soil_external.StochasticParameter(mean=20)
)
soil_input.soil_weight_parameters.unsaturated_weight = (
soil_external.StochasticParameter(mean=30)
)
soil_input.soil_classification_parameters.initial_void_ratio = (
soil_external.StochasticParameter(mean=0.1)
)
soil_input.koppejan_parameters = soil_external.KoppejanParameters(
precon_koppejan_type=StateType.YIELD_STRESS
)
soil_input.soil_state = soil_external.SoilState(
use_equivalent_age=True, equivalent_age=2
)
soil_input.koppejan_parameters.preconsolidation_pressure = (
soil_external.StochasticParameter(mean=10)
)
dm.add_soil(soil_input)
path = self.outputdir / "test_add_soil_koppejan.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_simple_geometry(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_simple_geometry.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_headlines(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
hl_id = dm.add_head_line(
points=[self.points[14], self.points[15]], is_phreatic=False
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=hl_id,
head_line_bottom=hl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=99,
head_line_bottom=hl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=99,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_headlines.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_non_uniform_load(
"traffic",
points=[self.points[2], Point(x=1, z=3), Point(x=9, z=3), self.points[3]],
gamma_wet=25.0,
gamma_dry=25.0,
time_start=timedelta(days=0),
time_end=timedelta(days=1000),
)
path = self.outputdir / "test_add_load.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_add_verticals(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
path = self.outputdir / "test_set_verticals.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_model(self):
dm = DSettlementModel()
dm.set_model(
constitutive_model=SoilModel.NEN_KOPPEJAN,
consolidation_model=ConsolidationModel.DARCY,
is_vertical_drain=True,
strain_type=StrainType.NATURAL,
is_two_dimensional=True,
is_fit_for_settlement_plate=False,
is_probabilistic=False,
is_horizontal_displacements=False,
is_secondary_swelling=True,
is_waspan=False,
)
path = self.outputdir / "test_set_model.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_set_residualtimes(self):
dm = DSettlementModel()
dm.set_calculation_times(
time_steps=[timedelta(days=d) for d in [10, 100, 1000, 2000, 3000, 4000]]
)
path = self.outputdir / "test_set_residualtimes.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_layerload(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
path = self.outputdir / "test_layerload.sli"
dm.serialize(path)
@pytest.mark.systemtest
def test_other_load(self):
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(
points=[self.points[12], self.points[13]], is_phreatic=True
)
b1 = dm.add_boundary(points=[self.points[10], self.points[11]])
b2 = dm.add_boundary(points=[self.points[8], self.points[9]])
b3 = dm.add_boundary(points=[self.points[6], self.points[7]])
b4 = dm.add_boundary(
points=[self.points[0], self.points[1], self.points[4], self.points[5]]
)
b5 = dm.add_boundary(
points=[
self.points[0],
self.points[1],
self.points[2],
self.points[3],
self.points[4],
self.points[5],
]
)
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b1,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b2,
)
l3 = dm.add_layer(
material_name="Peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b4,
boundary_bottom=b3,
)
l4 = dm.add_layer(
material_name="Embankement",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b5,
boundary_bottom=b4,
)
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=5.0, y=10.0, z=2.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
path = self.outputdir / "test_other_load.sli"
dm.serialize(path)
@pytest.mark.acceptance
@pytest.mark.xfail
@only_teamcity
def test_sorting_vertical_layer_boundaries(self):
points = [
Point(x=-50, z=-10),
Point(x=50, z=-10),
Point(x=-50, z=0.0),
Point(x=0, z=0.0),
Point(x=0.0, z=-10.0),
Point(x=-50, z=-20),
Point(x=50, z=-20),
Point(x=50, z=0.0),
]
dm = DSettlementModel()
for soil in self.soils:
dm.add_soil(soil)
pl_id = dm.add_head_line(points=[points[0], points[1]], is_phreatic=True)
b1 = dm.add_boundary(points=[points[0], points[4], points[1]])
b2 = dm.add_boundary(points=[points[2], points[3], points[7]])
b3 = dm.add_boundary(points=[points[0], points[4], points[3], points[7]])
b4 = dm.add_boundary(points=[points[5], points[6]])
l1 = dm.add_layer(
material_name="Sand",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b1,
boundary_bottom=b4,
)
l2 = dm.add_layer(
material_name="Clay",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b3,
boundary_bottom=b1,
)
l3 = dm.add_layer(
material_name="peat",
head_line_top=pl_id,
head_line_bottom=pl_id,
boundary_top=b2,
boundary_bottom=b3,
)
dm.set_verticals(locations=[Point(x=-10), Point(x=0), Point(x=10)])
dm.add_other_load(
name="rectangle",
time=timedelta(days=100),
point=Point(x=-5.0, y=-5.0, z=0.0),
other_load=RectangularLoad(weight=25, alpha=0, xwidth=5.0, zwidth=10.0),
)
path = self.outputdir / "test_sort_vertical_layer_boundaries.sli"
dm.serialize(path)
dm.execute()
assert dm.datastructure
| true | true |
f7214283b1a050b951ccaeb5b99108ab85e04e6d | 48,023 | py | Python | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | authortitle/views.py | MLGB3/mysite | 433f245918cfc85f3d42b51e7405ae101160d3cd | [
"Apache-2.0"
] | null | null | null | """
# Setup script for index based on Richard Sharpe's List of Identifications
"""
#--------------------------------------------------------------------------------
import math
from django.template import Context, loader
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.db import connection
from urllib import quote, unquote
from mysite.config import *
from mysite.MLGBsolr import *
import mysite.mlgb.views as mv
#--------------------------------------------------------------------------------
# --- Module-level state and configuration -------------------------------
solr_query = '' # for debugging: last Solr query issued
# Module-level flags, toggled by the view functions below:
printing = False # True while rendering a page in print (rather than onscreen) mode
editable = False # True when pages are shown in editable mode
# Base URLs for this app and related MLGB pages.
baseurl="/authortitle"
medieval_catalogues_url = "/authortitle/medieval_catalogues"
mlgb_book_url = '/mlgb/book'
# Default Solr sort field.
default_order_by = "solr_id_sort"
# Solr sort order used when listing catalogue entries by provenance.
catalogue_provenance_sort_list = [ "s_library_type asc",
                                   "s_library_loc asc",
                                   "s_document_code_sort asc",
                                   "s_seqno_in_doc_sort asc",
                                   "s_copy_code asc",
                                   "solr_id_sort asc" ]
# Solr sort order used when listing catalogue entries by document date.
catalogue_date_sort_list = [ "d_document_start asc",
                             "d_document_end asc",
                             "s_library_type asc",
                             "s_library_loc asc",
                             "s_document_code_sort asc",
                             "s_seqno_in_doc_sort asc",
                             "s_copy_code asc",
                             "solr_id_sort asc" ]
# Fields offered in the advanced-search form.  'fieldname' is the Solr
# field (or a pseudo-field, see below), 'label' the form label, 'info'
# help text for the user, and 'value' is filled in at query time.
searchable_fields = [
  { "fieldname": "text", "label": "All fields", "info": "", "value": "" },
  { "fieldname": "t_author", "label": "Author", "info": "", "value": "" },
  { "fieldname": "t_title", "label": "Title of book", "info": "", "value": "" },
  { "fieldname": "t_bibliography", "label": "Bibliographical details", "info": "", "value": "" },
  { "fieldname": "t_library", "label": "Catalogue provenance",
    "info": "E.g. 'Benedictines Peterborough' or 'Henry de Kirkestede'", "value": "" },
  { "fieldname": "t_document", "label": "Description of document", "value": "", "info":
    "E.g. 'Books read in the refectory, 13th cent'." \
    + " Description includes either an indication of document date or the word 'undated'." },
  { "fieldname": "s_document_type", "label": "Type of document", "value": "", "info": "" },
  # The next 2 fields do not map directly to ones in the Solr index.
  # We'll use them to query on s_document_start/end_year
  { "fieldname": "q_earliest_year", "label": "Start of required date range", "value": "",
    "info": "Enter the earliest year that you are interested in, e.g. 1400." },
  { "fieldname": "q_latest_year", "label": "End of required date range", "value": "",
    "info": "Enter the latest year that you are interested in, e.g. 1499." },
]
facet = False
# Assorted output constants.
newline = '\n'
carriage_return = '\r'
right_arrow = '→'
biblio_block_line_length = 100
#================= Top-level functions, called directly from URL ================
#--------------------------------------------------------------------------------
# The function browse() allows browsing of the index by author/title
def browse( request, letter = '', pagename = 'index', called_by_editable_page = False ): #{
  """Browse the author/title index for one initial letter.

  Renders the pre-generated template authortitle/index<LETTER>.html.
  A non-alphabetic 'letter' falls back to 'A'; lower case is upper-cased.
  'called_by_editable_page' preserves the caller's 'editability' setting
  when navigating between the main site and the index.
  """
  # Re-initialise every advanced-search field to blank so that old
  # searches don't hang around.  (searchable_fields is only mutated,
  # never rebound, so no 'global' statement is needed here.)
  for field in searchable_fields:
    field[ "value" ] = ""
  # Keep the caller's edit mode (enable_edit/disable_edit are module
  # helpers defined elsewhere in this file).
  if called_by_editable_page: enable_edit()
  else: disable_edit()
  # Are we about to print this page, or view it in onscreen mode?
  # (Dead store 'printing = False' removed: get_value_from_GET is always
  # called and supplies False as its own default.)
  global printing
  printing = mv.get_value_from_GET( request, "printing", False )
  if letter != '' and not letter.isalpha(): letter = 'A'
  letter = letter.upper()
  # Get a list of document types for a dropdown list
  doctypes = get_doctype_dropdown_options()
  # For now, just for testing, use a hard-coded block of HTML generated
  # by writeHTML.py.  This may need changing later.
  t = loader.get_template('authortitle/index%s.html' % letter )
  c = Context( {
    'pagename' : pagename,
    'editable' : editable,
    'letter'   : letter,
    'printing' : printing,
    'print_link' : mv.get_link_for_print_button( request ),
    'called_by_collapsible_page': True,
    'default_rows_per_page': mv.default_rows_per_page,
    'advanced_search_fields': searchable_fields,
    'doctype_dropdown_options': doctypes,
  } )
  return HttpResponse( t.render( c ) )
#}
# end function browse()
#--------------------------------------------------------------------------------
def browse_e( request, letter = '', pagename = 'index' ):
  """Editable variant of browse(): same view with edit mode switched on."""
  return browse( request, letter, pagename, called_by_editable_page = True )
#--------------------------------------------------------------------------------
# The function medieval_catalogues() allows browsing of the index by medieval document
def medieval_catalogues( request, cat = '', pagename = 'cats', called_by_editable_page = False ): #{
  """Browse the index by medieval document (catalogue).

  'cat' selects one catalogue page (an alphanumeric code), or one of the
  special values 'bydate' (catalogue list sorted by date) or 'decode'
  (list of decodes); anything else shows the full catalogue list.
  'called_by_editable_page' preserves the caller's 'editability' setting
  when navigating between the main site and the index.
  """
  # Re-initialise every advanced-search field to blank so that old
  # searches don't hang around.  (searchable_fields is only mutated,
  # never rebound, so no 'global' statement is needed here.)
  for field in searchable_fields:
    field[ "value" ] = ""
  if called_by_editable_page: enable_edit()
  else: disable_edit()
  # Are we about to print this page, or view it in onscreen mode?
  # (Dead store 'printing = False' removed: get_value_from_GET is always
  # called and supplies False as its own default.)
  global printing
  printing = mv.get_value_from_GET( request, "printing", False )
  sort_by_date = False
  display_decodes = False
  if not cat.isalnum():     # also true for the empty string
    cat = ''
  elif cat == 'bydate':     # special value: list sorted by date
    cat = ''
    sort_by_date = True
  elif cat == 'decode':     # special value: list of decodes
    cat = ''
    display_decodes = True
  else:
    cat = cat.upper()
  called_by_collapsible_page = False
  # Get a list of document types for a dropdown list
  doctypes = get_doctype_dropdown_options()
  # For now, just for testing, use a hard-coded block of HTML generated
  # by cataloguesHTML.py.  This may need changing later.
  if cat:
    t = loader.get_template('authortitle/catalogue%s.html' % cat )
  elif sort_by_date:
    t = loader.get_template('authortitle/cataloguelistbydate.html' )
  elif display_decodes:
    t = loader.get_template('authortitle/decode.html' )
  else: #{
    called_by_collapsible_page = True
    t = loader.get_template('authortitle/cataloguelist.html' )
  #}
  c = Context( {
    'pagename' : pagename,
    'editable' : editable,
    'cat'      : cat,
    'printing' : printing,
    'print_link': mv.get_link_for_print_button( request ),
    'called_by_collapsible_page': called_by_collapsible_page,
    'default_rows_per_page': mv.default_rows_per_page,
    'advanced_search_fields': searchable_fields,
    'doctype_dropdown_options': doctypes,
  } )
  return HttpResponse( t.render( c ) )
#}
# end function medieval_catalogues()
#--------------------------------------------------------------------------------
def medieval_catalogues_e( request, cat = '', pagename = 'cats' ):
  """Editable variant of medieval_catalogues(): same view, edit mode on."""
  return medieval_catalogues( request, cat, pagename, called_by_editable_page = True )
#--------------------------------------------------------------------------------
# The function cat_source() allows browsing of the index by source of medieval catalogue.
# The primary source is the type of institution (document group type), e.g. A for Augustinian Canons.
# You can also browse one location within an institution type (document group type/document group ID),
# e.g. /A/15/ for the Augustinian location 'Lanthony', which has document group ID 15.
def cat_source( request, source = '', loc = '', pagename = 'cats', called_by_editable_page = False ): #{
    """Browse the index by source of medieval catalogue.

    'source' is an institution (document group) type code, e.g. 'A' for
    Augustinian Canons; 'loc' optionally narrows to one location (document
    group ID) within that type, e.g. A/15 for Lanthony.  Renders the
    pre-generated template cataloguelist<SOURCE>[-<loc>].html.
    """
    global searchable_fields  # shared with advanced search
    # Blank every search field so values from an earlier search don't linger
    for field in searchable_fields:
        field[ "value" ] = ""
    # Switch link generation into the requested mode
    if called_by_editable_page:
        enable_edit()
    else:
        disable_edit()
    global printing  # onscreen view vs. print view
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    # Sanitise the URL parameters: source must be purely alphabetic,
    # location purely alphanumeric
    if source.isalpha():
        source = source.upper()
    else: #{
        source = ''
        loc = ''
    #}
    if not loc.isalnum(): loc = ''
    full_source = '%s-%s' % (source, loc.lower()) if loc else source
    # Document types for the dropdown filter
    doctypes = get_doctype_dropdown_options()
    # The catalogue pages are pre-generated blocks of HTML
    # (produced by cataloguesHTML.py)
    template = loader.get_template('authortitle/cataloguelist%s.html' % full_source )
    context = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'source' : source,
        'location' : loc,
        'printing' : printing,
        'print_link': mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( template.render( context ) )
#}
# end function cat_source()
#--------------------------------------------------------------------------------
def cat_source_e( request, source = '', loc = '', pagename = 'cats' ): #{
    """Editable-mode entry point for cat_source()."""
    return cat_source( request, source, loc, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
# The function results() is called either from Quick Search or from Advanced Search
def results( request, pagename = 'results', called_by_editable_page = False ): #{
    """Display search results from either Quick Search or Advanced Search.

    Runs the Solr query described by the request's GET parameters, formats
    the matching records as an HTML string (pagination above, and repeated
    below when the result set spans more than one page), then renders the
    results template.
    """
    # Set editability status
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    # Set printing status
    global printing
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    # See if you are doing quick or advanced search
    search_type = mv.get_value_from_GET( request, "search_type", "quick" )
    # Run the Solr query
    (resultsets, number_of_records, search_term, \
    solr_start, solr_rows, page_size ) = run_solr_query( request )
    mv.printing = printing  # let the pagination helper know the display mode
    pag = mv.pagination( rows_found = number_of_records, \
                         current_row = solr_start, \
                         rows_per_page = solr_rows, \
                         link_for_print_button = mv.get_link_for_print_button( request ),
                         link_for_download_button = mv.get_link_for_download_button( request ) )
    # Format the results into an HTML string ready for display
    order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
    result_string = get_result_string( resultsets, order_by )
    result_string = pag + newline + '<p></p>' + newline + result_string
    if number_of_records > solr_rows: # repeat pagination at the bottom
        result_string += newline + '<p></p>' + newline + pag
    # Get a list of document types for a dropdown list
    doctypes = get_doctype_dropdown_options()
    # Pass HTML string and other data to the template for display
    t = loader.get_template( 'authortitle/results.html' )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'results' : result_string,
        'order_by' : order_by,
        'printing' : printing,
        'print_link' : mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'number_of_records': number_of_records,
        'search_type': search_type,
        'search_term': search_term,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
        'solr_query': solr_query,  # module global set by run_solr_query(), shown for debugging
    } )
    return HttpResponse( t.render( c ) )
#}
# end function results()
#--------------------------------------------------------------------------------
def results_e( request, pagename = 'results' ): #{
    """Editable-mode entry point for results()."""
    return results( request, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
#================ End top-level functions called directly from URL ==============
#--------------------------------------------------------------------------------
## This changes links to exclude the 'editable' part of the URL
def disable_edit(): #{
    """Switch link generation to read-only mode (no '/e' prefix in URLs)."""
    global editable, baseurl
    editable = False
    baseurl = '/authortitle'
#}
#--------------------------------------------------------------------------------
## This changes links to include the 'editable' part of the URL
def enable_edit(): #{
    """Switch link generation to editable mode ('/e' prefix in URLs)."""
    global editable, baseurl
    editable = True
    baseurl = '/e/authortitle'
#}
#--------------------------------------------------------------------------------
# Either run a basic Solr query (i.e. on a single search term) against default field of 'catalogues' core
# Or run an *advanced* Solr query (i.e. on a multiple search terms)
def run_solr_query( request ): #{
    """Run a quick (single-term) or advanced (multi-field) Solr query
    against the 'catalogues' core, driven by the request's GET parameters.

    Side effects: sets the module globals solr_query (for debugging) and
    the 'value' entries of searchable_fields (so the search form can
    redisplay what was searched for).

    Returns a 6-tuple:
    (resultsets, number_of_records, search_term, solr_start, solr_rows, page_size)
    where resultsets is the list of matching Solr documents ([] when the
    query found nothing or GET was empty).
    """
    global solr_query # for debugging
    global searchable_fields # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = "" # initialise every field value to blank
    resultsets = []
    number_of_records = 0
    search_type = ""
    search_term = solr_start = page_size = solr_query = solr_sort = solr_rows = ""
    if request.GET: #{ # was a search term found in GET?
        #=====================================================================
        # Get search type, records per page, start row and "order by" from GET
        #=====================================================================
        # Set search type (quick or advanced)
        search_type = mv.get_value_from_GET( request, 'search_type', 'quick' )
        if search_type not in [ 'quick', 'advanced' ]: search_type = 'quick'
        # Set page size
        page_size = mv.get_value_from_GET( request, "page_size", str( mv.default_rows_per_page ) )
        if page_size.isdigit():
            solr_rows = int( page_size )
        else:
            solr_rows = mv.default_rows_per_page
        # Set start page
        solr_start = mv.get_value_from_GET( request, "start", 0 )
        # Set "order by"
        order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
        if order_by == default_order_by:
            solr_sort = order_by + " asc"
        elif order_by == "catalogue_provenance":
            solr_sort = ",".join( catalogue_provenance_sort_list )
        elif order_by == "catalogue_date":
            solr_sort = ",".join( catalogue_date_sort_list )
        else:
            # unrecognised sort request: fall back to the default ordering
            solr_sort = default_order_by + " asc"
        #=====================
        # Construct Solr query
        #=====================
        if search_type == 'quick': #{ # search on all fields via the single form field 'search_term'
            search_term = mv.get_value_from_GET( request, 'search_term' )
            if not search_term: search_term = '*'
            solr_query = mv.escape_for_solr( search_term )
            if ' ' in solr_query:
                # multi-word term: parenthesise so Solr groups it
                solr_query = '(%s)' % solr_query
            if search_term=='*' or search_term=='':
                solr_query='*:*'
            else: #{
                solr_query = "text:%s" % solr_query
                for field in searchable_fields: #{ # store the search term in the 'text' field
                    fieldname = field[ "fieldname" ]
                    if fieldname == "text": #{
                        field[ "value" ] = search_term
                        break
                    #}
                #}
            #}
        #}
        else: #{ # advanced search on any combination of multiple searchable fields
            fields_searched = []
            for field in searchable_fields: #{
                fieldname = field[ "fieldname" ]
                fieldval = mv.get_value_from_GET( request, fieldname, "" )
                if fieldval == '*': fieldval = ''
                field[ "value" ] = fieldval
                if fieldval: #{ # they entered a query on this field
                    if fieldname in [ "q_earliest_year", "q_latest_year" ]: #{
                        if fieldval.isdigit(): #{
                            query_clause = get_date_range_query( fieldname, fieldval )
                            if query_clause: fields_searched.append( query_clause )
                        #}
                        else: # non-numeric year, can't be queried on
                            field[ "value" ] = ""
                    #}
                    else: #{
                        fieldval = mv.escape_for_solr( fieldval )
                        if ' ' in fieldval: #{
                            if fieldname == 's_document_type': # string not text
                                fieldval = '"%s"' % fieldval
                            else:
                                fieldval = '(%s)' % fieldval
                        #}
                        fields_searched.append( "%s:%s" % (fieldname, fieldval))
                    #}
                #}
            #}
            if len( fields_searched ) > 0:
                solr_query = " AND ".join( fields_searched )
            else: #{
                # no usable field values at all: match everything,
                # and show '*' in the free-text box to reflect that
                solr_query='*:*'
                for field in searchable_fields: #{
                    fieldname = field[ "fieldname" ]
                    if fieldname == 'text': #{
                        field[ "value" ] = "*"
                        break
                    #}
                #}
            #}
        #}
        #===================
        # Run the Solr query
        #===================
        s_para={'q' : solr_query,
                'wt' : s_wt, # 's_wt', i.e. 'writer type' is set in config.py, defaults to "json"
                'start': solr_start,
                'rows' : solr_rows,
                'sort' : solr_sort}
        r = MLGBsolr()
        r.solrresults( s_para, facet, 'catalogues' )
        if r.connstatus and r.s_result: #{ #did we retrieve a result?
            resultsets = r.s_result.get( 'docs' )
            number_of_records = r.s_result.get( 'numFound' )
        #}
    #} # end of check on whether a search term was found in GET
    #===================
    # Return the results
    #===================
    return ( resultsets, number_of_records,
            search_term, solr_start, solr_rows, page_size )
#}
# end function run_solr_query()
#--------------------------------------------------------------------------------
def get_date_range_query( fieldname, fieldval ): #{
    """Build a Solr range clause for a year search.

    E.g. if the required range is 1420-1460: a document matches when its
    start year <= 1460 AND its end year >= 1420, i.e. a required earliest
    year constrains the document END field and a required latest year
    constrains the document START field.  Returns '' for an unrecognised
    fieldname.
    """
    year = fieldval.rjust( 4, '0' )  # pad short years, e.g. '950' -> '0950'
    clause_templates = {
        'q_earliest_year': 's_document_end_year:["%s" TO *]',    # required START
        'q_latest_year':   's_document_start_year:[* TO "%s"]',  # required END
    }
    template = clause_templates.get( fieldname )
    if template is None: return ''
    return template % year
#}
# end function get_date_range_query()
#--------------------------------------------------------------------------------
def extract_from_result( record ): #{
    """Unpack one Solr result document into a fixed 38-element tuple.

    'id' and 'solr_id_sort' are taken to be present on every record;
    every other field defaults to '' when missing, except the multi-valued
    MLGB book-ID list which defaults to [].

    The fields originate from the 'entries', 'books', 'copies',
    'documents', 'document groups' / 'document group types' and
    'MLGB links' tables.
    """
    # Field names in the exact order the callers unpack them.
    field_order = (
        "sql_entry_id",
        "sql_entry_book_count",
        "sql_copy_count",
        "s_entry_name",
        "s_entry_xref_name",
        "s_author_name",
        "s_entry_biblio_line",
        "s_entry_biblio_block",
        "s_title_of_book",
        "s_xref_title_of_book",
        "s_role_in_book",
        "s_problem",
        "s_book_biblio_line",
        "s_copy_code",
        "s_copy_notes",
        "s_printed_yn",
        "s_survives_yn",
        "s_uncertain_yn",
        "s_duplicate_title_yn",
        "s_document_code",
        "s_document_code_sort",
        "s_seqno_in_document",
        "s_seqno_in_doc_sort",
        "s_document_name",
        "d_document_start",       # sorting form, e.g. '12th century' -> 1198
        "d_document_end",
        "s_document_type",
        "s_library_type",         # doc_group_type_name
        "s_library_loc",          # doc_group_name
        "s_library_type_code",    # doc_group_type_code
        "s_library_loc_id",       # doc_group_id
        "s_mlgb_book_id",         # MLGB book ID is multi-valued, just in case
        "s_entry_letter",
        "s_document_start_year",  # searchable/displayable forms of the date
        "s_document_end_year",
        "s_document_date_in_words",
    )
    values = [ record[ "id" ], record[ "solr_id_sort" ] ]
    for name in field_order: #{
        default = [] if name == "s_mlgb_book_id" else ""
        values.append( record.get( name, default ) )
    #}
    return tuple( values )
#}
# end function extract_from_result()
#--------------------------------------------------------------------------------
def get_result_string( results, order_by ): #{
    """Dispatch result formatting according to the requested sort order.

    Falls back to author/title ordering for any unrecognised order_by.
    """
    if not results: return '<p></p>' + newline
    formatters = {
        'catalogue_provenance': get_result_string_by_catalogue_provenance,
        'catalogue_date':       get_result_string_by_catalogue_date,
    }
    formatter = formatters.get( order_by, get_result_string_by_author_title )
    return formatter( results )
#}
# end function get_result_string()
#--------------------------------------------------------------------------------
def get_result_string_by_author_title( results ): #{
    """Format Solr results as nested HTML lists grouped by author/title.

    Structure: author/title entry > books > catalogue entries (copies).
    Relies on the results arriving already sorted so that rows for the
    same entry/book/copy are adjacent; the prev_* variables detect group
    boundaries.
    """
    html = '<ul><!-- start list of author/title entries -->' + newline
    prev_entry_id = ''
    prev_entry_book_count = ''
    prev_title_of_book = ''
    prev_copy_code = ''
    for row in results: #{
        new_entry = False
        new_book = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # Detect group boundaries: a new entry implies a new book
        if sql_entry_id != prev_entry_id: #{
            new_entry = True
            new_book = True
        #}
        elif sql_entry_id == prev_entry_id and sql_entry_book_count != prev_entry_book_count: #{
            new_book = True
        #}
        if new_entry: #{
            if prev_entry_id: #{
                html += '</ul><!-- end catalogue entry list -->' + newline
                html += '</ul><!-- end book list -->' + newline
                html += '</li><!-- end author/title entry -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start author/title entry -->' + newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block, \
                                                      sql_entry_id, s_entry_letter )
            html += '<ul><!-- start book list -->' + newline
        #}
        if new_book: #{
            prev_copy_code = ''
            if not new_entry: #{
                html += '</ul><!-- end catalogue entry list -->' + newline
                if prev_title_of_book: html += '</li><!-- end book -->' + newline
            #}
            # check if the entry refers to a book title rather than an author
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book.strip(): html += '<li class="medieval_cat_result"><!-- start book -->' + newline
            prev_title_of_book = s_title_of_book.strip()
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += '<ul><!-- start list of catalogue entries -->' + newline
        #}
        if sql_copy_count: #{
            # skip consecutive duplicate copy rows
            if s_copy_code != prev_copy_code: #{
                html += '<li class="medieval_cat_result"><!-- start catalogue entry -->' + newline
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         s_entry_name, s_title_of_book )
                html += newline + '<ul>' + newline
                if s_library_type: #{
                    html += '<li>From '
                    html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                    if s_document_code and s_document_name:
                        html += ': %s' % get_document_link( s_document_code, s_document_name, s_document_type )
                    html += '</li>' + newline
                #}
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul>' + newline
                html += '</li><!-- end catalogue entry -->' + newline
            #}
        #}
        prev_entry_id = sql_entry_id
        prev_entry_book_count = sql_entry_book_count
        prev_copy_code = s_copy_code
    #}
    html += '</ul><!-- end catalogue entry list -->' + newline
    html += '</ul><!-- end book list -->' + newline
    html += '</li><!-- end author/title entry -->' + newline
    html += '</ul><!-- end author/title list -->' + newline
    return html
#}
# end get_result_string_by_author_title()
#--------------------------------------------------------------------------------
def get_result_string_by_catalogue_provenance( results ): #{
    """Format Solr results as nested HTML lists grouped by provenance.

    Structure: library/institution (A) > document (B) > catalogue
    entries (C) with a further-details sub-list (D).  Rows without a
    copy count are rendered as cross-reference entries.  Relies on the
    results arriving already sorted so that rows for the same
    library/document/copy are adjacent.
    """
    html = '<ul><!-- start list of library types (A) -->' + newline
    prev_library = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results: #{
        new_library = False
        new_document_code = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # Group key: institution type + location; a new library implies a new document
        curr_library = s_library_type + s_library_loc
        if curr_library != prev_library: #{
            new_library = True
            new_document_code = True
        #}
        elif curr_library == prev_library and s_document_code != prev_document_code: #{
            new_document_code = True
        #}
        if new_library: #{
            if prev_library: #{
                html += '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end library list-item (A) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start library list-item (A) -->' + newline
            html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
            html += newline + '<ul><!-- start document list (B) -->' + newline
        #}
        if new_document_code: #{
            prev_copy_code = ''
            if not new_library: #{
                html += newline + '</ul><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name:
                html += get_document_link( s_document_code, s_document_name, s_document_type )
            else:
                html += '[no document found]'
            html += newline + '<ul><!-- start list of catalogue entries (C) -->' + newline
        #}
        if sql_copy_count: #{
            if s_copy_code != prev_copy_code: #{
                html += newline + '<li class="medieval_cat_result"><!-- start catalogue entry list-item (C) -->'
                html += newline
                # Hover text: institution type, plus location unless already included
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                          s_entry_biblio_line, s_entry_biblio_block,\
                                                          sql_entry_id, s_entry_letter )
                # check if the entry refers to a book title rather than an author
                if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</li><!-- end catalogue entry list-item (C) -->' + newline
            #}
        #}
        else: #{ # just a cross-reference entry
            html += newline + '<li class="medieval_cat_result"><!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block,\
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += newline + '</li><!-- end cross-reference entry (C) -->' + newline
        #}
        prev_library = curr_library
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    #}
    html += newline
    html += '</ul><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end library list-item (A) -->' + newline
    html += '</ul><!-- end list of libraries (A) -->' + newline
    return html
#}
# end get_result_string_by_catalogue_provenance()
#--------------------------------------------------------------------------------
def get_result_string_by_catalogue_date( results ): #{
    """Format Solr results grouped by century of the source document.

    Structure: century (A) > document (B) > catalogue entries rendered as
    table rows (C) with a further-details sub-list (D).  Rows without a
    copy count are rendered as cross-reference rows.  Relies on the
    results arriving already sorted so that rows for the same
    century/document/copy are adjacent.
    """
    html = ''
    html = '<ul><!-- start list of centuries (A) -->' + newline
    prev_century = ''
    prev_document_code = ''
    prev_copy_code = ''
    for row in results: #{
        new_century = False
        new_document_code = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # Group key: century of the document's (sortable) start date
        curr_century = get_century_from_date( d_document_start )
        if curr_century != prev_century: #{
            new_century = True
            new_document_code = True
        #}
        elif curr_century == prev_century and s_document_code != prev_document_code: #{
            new_document_code = True
        #}
        if new_century: #{
            if prev_century: #{
                html += '</table><!-- end list of catalogue entries (C) -->' + newline
                html += '</ul><!-- end document list (B) -->' + newline
                html += '</li><!-- end century list-item (A) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start century list-item (A) -->' + newline
            html += '<h3>' + get_century_desc( curr_century ) + '</h3>'
            html += newline + '<ul><!-- start document list (B) -->' + newline
        #}
        if new_document_code: #{
            prev_copy_code = ''
            if not new_century: #{
                html += newline + '</table><!-- end list of catalogue entries (C) -->' + newline
                html += newline + '</li><!-- end document list-item (B) -->' + newline
            #}
            html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
            if s_document_code and s_document_name: #{
                html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                html += ': ' + get_document_link( s_document_code, s_document_name, s_document_type )
            #}
            else:
                html += '[no document found]'
            html += newline + '<table class="century">'
            html += '<!-- start list of catalogue entries (C) -->' + newline
        #}
        if sql_copy_count: #{
            if s_copy_code != prev_copy_code: #{
                html += newline
                html += '<tr><!-- start catalogue entry table row (C) -->'
                # Summary of date
                html += '<td class="medieval_cat_result"><em>'
                html += s_document_date_in_words
                html += '</em></td>'
                html += newline
                # Copy code, copy notes, author/title, bibliography
                html += '<td class="medieval_cat_result">'
                html += newline
                # Hover text: institution type, plus location unless already included
                hover_library = s_library_type
                if not s_library_type.endswith( s_library_loc ):
                    hover_library += ': %s' % s_library_loc
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         hover_library, s_document_name )
                html += '<br />'
                html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                          s_entry_biblio_line, s_entry_biblio_block,\
                                                          sql_entry_id, s_entry_letter )
                # check if the entry refers to a book title rather than an author
                if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                    s_title_of_book = ''
                if s_title_of_book and not s_entry_biblio_block: html += '<br />'
                html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                          s_problem, s_book_biblio_line )
                html += newline + '<ul><!-- further details list (D) -->' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul><!-- end further details list (D) -->' + newline
                html += newline + '</td></tr><!-- end catalogue entry row (C) -->' + newline
            #}
        #}
        else: #{ # just a cross-reference entry
            html += newline
            html += '<tr><td></td><td class="medieval_cat_result">'
            html += '<!-- start cross-reference entry (C) -->'
            html += newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block,\
                                                      sql_entry_id, s_entry_letter )
            if s_title_of_book.strip() == s_entry_name.strip(): # just a dummy book record
                s_title_of_book = ''
            if s_title_of_book and not s_entry_biblio_block: html += '<br />'
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += newline + '</td></tr><!-- end cross-reference entry (C) -->' + newline
        #}
        prev_century = curr_century
        prev_document_code = s_document_code
        prev_copy_code = s_copy_code
    #}
    html += newline
    html += '</table><!-- end list of catalogue entries (C) -->' + newline
    html += '</ul><!-- end list of documents (B) -->' + newline
    html += '</li><!-- end century list-item (A) -->' + newline
    html += '</ul><!-- end list of centuries (A) -->' + newline
    return html
#}
# end get_result_string_by_catalogue_date()
#--------------------------------------------------------------------------------
def get_century_from_date( date_string ): #{
    """Return the century, as a string (e.g. '12'), for a date value.

    Only the first four characters are examined, so a full date such as
    '1198-01-01' works; anything that does not begin with four digits
    yields 'undated'.
    """
    year_part = str( date_string )[ 0 : 4 ]
    if len( year_part ) < 4 or not year_part.isdigit(): return 'undated'
    # e.g. year 1198 is in century floor(1198 / 100) + 1 = 12
    century = int( year_part ) // 100 + 1
    return str( century )
#}
#--------------------------------------------------------------------------------
def get_century_desc( century ): #{
    """Return a display heading for a century value from get_century_from_date().

    '12' -> '12th century'.  Centuries of 20 or more (undated documents
    are sorted to the end with a large century number), the literal
    string 'undated', or any unrecognised value all yield 'Undated'.
    """
    # Default covers 'undated', >= 20, and any unexpected value.
    # Previously an unrecognised value raised UnboundLocalError because
    # century_desc was only assigned inside the branches.
    century_desc = 'Undated'
    if century.isdigit(): #{
        if int( century ) < 20: #{
            century_desc = '%sth century' % century
        #}
    #}
    return century_desc
#}
##=====================================================================================
def get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                      s_entry_biblio_line, s_entry_biblio_block,\
                                      sql_entry_id, s_entry_letter ): #{
    """Format one author/title entry: a link into the browse page, plus an
    optional cross-reference, one-line bibliography, and a bibliography
    block.  A short block is shown inline; a long one gets an
    expand/collapse button driven by per-entry inline JavaScript keyed
    on solr_id.
    """
    if s_entry_letter == 'I/J': s_entry_letter = 'IJ'  # I and J share one browse page
    entry_href = '%s/browse/%s/#entry%s_anchor' % (baseurl, s_entry_letter, sql_entry_id)
    html = '<a href="%s" title="%s">' % (entry_href, s_entry_name)
    html += s_entry_name
    html += '</a>'
    if s_entry_xref_name: html += ' %s %s' % (right_arrow, s_entry_xref_name)
    if s_entry_biblio_line: html += ': ' + s_entry_biblio_line + newline
    if s_entry_biblio_block: #{
        # Measure the block's visible length without its markup wrapper
        display_chars = s_entry_biblio_block.replace( '<span class="biblio_block">', "" )
        display_chars = display_chars.replace( '</span>', "" )
        if len( display_chars ) > biblio_block_line_length: # show up to 1 line of block
            show_biblio_block = False
        else:
            show_biblio_block = True
        if show_biblio_block: #{
            html += newline + '<div>'
            html += s_entry_biblio_block
            html += '</div>' + newline
        #}
        else: #{
            # Long block: emit a per-entry toggle function plus a hidden div;
            # the button image flips between right- and down-pointing manicules
            pointing_at = 'bibliographical details'
            html += newline + '<script type="text/javascript">' + newline
            html += "function expand_collapse_biblio_block_%s() {" % solr_id
            html += newline
            html += '  var the_block = document.getElementById( "biblio_block_%s" );' % solr_id
            html += newline
            html += '  var the_button = document.getElementById( "biblio_button_%s" );' % solr_id
            html += newline
            html += '  if( the_block.style.display == "block" ) {'
            html += newline
            html += '    the_block.style.display = "none";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_right_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '  else {'
            html += newline
            html += '    the_block.style.display = "block";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_down_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '}'
            html += newline
            html += '</script>' + newline
            html += '<button id="biblio_button_%s" ' % solr_id
            html += ' class="manicule" onclick="expand_collapse_biblio_block_%s()" >' % solr_id
            html += mv.manicule_pointing_right_img( pointing_at )
            html += '</button>' + newline
            html += '<br />' + newline
            html += '<div id="biblio_block_%s" style="display:none">' % solr_id
            html += s_entry_biblio_block
            html += '<p></p>' + newline
            html += '</div>'
            html += newline
        #}
    #}
    return html
#}
# end get_entry_name_and_biblio_string()
#--------------------------------------------------------------------------------
def get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                      s_problem, s_book_biblio_line ): #{
    """Format one book's line: problem flag, role, title, one-line
    bibliography, and cross-reference (in that order); each part appears
    only when non-blank."""
    parts = []
    if s_problem: parts.append( s_problem + ' ' )
    if s_role_in_book: parts.append( s_role_in_book + ' ' )
    # Suppress the title when it merely repeats the cross-reference target
    if s_title_of_book and s_title_of_book.strip() != s_xref_title_of_book.strip():
        parts.append( s_title_of_book )
    if s_book_biblio_line: parts.append( ": " + s_book_biblio_line )
    if s_xref_title_of_book: parts.append( "%s %s" % (right_arrow, s_xref_title_of_book) )
    return ''.join( parts )
#}
# end get_book_title_and_biblio_string()
#--------------------------------------------------------------------------------
def get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn ): #{
    """Return an <li> item for each y/n copy flag that is set to 'y'."""
    flag_descriptions = (
        (s_survives_yn, 'Surviving book'),
        (s_printed_yn, 'Printed book'),
        (s_uncertain_yn, 'Uncertain identification'),
        (s_duplicate_title_yn, 'Could refer to one of several works with the same title'),
    )
    html = ''
    for flag, description in flag_descriptions: #{
        if flag == 'y':
            html += '<li>%s</li>' % description + newline
    #}
    return html
#}
# end get_flags_string()
#--------------------------------------------------------------------------------
def get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                     hover_title_part_1 = '', hover_title_part_2 = '' ): #{
    """Format one catalogue copy code.

    When the copy is linked to MLGB book record(s), each link wraps the
    copy code; otherwise the code is wrapped in a span whose hover title
    (and click-alert) shows the two hover-title parts.  Any copy notes
    are appended afterwards.
    """
    html = ''
    editable_link = ''
    if editable: editable_link = '/e'
    # Combine the two hover-title parts, avoiding a duplicated part 2
    hover_title = hover_title_part_1
    if hover_title_part_2.strip() and hover_title_part_2.strip() != hover_title_part_1.strip():
        hover_title += ' -- %s' % hover_title_part_2
    # Strip markup and double quotes so the text is safe in an attribute
    hover_title = hover_title.replace( '<i>', '' )
    hover_title = hover_title.replace( '</i>', '' )
    hover_title = hover_title.replace( '"', "'" )
    # Further escaping for use inside the single-quoted JavaScript alert()
    onclick_title = hover_title.replace( newline, ' ' )
    onclick_title = onclick_title.replace( carriage_return, '' )
    onclick_title = onclick_title.replace( "'", "\\'" )
    # Either start a link to the MLGB book record...
    for book_id in s_mlgb_book_id: #{
        html += '<a href="%s%s/%s/" ' % (editable_link, mlgb_book_url, book_id)
        html += ' title="Further details of book" '
        html += ' class="link_from_index_to_book">'
        html += s_copy_code
        html += '</a> '
    #}
    # Or start a span which you can hover over and get a bit more info.
    if not html: #{
        html += '<span title="%s" class="index_catalogue_entry" ' % hover_title
        html += ' onclick="alert(' + "'" + onclick_title + "'" + ')">'
        html += s_copy_code
        html += '</span>'
    #}
    # Add description/notes if there are any,
    # e.g. 'sermones Ailmeri prioris in glosis' or '(1 copy) = K5.7'
    if s_copy_notes.strip(): html += ' %s' % s_copy_notes
    return html
#}
# end get_copy_string()
#--------------------------------------------------------------------------------
def get_library_link( library_type_code, library_type_name, library_loc_id, library_loc_name ): #{
    """Render provenance link(s): the library type, plus one location inside it when given.

    Returns '[no library found]' when the type code or name is missing. The
    location link is suppressed when the type name already ends with the
    location name (e.g. HENRY DE KIRKESTEDE would otherwise appear twice).
    """
    if not (library_type_code and library_type_name):
        return '[no library found]'
    prefix = '/e' if editable else ''
    type_url = "%s%s/source/%s/" % (prefix, medieval_catalogues_url, library_type_code)
    parts = [ '<a href="%s" title="%s">%s</a>' % (type_url, library_type_name, library_type_name) ]
    if library_loc_id and library_loc_name: #{
        if not library_type_name.endswith( library_loc_name ): #{
            loc_url = "%s%s/" % (type_url, library_loc_id)
            parts.append( ': <a href="%s" title="%s">%s</a>' % (loc_url, library_loc_name, library_loc_name) )
        #}
    #}
    return ''.join( parts )
#}
#--------------------------------------------------------------------------------
def get_document_link( document_code, document_name, s_document_type = '' ): #{
    """Render a link to one medieval catalogue document, or '' when code/name missing.

    s_document_type is accepted for interface compatibility but intentionally
    not displayed: the document name already conveys the type.
    """
    if not (document_code and document_name):
        return ''
    prefix = '/e' if editable else ''
    doc_url = "%s%s/%s/" % (prefix, medieval_catalogues_url, document_code)
    return '<a href="%s" title="%s">%s</a>' % (doc_url, document_name, document_name)
#}
#--------------------------------------------------------------------------------
def get_doctype_dropdown_options(): #{
    """Return the distinct document types, preceded by a blank entry, for a dropdown.

    The leading "" lets the user select "any type". Fix: the DB cursor is now
    closed in a finally block, so it is released even when execute/fetchall
    raises (the original leaked the cursor on error).
    """
    doctypes = [ "" ]
    the_cursor = connection.cursor()
    try:
        statement = "select distinct document_type from index_medieval_documents order by document_type"
        the_cursor.execute( statement )
        for sql_row in the_cursor.fetchall():
            doctypes.append( sql_row[ 0 ] )
    finally:
        the_cursor.close()
    return doctypes
#}
#--------------------------------------------------------------------------------
| 37.169505 | 106 | 0.596089 |
import math
from django.template import Context, loader
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.db import connection
from urllib import quote, unquote
from mysite.config import *
from mysite.MLGBsolr import *
import mysite.mlgb.views as mv
solr_query = ''
printing = False
editable = False
baseurl="/authortitle"
medieval_catalogues_url = "/authortitle/medieval_catalogues"
mlgb_book_url = '/mlgb/book'
default_order_by = "solr_id_sort"
catalogue_provenance_sort_list = [ "s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
catalogue_date_sort_list = [ "d_document_start asc",
"d_document_end asc",
"s_library_type asc",
"s_library_loc asc",
"s_document_code_sort asc",
"s_seqno_in_doc_sort asc",
"s_copy_code asc",
"solr_id_sort asc" ]
searchable_fields = [
{ "fieldname": "text", "label": "All fields", "info": "", "value": "" },
{ "fieldname": "t_author", "label": "Author", "info": "", "value": "" },
{ "fieldname": "t_title", "label": "Title of book", "info": "", "value": "" },
{ "fieldname": "t_bibliography", "label": "Bibliographical details", "info": "", "value": "" },
{ "fieldname": "t_library", "label": "Catalogue provenance",
"info": "E.g. 'Benedictines Peterborough' or 'Henry de Kirkestede'", "value": "" },
{ "fieldname": "t_document", "label": "Description of document", "value": "", "info":
"E.g. 'Books read in the refectory, 13th cent'." \
+ " Description includes either an indication of document date or the word 'undated'." },
{ "fieldname": "s_document_type", "label": "Type of document", "value": "", "info": "" },
{ "fieldname": "q_earliest_year", "label": "Start of required date range", "value": "",
"info": "Enter the earliest year that you are interested in, e.g. 1400." },
{ "fieldname": "q_latest_year", "label": "End of required date range", "value": "",
"info": "Enter the latest year that you are interested in, e.g. 1499." },
]
facet = False
newline = '\n'
carriage_return = '\r'
right_arrow = '→'
biblio_block_line_length = 100
#================= Top-level functions, called directly from URL ================
#--------------------------------------------------------------------------------
# The function browse() allows browsing of the index by author/title
def browse( request, letter = '', pagename = 'index', called_by_editable_page = False ): #{
    """Render one letter's page of the author/title index.

    letter is sanitised to a single upper-case letter ('A' fallback for other
    non-empty values) and selects a pre-generated template index<LETTER>.html.
    called_by_editable_page switches the whole module into editable-URL mode.
    """
    global searchable_fields # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    if letter != '' and not letter.isalpha(): letter = 'A'
    letter = letter.upper()
    doctypes = get_doctype_dropdown_options()
    # Templates are generated offline, one per letter. This may need changing later.
    t = loader.get_template('authortitle/index%s.html' % letter )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'letter'   : letter,
        'printing' : printing,
        'print_link' : mv.get_link_for_print_button( request ),
        'called_by_collapsible_page': True,
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#}
# end function browse()
#--------------------------------------------------------------------------------
def browse_e( request, letter = '', pagename = 'index' ): #{
    """Editable-mode entry point: browse() with editing switched on."""
    return browse( request, letter, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
# The function medieval_catalogues() allows browsing of the index by medieval document
def medieval_catalogues( request, cat = '', pagename = 'cats', called_by_editable_page = False ): #{
    """Render the medieval-catalogues browse page.

    cat selects a view: '' (or any non-alphanumeric value) -> collapsible list of
    all catalogues; 'bydate' -> list sorted by date; 'decode' -> decode table;
    anything else -> the single catalogue page for that (upper-cased) code.
    Templates are pre-generated by cataloguesHTML.py.
    """
    global searchable_fields # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    sort_by_date = False
    display_decodes = False
    # note: '' is caught by the isalnum() test too, since ''.isalnum() is False
    if not cat.isalnum():
        cat = ''
    elif cat == 'bydate':
        cat = ''
        sort_by_date = True
    elif cat == 'decode':
        cat = ''
        display_decodes = True
    else:
        cat = cat.upper()
    called_by_collapsible_page = False
    doctypes = get_doctype_dropdown_options()
    # generated by cataloguesHTML.py. This may need changing later.
    if cat:
        t = loader.get_template('authortitle/catalogue%s.html' % cat )
    elif sort_by_date:
        t = loader.get_template('authortitle/cataloguelistbydate.html' )
    elif display_decodes:
        t = loader.get_template('authortitle/decode.html' )
    else: #{
        called_by_collapsible_page = True
        t = loader.get_template('authortitle/cataloguelist.html' )
    #}
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'cat'      : cat,
        'printing' : printing,
        'print_link': mv.get_link_for_print_button( request ),
        'called_by_collapsible_page': called_by_collapsible_page,
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#}
# end function medieval_catalogues()
#--------------------------------------------------------------------------------
def medieval_catalogues_e( request, cat = '', pagename = 'cats' ): #{
    """Editable-mode entry point: medieval_catalogues() with editing switched on."""
    return medieval_catalogues( request, cat, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
# The function cat_source() allows browsing of the index by source of medieval catalogue.
# The primary source is the type of institution (document group type), e.g. A for Augustinian Canons.
# You can also browse one location within an institution type (document group type/document group ID),
# e.g. /A/15/ for the Augustinian location 'Lanthony', which has document group ID 15.
def cat_source( request, source = '', loc = '', pagename = 'cats', called_by_editable_page = False ): #{
    """Render the catalogues for one source (institution type), optionally one location.

    source is an institution-type letter code (e.g. 'A' for Augustinian Canons);
    loc is a location id within it (e.g. '15'). Both are sanitised; the template
    name becomes cataloguelist<SOURCE>[-<loc>].html, pre-generated offline.
    """
    global searchable_fields # this is used in advanced search
    for field in searchable_fields:
        field[ "value" ] = "" # re-initialise every field to blank so that old searches don't hang around
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    global printing
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    if not source.isalpha():
        source = ''
        loc = ''
    else:
        source = source.upper()
    if not loc.isalnum(): loc = ''
    full_source = source
    if loc: full_source += '-%s' % loc.lower()
    doctypes = get_doctype_dropdown_options()
    # generated by cataloguesHTML.py. This may need changing later.
    t = loader.get_template('authortitle/cataloguelist%s.html' % full_source )
    c = Context( {
        'pagename' : pagename,
        'editable' : editable,
        'source'   : source,
        'location' : loc,
        'printing' : printing,
        'print_link': mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
    } )
    return HttpResponse( t.render( c ) )
#}
# end function cat_source()
#--------------------------------------------------------------------------------
def cat_source_e( request, source = '', loc = '', pagename = 'cats' ): #{
    """Editable-mode entry point: cat_source() with editing switched on."""
    return cat_source( request, source, loc, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
# The function results() is called either from Quick Search or from Advanced Search
def results( request, pagename = 'results', called_by_editable_page = False ): #{
    """Run a quick or advanced Solr search and render the results page.

    Reads the query parameters from request.GET (via run_solr_query), builds
    the paginated HTML with get_result_string(), and renders results.html.
    """
    # Set editability status
    if called_by_editable_page: enable_edit()
    else: disable_edit()
    # Set printing status
    global printing
    printing = False
    printing = mv.get_value_from_GET( request, "printing", False )
    # See if you are doing quick or advanced search
    search_type = mv.get_value_from_GET( request, "search_type", "quick" )
    # Run the Solr query
    (resultsets, number_of_records, search_term, \
     solr_start, solr_rows, page_size ) = run_solr_query( request )
    mv.printing = printing
    pag = mv.pagination( rows_found = number_of_records, \
                         current_row = solr_start, \
                         rows_per_page = solr_rows, \
                         link_for_print_button = mv.get_link_for_print_button( request ),
                         link_for_download_button = mv.get_link_for_download_button( request ) )
    # Format the results into an HTML string ready for display
    order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
    result_string = get_result_string( resultsets, order_by )
    result_string = pag + newline + '<p></p>' + newline + result_string
    if number_of_records > solr_rows: # repeat pagination at the bottom
        result_string += newline + '<p></p>' + newline + pag
    # Get a list of document types for a dropdown list
    doctypes = get_doctype_dropdown_options()
    # Pass HTML string and other data to the template for display
    t = loader.get_template( 'authortitle/results.html' )
    c = Context( {
        'pagename'   : pagename,
        'editable'   : editable,
        'results'    : result_string,
        'order_by'   : order_by,
        'printing'   : printing,
        'print_link' : mv.get_link_for_print_button( request ),
        'default_rows_per_page': mv.default_rows_per_page,
        'number_of_records': number_of_records,
        'search_type': search_type,
        'search_term': search_term,
        'advanced_search_fields': searchable_fields,
        'doctype_dropdown_options': doctypes,
        'solr_query': solr_query,
    } )
    return HttpResponse( t.render( c ) )
#}
# end function results()
#--------------------------------------------------------------------------------
def results_e( request, pagename = 'results' ): #{
    """Editable-mode entry point: results() with editing switched on."""
    return results( request, pagename, called_by_editable_page = True )
#}
#--------------------------------------------------------------------------------
#================ End top-level functions called directly from URL ==============
#--------------------------------------------------------------------------------
## This changes links to exclude the 'editable' part of the URL
def disable_edit(): #{
    """Switch module-level links to the public (non-editable) URL scheme."""
    global editable, baseurl
    editable = False
    baseurl = '/authortitle'
#}
#--------------------------------------------------------------------------------
## This changes links to include the 'editable' part of the URL
def enable_edit(): #{
    """Switch module-level links to the editable URL scheme (adds the /e prefix)."""
    global editable, baseurl
    editable = True
    baseurl = '/e/authortitle'
#}
#--------------------------------------------------------------------------------
# Either run a basic Solr query (i.e. on a single search term) against default field of 'catalogues' core
# Or run an *advanced* Solr query (i.e. on a multiple search terms)
def run_solr_query( request ): #{
global solr_query # for debugging
global searchable_fields # this is used in advanced search
for field in searchable_fields:
field[ "value" ] = "" # initialise every field value to blank
resultsets = []
number_of_records = 0
search_type = ""
search_term = solr_start = page_size = solr_query = solr_sort = solr_rows = ""
if request.GET: #{ # was a search term found in GET?
#=====================================================================
# Get search type, records per page, start row and "order by" from GET
#=====================================================================
# Set search type (quick or advanced)
search_type = mv.get_value_from_GET( request, 'search_type', 'quick' )
if search_type not in [ 'quick', 'advanced' ]: search_type = 'quick'
# Set page size
page_size = mv.get_value_from_GET( request, "page_size", str( mv.default_rows_per_page ) )
if page_size.isdigit():
solr_rows = int( page_size )
else:
solr_rows = mv.default_rows_per_page
# Set start page
solr_start = mv.get_value_from_GET( request, "start", 0 )
# Set "order by"
order_by = mv.get_value_from_GET( request, "order_by", default_order_by )
if order_by == default_order_by:
solr_sort = order_by + " asc"
elif order_by == "catalogue_provenance":
solr_sort = ",".join( catalogue_provenance_sort_list )
elif order_by == "catalogue_date":
solr_sort = ",".join( catalogue_date_sort_list )
else:
solr_sort = default_order_by + " asc"
#=====================
# Construct Solr query
#=====================
if search_type == 'quick': #{ # search on all fields via the single form field 'search_term'
search_term = mv.get_value_from_GET( request, 'search_term' )
if not search_term: search_term = '*'
solr_query = mv.escape_for_solr( search_term )
if ' ' in solr_query:
solr_query = '(%s)' % solr_query
if search_term=='*' or search_term=='':
solr_query='*:*'
else: #{
solr_query = "text:%s" % solr_query
for field in searchable_fields: #{ # store the search term in the 'text' field
fieldname = field[ "fieldname" ]
if fieldname == "text": #{
field[ "value" ] = search_term
break
#}
#}
#}
#}
else: #{ # advanced search on any combination of multiple searchable fields
fields_searched = []
for field in searchable_fields: #{
fieldname = field[ "fieldname" ]
fieldval = mv.get_value_from_GET( request, fieldname, "" )
if fieldval == '*': fieldval = ''
field[ "value" ] = fieldval
if fieldval: #{ # they entered a query on this field
if fieldname in [ "q_earliest_year", "q_latest_year" ]: #{
if fieldval.isdigit(): #{
query_clause = get_date_range_query( fieldname, fieldval )
if query_clause: fields_searched.append( query_clause )
#}
else: # non-numeric year, can't be queried on
field[ "value" ] = ""
else:
fieldval = mv.escape_for_solr( fieldval )
if ' ' in fieldval:
if fieldname == 's_document_type':
fieldval = '"%s"' % fieldval
else:
fieldval = '(%s)' % fieldval
fields_searched.append( "%s:%s" % (fieldname, fieldval))
if len( fields_searched ) > 0:
solr_query = " AND ".join( fields_searched )
else:
solr_query='*:*'
for field in searchable_fields:
fieldname = field[ "fieldname" ]
if fieldname == 'text':
field[ "value" ] = "*"
break
s_para={'q' : solr_query,
'wt' : s_wt,
'start': solr_start,
'rows' : solr_rows,
'sort' : solr_sort}
r = MLGBsolr()
r.solrresults( s_para, facet, 'catalogues' )
if r.connstatus and r.s_result: esult.get( 'docs' )
number_of_records = r.s_result.get( 'numFound' )
search_term, solr_start, solr_rows, page_size )
def get_date_range_query( fieldname, fieldval ):
    """Build a Solr range clause for one advanced-search year bound.

    fieldname -- 'q_earliest_year' or 'q_latest_year'; anything else returns ''
    fieldval  -- a year as a digit string, zero-padded to 4 characters so that
                 lexicographic range comparison works (e.g. '999' -> '0999')

    A document overlaps the requested range when it ends on/after the earliest
    year and starts on/before the latest year, hence the two open-ended ranges.
    NOTE(review): the two assignment lines were truncated in this copy of the
    file; they are reconstructed from the surviving fragments and from the
    s_document_end_year / s_document_start_year fields used elsewhere — confirm
    against the repository original.
    """
    q = ''
    if len( fieldval ) < 4: fieldval = fieldval.rjust( 4, '0' )
    if fieldname == 'q_earliest_year':
        q = 's_document_end_year:["%s" TO *]' % fieldval
    elif fieldname == 'q_latest_year':
        q = 's_document_start_year:[* TO "%s"]' % fieldval
    return q
def extract_from_result( record ):
    """Unpack one Solr result document into a fixed-order 38-element tuple.

    The first two fields ("id", "solr_id_sort") are mandatory and raise
    KeyError when absent; every other field defaults to '' when missing,
    except s_mlgb_book_id which defaults to an empty list. The tuple order
    is part of the contract — callers unpack it positionally.
    """
    field_order = (
        'sql_entry_id', 'sql_entry_book_count', 'sql_copy_count',
        's_entry_name', 's_entry_xref_name', 's_author_name',
        's_entry_biblio_line', 's_entry_biblio_block',
        's_title_of_book', 's_xref_title_of_book', 's_role_in_book',
        's_problem', 's_book_biblio_line',
        's_copy_code', 's_copy_notes',
        's_printed_yn', 's_survives_yn', 's_uncertain_yn', 's_duplicate_title_yn',
        's_document_code', 's_document_code_sort',
        's_seqno_in_document', 's_seqno_in_doc_sort',
        's_document_name', 'd_document_start', 'd_document_end',
        's_document_type',
        's_library_type', 's_library_loc', 's_library_type_code', 's_library_loc_id',
        's_mlgb_book_id',
        's_entry_letter',
        's_document_start_year', 's_document_end_year', 's_document_date_in_words',
    )
    extracted = [ record[ "id" ], record[ "solr_id_sort" ] ]
    for key in field_order:
        default = [] if key == 's_mlgb_book_id' else ''
        extracted.append( record.get( key, default ) )
    return tuple( extracted )
def get_result_string( results, order_by ):
    """Render Solr result rows as HTML, grouped according to the requested ordering.

    order_by 'catalogue_provenance' groups by library, 'catalogue_date' by
    century; anything else falls back to author/title grouping. An empty
    result set yields just an empty paragraph.
    """
    if not results:
        return '<p></p>' + newline
    if order_by == 'catalogue_provenance':
        renderer = get_result_string_by_catalogue_provenance
    elif order_by == 'catalogue_date':
        renderer = get_result_string_by_catalogue_date
    else:
        renderer = get_result_string_by_author_title
    return renderer( results )
def get_result_string_by_author_title( results ):
    """Render search results as nested lists: author/title entry > book > catalogue copies.

    Rows are assumed to arrive pre-sorted (solr_id_sort) so that consecutive rows
    with the same entry id / book count can be grouped by comparing against the
    previous row. NOTE(review): the provenance and date renderers have an else
    branch for rows with no copies (cross-reference entries); none is visible
    here — possibly lost in extraction, confirm against the repository original.
    """
    html = '<ul><!-- start list of author/title entries -->' + newline
    prev_entry_id = ''
    prev_entry_book_count = ''
    prev_title_of_book = ''
    prev_copy_code = ''
    for row in results:
        new_entry = False
        new_book = False
        (solr_id, solr_id_sort,
        sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
        s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
        s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
        s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
        s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
        s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
        s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
        s_document_date_in_words) = extract_from_result( row )
        # Detect group boundaries relative to the previous row
        if sql_entry_id != prev_entry_id:
            new_entry = True
            new_book = True
        elif sql_entry_id == prev_entry_id and sql_entry_book_count != prev_entry_book_count:
            new_book = True
        if new_entry:
            # close the previous entry's open lists before starting a new one
            if prev_entry_id:
                html += '</ul><!-- end catalogue entry list -->' + newline
                html += '</ul><!-- end book list -->' + newline
                html += '</li><!-- end author/title entry -->' + newline
            html += newline + '<li class="medieval_cat_result"><!-- start author/title entry -->' + newline
            html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                                      s_entry_biblio_line, s_entry_biblio_block, \
                                                      sql_entry_id, s_entry_letter )
            html += '<ul><!-- start book list -->' + newline
        if new_book:
            prev_copy_code = ''
            if not new_entry:
                html += '</ul><!-- end catalogue entry list -->' + newline
                if prev_title_of_book: html += '</li><!-- end book -->' + newline
            # suppress the book title when it merely repeats the entry name
            if s_title_of_book.strip() == s_entry_name.strip():
                s_title_of_book = ''
            if s_title_of_book.strip(): html += '<li class="medieval_cat_result"><!-- start book -->' + newline
            prev_title_of_book = s_title_of_book.strip()
            html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                                      s_problem, s_book_biblio_line )
            html += '<ul><!-- start list of catalogue entries -->' + newline
        if sql_copy_count:
            # only emit each copy once (rows repeat per MLGB book id)
            if s_copy_code != prev_copy_code:
                html += '<li class="medieval_cat_result"><!-- start catalogue entry -->' + newline
                html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                                         s_entry_name, s_title_of_book )
                html += newline + '<ul>' + newline
                if s_library_type:
                    html += '<li>From '
                    html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
                    if s_document_code and s_document_name:
                        html += ': %s' % get_document_link( s_document_code, s_document_name, s_document_type )
                    html += '</li>' + newline
                html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
                html += newline + '</ul>' + newline
                html += '</li><!-- end catalogue entry -->' + newline
        prev_entry_id = sql_entry_id
        prev_entry_book_count = sql_entry_book_count
        prev_copy_code = s_copy_code
    html += '</ul><!-- end catalogue entry list -->' + newline
    html += '</ul><!-- end book list -->' + newline
    html += '</li><!-- end author/title entry -->' + newline
    html += '</ul><!-- end author/title list -->' + newline
    return html
def get_result_string_by_catalogue_provenance( results ):
html = '<ul><!-- start list of library types (A) -->' + newline
prev_library = ''
prev_document_code = ''
prev_copy_code = ''
for row in results:
new_library = False
new_document_code = False
(solr_id, solr_id_sort,
sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
s_document_date_in_words) = extract_from_result( row )
curr_library = s_library_type + s_library_loc
if curr_library != prev_library:
new_library = True
new_document_code = True
elif curr_library == prev_library and s_document_code != prev_document_code:
new_document_code = True
if new_library:
if prev_library:
html += '</ul><!-- end list of catalogue entries (C) -->' + newline
html += '</ul><!-- end document list (B) -->' + newline
html += '</li><!-- end library list-item (A) -->' + newline
html += newline + '<li class="medieval_cat_result"><!-- start library list-item (A) -->' + newline
html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
html += newline + '<ul><!-- start document list (B) -->' + newline
if new_document_code:
prev_copy_code = ''
if not new_library:
html += newline + '</ul><!-- end list of catalogue entries (C) -->' + newline
html += newline + '</li><!-- end document list-item (B) -->' + newline
html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
if s_document_code and s_document_name:
html += get_document_link( s_document_code, s_document_name, s_document_type )
else:
html += '[no document found]'
html += newline + '<ul><!-- start list of catalogue entries (C) -->' + newline
if sql_copy_count:
if s_copy_code != prev_copy_code:
html += newline + '<li class="medieval_cat_result"><!-- start catalogue entry list-item (C) -->'
html += newline
hover_library = s_library_type
if not s_library_type.endswith( s_library_loc ):
hover_library += ': %s' % s_library_loc
html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
hover_library, s_document_name )
html += '<br />'
html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
s_entry_biblio_line, s_entry_biblio_block,\
sql_entry_id, s_entry_letter )
if s_title_of_book.strip() == s_entry_name.strip():
s_title_of_book = ''
if s_title_of_book and not s_entry_biblio_block: html += '<br />'
html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
s_problem, s_book_biblio_line )
html += newline + '<ul><!-- further details list (D) -->' + newline
html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
html += newline + '</ul><!-- end further details list (D) -->' + newline
html += newline + '</li><!-- end catalogue entry list-item (C) -->' + newline
else: class="medieval_cat_result"><!-- start cross-reference entry (C) -->'
html += newline
html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
s_entry_biblio_line, s_entry_biblio_block,\
sql_entry_id, s_entry_letter )
if s_title_of_book.strip() == s_entry_name.strip():
s_title_of_book = ''
if s_title_of_book and not s_entry_biblio_block: html += '<br />'
html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
s_problem, s_book_biblio_line )
html += newline + '</li><!-- end cross-reference entry (C) -->' + newline
prev_library = curr_library
prev_document_code = s_document_code
prev_copy_code = s_copy_code
html += newline
html += '</ul><!-- end list of catalogue entries (C) -->' + newline
html += '</ul><!-- end list of documents (B) -->' + newline
html += '</li><!-- end library list-item (A) -->' + newline
html += '</ul><!-- end list of libraries (A) -->' + newline
return html
def get_result_string_by_catalogue_date( results ):
html = ''
html = '<ul><!-- start list of centuries (A) -->' + newline
prev_century = ''
prev_document_code = ''
prev_copy_code = ''
for row in results:
new_century = False
new_document_code = False
(solr_id, solr_id_sort,
sql_entry_id, sql_entry_book_count, sql_copy_count, s_entry_name, s_entry_xref_name,
s_author_name, s_entry_biblio_line, s_entry_biblio_block, s_title_of_book, s_xref_title_of_book,
s_role_in_book, s_problem, s_book_biblio_line, s_copy_code, s_copy_notes, s_printed_yn,
s_survives_yn, s_uncertain_yn, s_duplicate_title_yn, s_document_code, s_document_code_sort,
s_seqno_in_document, s_seqno_in_doc_sort, s_document_name, d_document_start, d_document_end,
s_document_type, s_library_type, s_library_loc, s_library_type_code, s_library_loc_id,
s_mlgb_book_id, s_entry_letter, s_document_start_year, s_document_end_year,
s_document_date_in_words) = extract_from_result( row )
curr_century = get_century_from_date( d_document_start )
if curr_century != prev_century:
new_century = True
new_document_code = True
elif curr_century == prev_century and s_document_code != prev_document_code:
new_document_code = True
if new_century:
if prev_century:
html += '</table><!-- end list of catalogue entries (C) -->' + newline
html += '</ul><!-- end document list (B) -->' + newline
html += '</li><!-- end century list-item (A) -->' + newline
html += newline + '<li class="medieval_cat_result"><!-- start century list-item (A) -->' + newline
html += '<h3>' + get_century_desc( curr_century ) + '</h3>'
html += newline + '<ul><!-- start document list (B) -->' + newline
if new_document_code:
prev_copy_code = ''
if not new_century:
html += newline + '</table><!-- end list of catalogue entries (C) -->' + newline
html += newline + '</li><!-- end document list-item (B) -->' + newline
html += newline + '<li class="medieval_cat_result"><!-- start document list-item( B) -->' + newline
if s_document_code and s_document_name:
html += get_library_link( s_library_type_code, s_library_type, s_library_loc_id, s_library_loc )
html += ': ' + get_document_link( s_document_code, s_document_name, s_document_type )
else:
html += '[no document found]'
html += newline + '<table class="century">'
html += '<!-- start list of catalogue entries (C) -->' + newline
if sql_copy_count:
if s_copy_code != prev_copy_code:
html += newline
html += '<tr><!-- start catalogue entry table row (C) -->'
html += '<td class="medieval_cat_result"><em>'
html += s_document_date_in_words
html += '</em></td>'
html += newline
html += '<td class="medieval_cat_result">'
html += newline
hover_library = s_library_type
if not s_library_type.endswith( s_library_loc ):
hover_library += ': %s' % s_library_loc
html += get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
hover_library, s_document_name )
html += '<br />'
html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
s_entry_biblio_line, s_entry_biblio_block,\
sql_entry_id, s_entry_letter )
if s_title_of_book.strip() == s_entry_name.strip():
s_title_of_book = ''
if s_title_of_book and not s_entry_biblio_block: html += '<br />'
html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
s_problem, s_book_biblio_line )
html += newline + '<ul><!-- further details list (D) -->' + newline
html += get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn )
html += newline + '</ul><!-- end further details list (D) -->' + newline
html += newline + '</td></tr><!-- end catalogue entry row (C) -->' + newline
else: html += '<tr><td></td><td class="medieval_cat_result">'
html += '<!-- start cross-reference entry (C) -->'
html += newline
html += get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
s_entry_biblio_line, s_entry_biblio_block,\
sql_entry_id, s_entry_letter )
if s_title_of_book.strip() == s_entry_name.strip():
s_title_of_book = ''
if s_title_of_book and not s_entry_biblio_block: html += '<br />'
html += get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
s_problem, s_book_biblio_line )
html += newline + '</td></tr><!-- end cross-reference entry (C) -->' + newline
prev_century = curr_century
prev_document_code = s_document_code
prev_copy_code = s_copy_code
html += newline
html += '</table><!-- end list of catalogue entries (C) -->' + newline
html += '</ul><!-- end list of documents (B) -->' + newline
html += '</li><!-- end century list-item (A) -->' + newline
html += '</ul><!-- end list of centuries (A) -->' + newline
return html
def get_century_from_date( date_string ):
    """Map a document start date (e.g. '1425-01-01') to its century as a string.

    The century is floor(year/100)+1, so 1399 -> '14' and 1400 -> '15'.
    Returns 'undated' when no four-digit year can be read from the front
    of the value.
    """
    year_digits = str( date_string )[ 0 : 4 ]
    if len( year_digits ) < 4 or not year_digits.isdigit():
        return 'undated'
    return str( int( year_digits ) // 100 + 1 )
def get_century_desc( century ):
    """Turn a century token from get_century_from_date() into a display label.

    '15' -> '15th century', 'undated' (any case) -> 'Undated'.

    NOTE(review): the source was corrupted here (`if int( century ) >= 20: else:`
    on one line, with the >= 20 branch body lost).  Every century this catalogue
    produces takes the plain 'th' suffix, so the surviving format covers the
    digit case.  Unrecognised input is now returned unchanged instead of
    raising UnboundLocalError as the old code would have.
    """
    century_desc = century
    if century.isdigit():
        century_desc = '%sth century' % century
    elif century.lower() == 'undated':
        century_desc = 'Undated'
    return century_desc
def get_entry_name_and_biblio_string( solr_id, s_entry_name, s_entry_xref_name, \
                                      s_entry_biblio_line, s_entry_biblio_block,\
                                      sql_entry_id, s_entry_letter ):
    """Build the linked entry name plus its bibliography line/block markup.

    NOTE(review): the `def` line was missing from the corrupted source; it is
    reconstructed from the two call sites, which pass exactly these seven
    arguments positionally in this order.
    """
    # 'I' and 'J' share one browse page whose letter code is 'IJ'.
    if s_entry_letter == 'I/J': s_entry_letter = 'IJ'
    entry_href = '%s/browse/%s/#entry%s_anchor' % (baseurl, s_entry_letter, sql_entry_id)
    html = '<a href="%s" title="%s">' % (entry_href, s_entry_name)
    html += s_entry_name
    html += '</a>'
    if s_entry_xref_name: html += ' %s %s' % (right_arrow, s_entry_xref_name)
    if s_entry_biblio_line: html += ': ' + s_entry_biblio_line + newline
    if s_entry_biblio_block:
        # Measure the visible text only (span markup stripped) to decide
        # whether the block is short enough to display inline.
        display_chars = s_entry_biblio_block.replace( '<span class="biblio_block">', "" )
        display_chars = display_chars.replace( '</span>', "" )
        if len( display_chars ) > biblio_block_line_length:
            show_biblio_block = False
        else:
            show_biblio_block = True
        if show_biblio_block:
            html += newline + '<div>'
            html += s_entry_biblio_block
            html += '</div>' + newline
        else:
            # Too long to show inline: emit a manicule button that toggles a
            # hidden <div> via a small per-entry JavaScript function.
            pointing_at = 'bibliographical details'
            html += newline + '<script type="text/javascript">' + newline
            html += "function expand_collapse_biblio_block_%s() {" % solr_id
            html += newline
            html += '  var the_block = document.getElementById( "biblio_block_%s" );' % solr_id
            html += newline
            html += '  var the_button = document.getElementById( "biblio_button_%s" );' % solr_id
            html += newline
            html += '  if( the_block.style.display == "block" ) {'
            html += newline
            html += '    the_block.style.display = "none";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_right_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '  else {'
            html += newline
            html += '    the_block.style.display = "block";'
            html += newline
            html += "    the_button.innerHTML = '%s';" % mv.manicule_pointing_down_img( pointing_at )
            html += newline
            html += '  }'
            html += newline
            html += '}'
            html += newline
            html += '</script>' + newline
            html += '<button id="biblio_button_%s" ' % solr_id
            html += ' class="manicule" onclick="expand_collapse_biblio_block_%s()" >' % solr_id
            html += mv.manicule_pointing_right_img( pointing_at )
            html += '</button>' + newline
            html += '<br />' + newline
            html += '<div id="biblio_block_%s" style="display:none">' % solr_id
            html += s_entry_biblio_block
            html += '<p></p>' + newline
            html += '</div>'
            html += newline
    return html
def get_book_title_and_biblio_string( s_title_of_book, s_xref_title_of_book, s_role_in_book, \
                                      s_problem, s_book_biblio_line ):
    """Assemble the problem / role / title / bibliography fragment for one entry."""
    pieces = []
    if s_problem:
        pieces.append( s_problem + ' ' )
    if s_role_in_book:
        pieces.append( s_role_in_book + ' ' )
    # Show the title only when it is not the same as the cross-referenced title.
    if s_title_of_book and s_title_of_book.strip() != s_xref_title_of_book.strip():
        pieces.append( s_title_of_book )
        if s_book_biblio_line:
            pieces.append( ": " + s_book_biblio_line )
    if s_xref_title_of_book:
        pieces.append( "%s %s" % (right_arrow, s_xref_title_of_book) )
    return ''.join( pieces )
def get_flags_string( s_survives_yn, s_printed_yn, s_uncertain_yn, s_duplicate_title_yn ):
    """Render an entry's y/n attribute flags as a run of <li> items ('' when none set)."""
    flag_labels = (
        (s_survives_yn, 'Surviving book'),
        (s_printed_yn, 'Printed book'),
        (s_uncertain_yn, 'Uncertain identification'),
        (s_duplicate_title_yn, 'Could refer to one of several works with the same title'),
    )
    html = ''
    for flag, label in flag_labels:
        if flag == 'y':
            html += '<li>%s</li>' % label + newline
    return html
def get_copy_string( s_copy_code, s_copy_notes, s_mlgb_book_id, \
                     hover_title_part_1 = '', hover_title_part_2 = '' ):
    """Render one recorded copy of a book: MLGB link(s) when book ids exist,
    otherwise a hover/click-able span carrying the extra title information."""
    html = ''
    editable_link = ''
    if editable: editable_link = '/e'
    # Combine the two hover-title parts, skipping a second part identical to the first.
    hover_title = hover_title_part_1
    if hover_title_part_2.strip() and hover_title_part_2.strip() != hover_title_part_1.strip():
        hover_title += ' -- %s' % hover_title_part_2
    # Strip italics markup and double quotes so the text is safe inside title="...".
    hover_title = hover_title.replace( '<i>', '' )
    hover_title = hover_title.replace( '</i>', '' )
    hover_title = hover_title.replace( '"', "'" )
    # The onclick alert() variant must also be single-line and JS-quote-escaped.
    onclick_title = hover_title.replace( newline, ' ' )
    onclick_title = onclick_title.replace( carriage_return, '' )
    onclick_title = onclick_title.replace( "'", "\\'" )
    # Either start a link to the MLGB book record...
    # (one anchor per id; s_mlgb_book_id is iterated, so presumably a list -- TODO confirm)
    for book_id in s_mlgb_book_id: #{
        html += '<a href="%s%s/%s/" ' % (editable_link, mlgb_book_url, book_id)
        html += ' title="Further details of book" '
        html += ' class="link_from_index_to_book">'
        html += s_copy_code
        html += '</a> '
    #}
    # Or start a span which you can hover over and get a bit more info.
    if not html: #{
        html += '<span title="%s" class="index_catalogue_entry" ' % hover_title
        html += ' onclick="alert(' + "'" + onclick_title + "'" + ')">'
        html += s_copy_code
        html += '</span>'
    #}
    # Add description/notes if there are any,
    # e.g. 'sermones Ailmeri prioris in glosis' or '(1 copy) = K5.7'
    if s_copy_notes.strip(): html += ' %s' % s_copy_notes
    return html
#}
# end get_copy_string()
#--------------------------------------------------------------------------------
def get_library_link( library_type_code, library_type_name, library_loc_id, library_loc_name ): #{
    """Link to a library source page, optionally followed by a link to its location."""
    if not library_type_code or not library_type_name:
        return '[no library found]'
    prefix = '/e' if editable else ''  # editors browse via the '/e' prefix
    type_url = "%s%s/source/%s/" % (prefix, medieval_catalogues_url, library_type_code)
    pieces = [ '<a href="%s" title="%s">%s</a>' % (type_url, library_type_name, library_type_name) ]
    if library_loc_id and library_loc_name:
        # Skip the location when the type name already ends with it
        # (e.g. HENRY DE KIRKESTEDE would otherwise appear twice).
        if not library_type_name.endswith( library_loc_name ):
            loc_url = "%s%s/" % (type_url, library_loc_id)
            pieces.append( ': <a href="%s" title="%s">%s</a>' % (loc_url, library_loc_name, library_loc_name) )
    return ''.join( pieces )
#}
#--------------------------------------------------------------------------------
def get_document_link( document_code, document_name, s_document_type = '' ): #{
    """Link to a medieval-catalogue document page; '' when code or name is missing.

    s_document_type is accepted for call-site compatibility but deliberately not
    shown - the type is already part of the document name.
    """
    if not document_code or not document_name: return ''
    prefix = '/e' if editable else ''
    url = "%s%s/%s/" % (prefix, medieval_catalogues_url, document_code)
    return '<a href="%s" title="%s">%s</a>' % (url, document_name, document_name)
#}
#--------------------------------------------------------------------------------
def get_doctype_dropdown_options(): #{
    # Get a list of document types for a dropdown list.
    # The first entry is blank so the dropdown can start with no selection.
    doctypes = [ "" ]
    the_cursor = connection.cursor()
    statement = "select distinct document_type from index_medieval_documents order by document_type"
    the_cursor.execute( statement )
    sql_doctypes = the_cursor.fetchall()
    for sql_row in sql_doctypes:
        doctypes.append( sql_row[ 0 ] )  # each row is a 1-tuple: (document_type,)
    the_cursor.close()
    return doctypes
#}
#--------------------------------------------------------------------------------
| true | true |
f72142840be762476a0be5e21baec4a6ef055bf3 | 939 | py | Python | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | lists/models.py | danrneal/superlists | d8e956720429915eaee732020a2c51b884a3d143 | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class List(models.Model):
    """A to-do list, optionally owned by a user and shareable with others."""
    # Owner may be NULL: lists created anonymously are allowed.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
    shared_with = models.ManyToManyField(
        settings.AUTH_USER_MODEL, related_name='shared_lists'
    )
    @property
    def name(self):
        # The list is labelled by its first item's text.
        # NOTE(review): item_set.first() is None for an empty list, so this
        # raises AttributeError - confirm callers only use it on non-empty lists.
        return self.item_set.first().text
    def get_absolute_url(self):
        return reverse('view_list', args=[self.id])
    @staticmethod
    def create_new(first_item_text, owner=None):
        # Convenience constructor: a list is only ever created together with
        # its first item.
        list_ = List.objects.create(owner=owner)
        Item.objects.create(text=first_item_text, list=list_)
        return list_
class Item(models.Model):
    """A single entry on a List."""
    text = models.TextField(default='')
    list = models.ForeignKey(List, default=None)
    class Meta:
        ordering = ('id',)  # creation order
        unique_together = ('list', 'text')  # no duplicate item text within one list
    def __str__(self):
        return self.text
| 26.083333 | 78 | 0.681576 | from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import models
class List(models.Model):
owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True)
shared_with = models.ManyToManyField(
settings.AUTH_USER_MODEL, related_name='shared_lists'
)
@property
def name(self):
return self.item_set.first().text
def get_absolute_url(self):
return reverse('view_list', args=[self.id])
@staticmethod
def create_new(first_item_text, owner=None):
list_ = List.objects.create(owner=owner)
Item.objects.create(text=first_item_text, list=list_)
return list_
class Item(models.Model):
text = models.TextField(default='')
list = models.ForeignKey(List, default=None)
class Meta:
ordering = ('id',)
unique_together = ('list', 'text')
def __str__(self):
return self.text
| true | true |
f721429b03aedf2a6362c8a4270184f7d7d464c4 | 4,555 | py | Python | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | project/ionicv1/main.py | Bhanditz/JavaScriptEnhancements | f87ff0ae9dba99bab69bf4fe4e73ca29d198f81e | [
"MIT"
] | null | null | null | import sublime, sublime_plugin
import os, webbrowser, shlex, json, collections
def ionicv1_ask_custom_path(project_path, type):
  """Prompt the user for the path of the ionic CLI, then continue the workflow."""
  def on_done(ionicv1_custom_path):
    # Brand-new projects (and newly added project types) get fully scaffolded;
    # otherwise only the settings file needs (re)writing.
    if type == "create_new_project" or type == "add_project_type":
      ionicv1_prepare_project(project_path, ionicv1_custom_path)
    else:
      add_ionicv1_settings(project_path, ionicv1_custom_path)
  sublime.active_window().show_input_panel("Ionic v1 CLI custom path", "ionic", on_done, None, None)
def add_ionicv1_settings(working_directory, ionicv1_custom_path):
  """Patch .flowconfig to ignore Cordova folders and write ionicv1_settings.json.

  :param working_directory: directory the ionic commands will run in
  :param ionicv1_custom_path: path of the ionic CLI executable chosen by the user
  """
  project_path = working_directory
  settings = get_project_settings()
  if settings :
    project_path = settings["project_dir_name"]
  flowconfig_file_path = os.path.join(project_path, ".flowconfig")
  # Extend the [ignore] section so Flow skips Cordova build artefacts.
  with open(flowconfig_file_path, 'r+', encoding="utf-8") as file:
    content = file.read()
    content = content.replace("[ignore]", """[ignore]
<PROJECT_ROOT>/platforms/.*
<PROJECT_ROOT>/hooks/.*
<PROJECT_ROOT>/plugins/.*
<PROJECT_ROOT>/resources/.*""")
    file.seek(0)
    file.truncate()
    file.write(content)
  PROJECT_SETTINGS_FOLDER_PATH = os.path.join(project_path, PROJECT_SETTINGS_FOLDER_NAME)
  # Seed the per-project settings from the bundled default_config.json, preserving
  # key order so the written JSON stays diff-friendly.  (The original used a bare
  # open(...).read() here, leaking the file handle.)
  default_config_path = os.path.join(PROJECT_FOLDER, "ionicv1", "default_config.json")
  with open(default_config_path) as default_config_file:
    default_config = json.loads(default_config_file.read(), object_pairs_hook=collections.OrderedDict)
  default_config["working_directory"] = working_directory
  default_config["cli_custom_path"] = ionicv1_custom_path
  ionicv1_settings = os.path.join(PROJECT_SETTINGS_FOLDER_PATH, "ionicv1_settings.json")
  with open(ionicv1_settings, 'w+') as file:
    file.write(json.dumps(default_config, indent=2))
def ionicv1_prepare_project(project_path, ionicv1_custom_path):
  # Scaffold a blank ionic v1 app in the project directory via the ionic CLI,
  # open the Sublime project if it is not already open, then write the settings.
  terminal = Terminal(cwd=project_path)
  if sublime.platform() != "windows":
    # `ionic start` creates ./myApp; the chained shell commands move its
    # contents (including dotfiles) up one level and delete the folder.
    open_project = ["&&", shlex.quote(sublime_executable_path()), shlex.quote(get_project_settings(project_path)["project_file_name"])] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    terminal.run([shlex.quote(ionicv1_custom_path), "start", "myApp", "blank", "--type", "ionic1", ";", "mv", "./myApp/{.[!.],}*", "./", ";", "rm", "-rf", "myApp"] + open_project)
  else:
    # Windows uses the bundled move_all.bat helper to relocate the generated files.
    open_project = [sublime_executable_path(), get_project_settings(project_path)["project_file_name"], "&&", "exit"] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
    terminal.run([ionicv1_custom_path, "start", "myApp", "blank", "--type", "ionic1", "&", os.path.join(WINDOWS_BATCH_FOLDER, "move_all.bat"), "myApp", ".", "&", "rd", "/s", "/q", "myApp"])
    if open_project:
      terminal.run(open_project)
  add_ionicv1_settings(project_path, ionicv1_custom_path)
# Register this module's callback with the JavaScript Enhancements hook system:
# all three project-lifecycle events funnel into the CLI-path prompt above.
Hook.add("ionicv1_after_create_new_project", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_configuration", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_type", ionicv1_ask_custom_path)
class enable_menu_ionicv1EventListener(enable_menu_project_typeEventListener):
  # Enables/disables the "ionicv1" main menu depending on the active project
  # type; the base class swaps between the two menu files below.
  project_type = "ionicv1"
  path = os.path.join(PROJECT_FOLDER, "ionicv1", "Main.sublime-menu")
  path_disabled = os.path.join(PROJECT_FOLDER, "ionicv1", "Main_disabled.sublime-menu")
class ionicv1_cliCommand(manage_cliCommand):
  """Sublime command proxying the ionic v1 CLI, merging per-platform extra
  options from the project's ionicv1 settings into the command line."""
  cli = "ionic"
  custom_name = "ionicv1"
  settings_name = "ionicv1_settings"

  def prepare_command(self, **kwargs):
    # Commands containing the ":platform" placeholder need the user to name a
    # platform before they can run.
    if ":platform" in self.command:
      self.window.show_input_panel("Platform:", "", self.platform_on_done, None, None)
    else :
      self._run()

  def platform_on_done(self, platform):
    self.placeholders[":platform"] = shlex.quote(platform.strip())
    self.command = self.substitute_placeholders(self.command)
    self._run()

  def _run(self):
    try:
      # Append the user-configured extra options for the sub-command being run.
      self.command = {
        'run': lambda : self.command + self.settings["ionicv1_settings"]["platform_run_options"][self.command[2].replace('--', '')][self.command[1]],
        'compile': lambda : self.command + self.settings["ionicv1_settings"]["platform_compile_options"][self.command[2].replace('--', '')][self.command[1]],
        'build': lambda : self.command + self.settings["ionicv1_settings"]["platform_build_options"][self.command[2].replace('--', '')][self.command[1]],
        # Bug fix: this read "ionicv2_settings" - a copy/paste slip from the
        # ionic v2 plugin that made 'prepare' always fall into the except below.
        'prepare': lambda : self.command + self.settings["ionicv1_settings"]["platform_prepare_options"][self.command[1]],
        'serve': lambda : self.command + self.settings["ionicv1_settings"]["serve_options"]
      }[self.command[0]]()
    except KeyError:
      # No extra options configured for this sub-command/platform: run as-is.
      pass
    except Exception:
      # Bug fix: `traceback` is not imported at module level in this file, so
      # the original handler itself raised NameError.
      import traceback
      print(traceback.format_exc())
    super(ionicv1_cliCommand, self)._run()
| 47.947368 | 304 | 0.730626 | import sublime, sublime_plugin
import os, webbrowser, shlex, json, collections
def ionicv1_ask_custom_path(project_path, type):
sublime.active_window().show_input_panel("Ionic v1 CLI custom path", "ionic", lambda ionicv1_custom_path: ionicv1_prepare_project(project_path, ionicv1_custom_path) if type == "create_new_project" or type == "add_project_type" else add_ionicv1_settings(project_path, ionicv1_custom_path), None, None)
def add_ionicv1_settings(working_directory, ionicv1_custom_path):
project_path = working_directory
settings = get_project_settings()
if settings :
project_path = settings["project_dir_name"]
flowconfig_file_path = os.path.join(project_path, ".flowconfig")
with open(flowconfig_file_path, 'r+', encoding="utf-8") as file:
content = file.read()
content = content.replace("[ignore]", """[ignore]
<PROJECT_ROOT>/platforms/.*
<PROJECT_ROOT>/hooks/.*
<PROJECT_ROOT>/plugins/.*
<PROJECT_ROOT>/resources/.*""")
file.seek(0)
file.truncate()
file.write(content)
PROJECT_SETTINGS_FOLDER_PATH = os.path.join(project_path, PROJECT_SETTINGS_FOLDER_NAME)
default_config = json.loads(open(os.path.join(PROJECT_FOLDER, "ionicv1", "default_config.json")).read(), object_pairs_hook=collections.OrderedDict)
default_config["working_directory"] = working_directory
default_config["cli_custom_path"] = ionicv1_custom_path
ionicv1_settings = os.path.join(PROJECT_SETTINGS_FOLDER_PATH, "ionicv1_settings.json")
with open(ionicv1_settings, 'w+') as file:
file.write(json.dumps(default_config, indent=2))
def ionicv1_prepare_project(project_path, ionicv1_custom_path):
terminal = Terminal(cwd=project_path)
if sublime.platform() != "windows":
open_project = ["&&", shlex.quote(sublime_executable_path()), shlex.quote(get_project_settings(project_path)["project_file_name"])] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
terminal.run([shlex.quote(ionicv1_custom_path), "start", "myApp", "blank", "--type", "ionic1", ";", "mv", "./myApp/{.[!.],}*", "./", ";", "rm", "-rf", "myApp"] + open_project)
else:
open_project = [sublime_executable_path(), get_project_settings(project_path)["project_file_name"], "&&", "exit"] if not is_project_open(get_project_settings(project_path)["project_file_name"]) else []
terminal.run([ionicv1_custom_path, "start", "myApp", "blank", "--type", "ionic1", "&", os.path.join(WINDOWS_BATCH_FOLDER, "move_all.bat"), "myApp", ".", "&", "rd", "/s", "/q", "myApp"])
if open_project:
terminal.run(open_project)
add_ionicv1_settings(project_path, ionicv1_custom_path)
Hook.add("ionicv1_after_create_new_project", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_configuration", ionicv1_ask_custom_path)
Hook.add("ionicv1_add_javascript_project_type", ionicv1_ask_custom_path)
class enable_menu_ionicv1EventListener(enable_menu_project_typeEventListener):
project_type = "ionicv1"
path = os.path.join(PROJECT_FOLDER, "ionicv1", "Main.sublime-menu")
path_disabled = os.path.join(PROJECT_FOLDER, "ionicv1", "Main_disabled.sublime-menu")
class ionicv1_cliCommand(manage_cliCommand):
cli = "ionic"
custom_name = "ionicv1"
settings_name = "ionicv1_settings"
def prepare_command(self, **kwargs):
if ":platform" in self.command:
self.window.show_input_panel("Platform:", "", self.platform_on_done, None, None)
else :
self._run()
def platform_on_done(self, platform):
self.placeholders[":platform"] = shlex.quote(platform.strip())
self.command = self.substitute_placeholders(self.command)
self._run()
def _run(self):
try:
self.command = {
'run': lambda : self.command + self.settings["ionicv1_settings"]["platform_run_options"][self.command[2].replace('--', '')][self.command[1]],
'compile': lambda : self.command + self.settings["ionicv1_settings"]["platform_compile_options"][self.command[2].replace('--', '')][self.command[1]],
'build': lambda : self.command + self.settings["ionicv1_settings"]["platform_build_options"][self.command[2].replace('--', '')][self.command[1]],
'prepare': lambda : self.command + self.settings["ionicv2_settings"]["platform_prepare_options"][self.command[1]],
'serve': lambda : self.command + self.settings["ionicv1_settings"]["serve_options"]
}[self.command[0]]()
except KeyError as err:
pass
except Exception as err:
print(traceback.format_exc())
pass
super(ionicv1_cliCommand, self)._run()
| true | true |
f72142e5ac00cf950ce98fbca8180f0dd514c5e9 | 1,671 | py | Python | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | cn_proj/USocket.py | Carl-Rabbit/CS305-CN-Proj | d005c32674b7ce3f90e4099c536b3c914e133d7d | [
"Apache-2.0"
] | null | null | null | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
# Registry of the real UDP sockets backing each UnreliableSocket, keyed by id(instance).
sockets = {}
# Fixed local endpoint every datagram is sent to, and accepted from, by this module.
network = ('127.0.0.1', 12345)
def bytes_to_addr(data):
    """Decode the 8-byte header into an (ip, port) tuple.

    Layout: 4 bytes packed IPv4 address, then a 4-byte big-endian port.
    The parameter was renamed from `bytes`, which shadowed the builtin type;
    both call sites in this module pass it positionally.
    """
    return inet_ntoa(data[:4]), int.from_bytes(data[4:8], 'big')
def addr_to_bytes(addr):
    """Encode an (ip, port) tuple as the 8-byte header: packed IPv4 + big-endian port."""
    host, port = addr
    return inet_aton(host) + port.to_bytes(4, 'big')
def get_sendto(sock_id, rate=None):
    """Return a sendto(data, addr) closure for the socket registered under *sock_id*.

    Each datagram is prefixed with the 8-byte destination header and relayed to
    the fixed `network` endpoint.  When *rate* (bytes/second) is given, the
    closure sleeps proportionally to the payload size, throttling throughput.

    The original duplicated the closure body in both branches of an
    if/else on *rate*; the two variants are merged (the shadowing parameter
    name `id` is also renamed - callers pass it positionally).
    """
    def sendto(data: bytes, addr):
        if rate:
            time.sleep(len(data) / rate)
        sockets[sock_id].sendto(addr_to_bytes(addr) + data, network)
    return sendto
class UnreliableSocket:
    """UDP socket wrapper that talks exclusively through the local `network` relay.

    The real socket is kept in the module-level `sockets` registry, keyed by
    id(self); every payload is framed with an 8-byte address header.
    """
    def __init__(self, rate=None):
        assert rate is None or rate > 0, 'Rate should be positive or None.'
        sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
        self.sendto = get_sendto(id(self), rate)

    def bind(self, address: (str, int)):
        sockets[id(self)].bind(address)

    def recvfrom(self, bufsize) -> bytes:
        # Loop (instead of the original unbounded recursion) until a datagram
        # arriving from the relay endpoint is seen; the 8-byte header carries
        # the logical peer address, the rest is the payload.
        while True:
            data, frm = sockets[id(self)].recvfrom(bufsize)
            addr = bytes_to_addr(data[:8])
            if frm == network:
                return data[8:], addr

    def settimeout(self, value):
        sockets[id(self)].settimeout(value)

    def gettimeout(self):
        return sockets[id(self)].gettimeout()

    def setblocking(self, flag):
        sockets[id(self)].setblocking(flag)

    def getblocking(self):
        # Bug fix: the result was computed but not returned (always None before).
        return sockets[id(self)].getblocking()

    def getsockname(self):
        return sockets[id(self)].getsockname()

    def close(self):
        sockets[id(self)].close()
| 26.109375 | 75 | 0.618791 | from socket import socket, AF_INET, SOCK_DGRAM, inet_aton, inet_ntoa
import time
sockets = {}
network = ('127.0.0.1', 12345)
def bytes_to_addr(bytes):
return inet_ntoa(bytes[:4]), int.from_bytes(bytes[4:8], 'big')
def addr_to_bytes(addr):
return inet_aton(addr[0]) + addr[1].to_bytes(4, 'big')
def get_sendto(id, rate=None):
if rate:
def sendto(data: bytes, addr):
time.sleep(len(data) / rate)
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
else:
def sendto(data: bytes, addr):
sockets[id].sendto(addr_to_bytes(addr) + data, network)
return sendto
class UnreliableSocket:
def __init__(self, rate=None):
assert rate is None or rate > 0, 'Rate should be positive or None.'
sockets[id(self)] = socket(AF_INET, SOCK_DGRAM)
self.sendto = get_sendto(id(self), rate)
def bind(self, address: (str, int)):
sockets[id(self)].bind(address)
def recvfrom(self, bufsize) -> bytes:
data, frm = sockets[id(self)].recvfrom(bufsize)
addr = bytes_to_addr(data[:8])
if frm == network:
return data[8:], addr
else:
return self.recvfrom(bufsize)
def settimeout(self, value):
sockets[id(self)].settimeout(value)
def gettimeout(self):
return sockets[id(self)].gettimeout()
def setblocking(self, flag):
sockets[id(self)].setblocking(flag)
def getblocking(self):
sockets[id(self)].getblocking()
def getsockname(self):
return sockets[id(self)].getsockname()
def close(self):
sockets[id(self)].close()
| true | true |
f721433a67499332ba6e8d52379605bedd3d870c | 408 | py | Python | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | 20 | 2021-03-31T20:10:46.000Z | 2022-02-15T09:58:13.000Z | evan/services/mailer.py | eillarra/evan | befe0f8daedd1b1f629097110d92e68534e43da1 | [
"MIT"
] | null | null | null | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from typing import List
def send_email(
    *, from_email: str = "Evan <evan@ugent.be>", to: List[str], subject: str, template: str, context_data: dict
):
    """Render *template* with *context_data* and send the result as a plain-text email."""
    body = render_to_string(template, context_data)
    message = EmailMultiAlternatives(subject, body, from_email, to)
    message.send()
| 34 | 111 | 0.759804 | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from typing import List
def send_email(
*, from_email: str = "Evan <evan@ugent.be>", to: List[str], subject: str, template: str, context_data: dict
):
text_content = render_to_string(template, context_data)
msg = EmailMultiAlternatives(subject, text_content, from_email, to)
msg.send()
| true | true |
f721457bba4d592a55104c5e37b8693bb3fe93c6 | 857 | py | Python | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | 1 | 2021-05-18T08:42:18.000Z | 2021-05-18T08:42:18.000Z | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | null | null | null | posts/migrations/0005_vote.py | MrRezoo/django-social-network | 253afed6f12ed5cb2c22066961ea3fa33727be20 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-19 19:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Vote model: one row per (user, post) pair, reachable from both
    # sides via the 'votes' related name.
    # NOTE(review): there is no unique_together on (user, post), so duplicate
    # votes by the same user are possible at the DB level - confirm intended.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('posts', '0004_alter_comment_reply'),
    ]
    operations = [
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 34.28 | 140 | 0.654609 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0004_alter_comment_reply'),
]
operations = [
migrations.CreateModel(
name='Vote',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to='posts.post')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f72145de142e44a5179e105ce79e68d8d169b232 | 7,927 | py | Python | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | 1 | 2020-07-22T08:45:28.000Z | 2020-07-22T08:45:28.000Z | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | null | null | null | test/functional/qtum_block_header.py | tongshiguanzi/O2O | c11983c922c83cdc97bd754d9f8a0d5a094f004f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import time
from test_framework.key import ECKey
from test_framework.script import *
import struct
import io
def find_unspent(node, amount):
    """Return a CTxIn spending the first spendable UTXO of exactly *amount*.

    Raises AssertionError when no matching UTXO exists.  An explicit raise
    replaces the original bare `assert(False)`, which is stripped under
    `python -O` and would then silently return None.
    """
    for unspent in node.listunspent():
        if unspent['amount'] == amount and unspent['spendable']:
            return CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)
    raise AssertionError("no spendable utxo of amount %s found" % amount)
class QtumBlockHeaderTest(BitcoinTestFramework):
    """Functional test of qtum-specific block-header validation.

    Mines a long regtest chain, then submits blocks whose hashStateRoot /
    hashUTXORoot header fields are correct or deliberately corrupted and
    checks that the node accepts, or rejects and disconnects, accordingly.
    """
    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [[]]
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)
        node = self.nodes[0]
        #mocktime = 1490247077
        #node.setmocktime(mocktime)
        node.generate(10)
        self.block_time = int(time.time())+20
        # Build a 500-block chain so later coinbases are mature and spendable.
        for i in range(500):
            self.tip = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount()+1), self.block_time+i)
            self.tip.solve()
            self.sync_blocks([self.tip])
        #node.generate(COINBASE_MATURITY+50)
        mocktime = COINBASE_MATURITY+50
        spendable_addresses = []
        # store some addresses to use later
        for unspent in node.listunspent():
            spendable_addresses.append(unspent['address'])
        # first make sure that what is a valid block is accepted
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(time.time()+mocktime+100))
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip])
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        # A block that has an OP_CREATE tx, but with an incorrect state root
        """
        pragma solidity ^0.4.11;
        contract Test {
            function() payable {}
        }
        """
        tx_hex = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029", 1000000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
        f = io.BytesIO(hex_str_to_bytes(tx_hex))
        tx = CTransaction()
        tx.deserialize(f)
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+200))
        self.tip.vtx.append(tx)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # Create a contract for use later.
        """
        pragma solidity ^0.4.11;
        contract Test {
            function() payable {}
        }
        """
        contract_address = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029")['address']
        node.generate(1)
        # Remember the genuine roots of the current tip so later blocks can
        # reuse them (valid) or substitute 0xaaa... garbage (invalid).
        realHashUTXORoot = int(node.getblock(node.getbestblockhash())['hashUTXORoot'], 16)
        realHashStateRoot = int(node.getblock(node.getbestblockhash())['hashStateRoot'], 16)
        # A block with both an invalid hashStateRoot and hashUTXORoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+300))
        self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with a tx, but without updated state hashes
        tx_hex = node.sendtocontract(contract_address, "00", 1, 100000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
        f = io.BytesIO(hex_str_to_bytes(tx_hex))
        tx = CTransaction()
        tx.deserialize(f)
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+400))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.vtx.append(tx)
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with an invalid hashUTXORoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+500))
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # A block with an invalid hashStateRoot
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+600))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip], success=False, reconnect=True)
        # Verify that blocks with a correct hashStateRoot and hashUTXORoot are accepted.
        coinbase = create_coinbase(node.getblockcount()+1)
        coinbase.rehash()
        self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+700))
        self.tip.hashUTXORoot = realHashUTXORoot
        self.tip.hashStateRoot = realHashStateRoot
        self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
        self.tip.solve()
        self.sync_blocks([self.tip])
    def reconnect_p2p(self):
        """Tear down and bootstrap the P2P connection to the node.
        The node gets disconnected several times in this test. This helper
        method reconnects the p2p and restarts the network thread."""
        self.nodes[0].disconnect_p2ps()
        self.nodes[0].add_p2p_connection(P2PDataStore())
        self.nodes[0].p2p.wait_for_getheaders(timeout=5)
    def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
        """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block.
        Call with success = False if the tip shouldn't advance to the most recent block."""
        self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
        if reconnect:
            self.reconnect_p2p()
# Standard functional-test entry point.
if __name__ == '__main__':
    QtumBlockHeaderTest().main()
| 44.284916 | 292 | 0.692444 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.mininode import *
from test_framework.address import *
from test_framework.qtum import *
import time
from test_framework.key import ECKey
from test_framework.script import *
import struct
import io
def find_unspent(node, amount):
for unspent in node.listunspent():
if unspent['amount'] == amount and unspent['spendable']:
return CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)
assert(False)
class QtumBlockHeaderTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PDataStore())
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
node = self.nodes[0]
node.generate(10)
self.block_time = int(time.time())+20
for i in range(500):
self.tip = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount()+1), self.block_time+i)
self.tip.solve()
self.sync_blocks([self.tip])
mocktime = COINBASE_MATURITY+50
spendable_addresses = []
for unspent in node.listunspent():
spendable_addresses.append(unspent['address'])
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(time.time()+mocktime+100))
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip])
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
tx_hex = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029", 1000000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
tx = CTransaction()
tx.deserialize(f)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+200))
self.tip.vtx.append(tx)
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
contract_address = node.createcontract("60606040523415600b57fe5b5b60398060196000396000f30060606040525b600b5b5b565b0000a165627a7a72305820693c4900c412f72a51f8c01a36d38d9038d822d953faf5a5b28e40ec6e1a25020029")['address']
node.generate(1)
realHashUTXORoot = int(node.getblock(node.getbestblockhash())['hashUTXORoot'], 16)
realHashStateRoot = int(node.getblock(node.getbestblockhash())['hashStateRoot'], 16)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+300))
self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
tx_hex = node.sendtocontract(contract_address, "00", 1, 100000, QTUM_MIN_GAS_PRICE_STR, spendable_addresses.pop(-1), False)['raw transaction']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
tx = CTransaction()
tx.deserialize(f)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+400))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = realHashStateRoot
self.tip.vtx.append(tx)
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+500))
self.tip.hashStateRoot = realHashStateRoot
self.tip.hashUTXORoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+600))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = 0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip], success=False, reconnect=True)
coinbase = create_coinbase(node.getblockcount()+1)
coinbase.rehash()
self.tip = create_block(int(node.getbestblockhash(), 16), coinbase, int(mocktime+700))
self.tip.hashUTXORoot = realHashUTXORoot
self.tip.hashStateRoot = realHashStateRoot
self.tip.hashMerkleRoot = self.tip.calc_merkle_root()
self.tip.solve()
self.sync_blocks([self.tip])
def reconnect_p2p(self):
self.nodes[0].disconnect_p2ps()
self.nodes[0].add_p2p_connection(P2PDataStore())
self.nodes[0].p2p.wait_for_getheaders(timeout=5)
def sync_blocks(self, blocks, success=True, reject_code=None, reject_reason=None, force_send=False, reconnect=False, timeout=5):
self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect)
if reconnect:
self.reconnect_p2p()
if __name__ == '__main__':
QtumBlockHeaderTest().main()
| true | true |
f721490f7323d6f2e9dbcf3d61d3cb7972830a93 | 1,481 | py | Python | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 7 | 2020-02-21T02:43:07.000Z | 2021-04-13T15:09:37.000Z | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 11 | 2020-03-19T09:49:30.000Z | 2021-05-04T15:32:24.000Z | addons/destinations/create_cas_destination.py | paataugrekhelidze/model-management-resources | e3cc8719f349f9755690a4cf87f7e75574966e9c | [
"Apache-2.0"
] | 17 | 2020-02-17T23:42:37.000Z | 2021-06-16T12:24:49.000Z | # Copyright (c) 2020, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
sys.path.append('..')
import mmAuthorization
import requests
import json
viya_host = "localhost"
port = ":8080"
host_url="http://" + viya_host + port
destination_url = host_url + "/modelPublish/destinations/"
mm_auth = mmAuthorization.mmAuthorization("myAuth")
# admin user id and password
admin_userId = "<SAS_user_admin_ID>"
user_passwd = "<SAS_user_password>"
# destination name
dest_name = "<my_CAS_destination_name>"
if admin_userId == "<SAS_user_admin_ID>":
print("You must replace the example values in this script with valid values before executing the script.")
exit(1)
admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)
destination_cas_headers = {
"If-Match":"false",
"Content-Type":"application/vnd.sas.models.publishing.destination.cas+json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
# create new destination, expecting 201
print("Creating the " + dest_name + " destination...")
destination_attrs = {
"name":dest_name,
"destinationType":"cas",
"casServerName":"cas-shared-default",
"casLibrary" : "public",
"destinationTable" : "SAS_MODEL_TABLE"
}
destination = requests.post(destination_url,
data=json.dumps(destination_attrs), headers=destination_cas_headers)
print(destination)
| 27.943396 | 110 | 0.740041 |
import sys
sys.path.append('..')
import mmAuthorization
import requests
import json
viya_host = "localhost"
port = ":8080"
host_url="http://" + viya_host + port
destination_url = host_url + "/modelPublish/destinations/"
mm_auth = mmAuthorization.mmAuthorization("myAuth")
admin_userId = "<SAS_user_admin_ID>"
user_passwd = "<SAS_user_password>"
dest_name = "<my_CAS_destination_name>"
if admin_userId == "<SAS_user_admin_ID>":
print("You must replace the example values in this script with valid values before executing the script.")
exit(1)
admin_auth_token = mm_auth.get_auth_token(host_url, admin_userId, user_passwd)
destination_cas_headers = {
"If-Match":"false",
"Content-Type":"application/vnd.sas.models.publishing.destination.cas+json",
mmAuthorization.AUTHORIZATION_HEADER: mmAuthorization.AUTHORIZATION_TOKEN + admin_auth_token
}
print("Creating the " + dest_name + " destination...")
destination_attrs = {
"name":dest_name,
"destinationType":"cas",
"casServerName":"cas-shared-default",
"casLibrary" : "public",
"destinationTable" : "SAS_MODEL_TABLE"
}
destination = requests.post(destination_url,
data=json.dumps(destination_attrs), headers=destination_cas_headers)
print(destination)
| true | true |
f7214984dd02b1bc0eae58d46c4bc02d9ce3fa79 | 4,206 | py | Python | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | thimbles/charts/radar_chart.py | quidditymaster/thimbles | b122654a012f0eb4f043d1ee757f884707c97615 | [
"MIT"
] | null | null | null | """
http://matplotlib.org/examples/api/radar_chart.html
Example of creating a radar chart (a.k.a. a spider or star chart) [1]_.
Although this example allows a frame of either 'circle' or 'polygon', polygon
frames don't have proper gridlines (the lines are circles instead of polygons).
It's possible to get a polygon grid by setting GRIDLINE_INTERPOLATION_STEPS in
matplotlib.axis to the desired number of vertices, but the orientation of the
polygon is not aligned with the radial axes.
.. [1] http://en.wikipedia.org/wiki/Radar_chart
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
    """Create a radar chart with `num_vars` axes.
    This function creates a RadarAxes projection and registers it.
    Parameters
    ----------
    num_vars : int
        Number of variables for radar chart.
    frame : {'circle' | 'polygon'}
        Shape of frame surrounding axes.
    Returns
    -------
    theta : ndarray
        The axis angles (radians) at which data values should be plotted;
        pass these as the angular coordinates to ``plot``/``fill`` on an
        axes created with ``projection="radar"``.
    """
    # calculate evenly-spaced axis angles
    theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
    # rotate theta such that the first axis is at the top
    theta += np.pi/2
    def draw_poly_patch(self):
        # Regular polygon inscribed in the unit circle at (0.5, 0.5),
        # drawn in axes coordinates.
        verts = unit_poly_verts(theta)
        return plt.Polygon(verts, closed=True, edgecolor='k')
    def draw_circle_patch(self):
        # unit circle centered on (0.5, 0.5)
        return plt.Circle((0.5, 0.5), 0.5)
    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if frame not in patch_dict:
        raise ValueError('unknown value for `frame`: %s' % frame)
    class RadarAxes(PolarAxes):
        # Projection name used in `subplot(..., projection='radar')`.
        name = 'radar'
        # use 1 line segment to connect specified points
        RESOLUTION = 1
        # define draw_frame method
        draw_patch = patch_dict[frame]
        def fill(self, *args, **kwargs):
            """Override fill so that line is closed by default"""
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
        def plot(self, *args, **kwargs):
            """Override plot so that line is closed by default"""
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)
        def _close_line(self, line):
            # Append the first vertex to the end so the polyline closes.
            x, y = line.get_data()
            # FIXME: markers at x[0], y[0] get doubled-up
            if x[0] != x[-1]:
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)
        def set_varlabels(self, labels):
            # One label per spoke, placed at the spoke angles (degrees).
            self.set_thetagrids(theta * 180/np.pi, labels)
        def _gen_axes_patch(self):
            return self.draw_patch()
        def _gen_axes_spines(self):
            if frame == 'circle':
                return PolarAxes._gen_axes_spines(self)
            # The following is a hack to get the spines (i.e. the axes frame)
            # to draw correctly for a polygon frame.
            # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
            spine_type = 'circle'
            verts = unit_poly_verts(theta)
            # close off polygon by repeating first vertex
            verts.append(verts[0])
            path = Path(verts)
            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}
    # Make the projection available globally under the name 'radar'.
    register_projection(RadarAxes)
    return theta
def unit_poly_verts(theta):
    """Return vertices of polygon for subplot axes.
    This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
    """
    center = 0.5
    radius = 0.5
    verts = []
    for angle in theta:
        verts.append((radius * np.cos(angle) + center,
                      radius * np.sin(angle) + center))
    return verts
if __name__ == "__main__":
    # Demo: draw a single radar (spider) chart with 5 random values.
    n_spokes = 5
    theta = radar_factory(n_spokes, frame="polygon")
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection="radar")
    datapoints = np.random.random(n_spokes)
    ax.plot(theta, datapoints)
    ax.fill(theta, datapoints)
    plt.show()
| 33.380952 | 79 | 0.622444 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
def radar_factory(num_vars, frame='circle'):
theta = 2*np.pi * np.linspace(0, 1-1./num_vars, num_vars)
theta += np.pi/2
def draw_poly_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def draw_circle_patch(self):
return plt.Circle((0.5, 0.5), 0.5)
patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
if frame not in patch_dict:
raise ValueError('unknown value for `frame`: %s' % frame)
class RadarAxes(PolarAxes):
name = 'radar'
RESOLUTION = 1
draw_patch = patch_dict[frame]
def fill(self, *args, **kwargs):
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(theta * 180/np.pi, labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if frame == 'circle':
return PolarAxes._gen_axes_spines(self)
spine_type = 'circle'
verts = unit_poly_verts(theta)
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def unit_poly_verts(theta):
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
if __name__ == "__main__":
n_spokes = 5
theta = radar_factory(n_spokes, frame="polygon")
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection="radar")
datapoints = np.random.random(n_spokes)
ax.plot(theta, datapoints)
ax.fill(theta, datapoints)
plt.show()
| true | true |
f7214ba2fb5f78050521fcf3f80e3a68ce4d1155 | 1,826 | py | Python | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 6 | 2020-07-27T21:08:55.000Z | 2021-05-04T07:00:29.000Z | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 18 | 2020-09-01T00:47:04.000Z | 2021-09-15T22:16:56.000Z | smlb/feature_selection/feature_selector_sklearn.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 2 | 2020-08-24T21:50:16.000Z | 2020-12-06T05:18:57.000Z | from smlb import (
params,
Data,
Features,
TabularData,
)
from smlb.feature_selection.selector_protocol_sklearn import SelectorProtocolSklearn
class FeatureSelectorSklearn(Features):
    """Base class for feature selection strategies backed by scikit-learn.

    The actual selection is delegated to a ``selector`` object supplied at
    construction time, which must expose scikit-learn-style ``fit`` and
    ``get_support`` methods.
    """

    def __init__(self, selector: SelectorProtocolSklearn, *args, **kwargs):
        """Initialize state.

        Parameters:
            selector: feature selection method providing ``fit`` and
                ``get_support`` methods
        """
        super().__init__(*args, **kwargs)
        self._selector: SelectorProtocolSklearn = params.instance(
            selector, SelectorProtocolSklearn
        )

    def fit(self, data: Data) -> "FeatureSelectorSklearn":
        """Fit the underlying selector on input ``data``.

        Parameters:
            data: labeled data to fit

        Returns:
            the instance itself (allows chaining)
        """
        data = params.instance(data, Data)
        num_samples = data.num_samples
        inputs = params.real_matrix(data.samples(), nrows=num_samples)
        labels = params.real_vector(data.labels(), dimensions=num_samples)
        self._selector.fit(inputs, labels)
        return self

    def apply(self, data: Data) -> TabularData:
        """Reduce ``data`` to the features chosen by the fitted selector.

        Parameters:
            data: data to select features from

        Returns:
            data containing only the selected feature columns
        """
        data = params.instance(data, Data)
        all_features = params.real_matrix(data.samples())
        mask = self._selector.get_support()
        return TabularData(all_features[:, mask], data.labels())
| 28.984127 | 116 | 0.633078 | from smlb import (
params,
Data,
Features,
TabularData,
)
from smlb.feature_selection.selector_protocol_sklearn import SelectorProtocolSklearn
class FeatureSelectorSklearn(Features):
def __init__(self, selector: SelectorProtocolSklearn, *args, **kwargs):
super().__init__(*args, **kwargs)
self._selector: SelectorProtocolSklearn = params.instance(
selector, SelectorProtocolSklearn
)
def fit(self, data: Data) -> "FeatureSelectorSklearn":
data = params.instance(data, Data)
n = data.num_samples
xtrain = params.real_matrix(data.samples(), nrows=n)
ytrain = params.real_vector(data.labels(), dimensions=n)
self._selector.fit(xtrain, ytrain)
return self
def apply(self, data: Data) -> TabularData:
data = params.instance(data, Data)
samples = params.real_matrix(data.samples())
support = self._selector.get_support()
selected = samples[:, support]
return TabularData(selected, data.labels())
| true | true |
f7214d90b8586fc89f0cc957a7cf81ccb7d45c94 | 10,292 | py | Python | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | netket/sampler/metropolis_numpy.py | NetKet/netket | 96758e814fc3128e6821564d6cc2852bac40ecf2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Tuple, Callable
import numpy as np
from numba import jit
from jax import numpy as jnp
import jax
from netket.hilbert import AbstractHilbert
from netket.utils.mpi import mpi_sum, n_nodes
from netket.utils.types import PyTree
from netket.utils.deprecation import deprecated
import netket.jax as nkjax
from .metropolis import MetropolisSampler
@dataclass
class MetropolisNumpySamplerState:
    """Mutable state carried across calls of :class:`MetropolisSamplerNumpy`.

    Holds the current chain configurations, preallocated scratch buffers
    for proposed configurations and their log-amplitudes, the numpy RNG,
    the transition-rule state, and per-process acceptance statistics.
    """

    σ: np.ndarray
    """Holds the current configuration."""
    σ1: np.ndarray
    """Holds a proposed configuration (preallocation)."""
    log_values: np.ndarray
    """Holds model(pars, σ) for the current σ (preallocation)."""
    log_values_1: np.ndarray
    """Holds model(pars, σ1) for the last σ1 (preallocation)."""
    log_prob_corr: np.ndarray
    """Holds optional acceptance correction (preallocation)."""
    rule_state: Any
    """The optional state of the rule."""
    rng: Any
    """A numpy random generator."""
    n_steps_proc: int = 0
    """Number of moves performed along the chains in this process since the last reset."""
    n_accepted_proc: int = 0
    """Number of accepted transitions among the chains in this process since the last reset."""
    @property
    def acceptance(self) -> float:
        """The fraction of accepted moves across all chains and MPI processes.
        The rate is computed since the last reset of the sampler.
        Will return None if no sampling has been performed since then.
        """
        if self.n_steps == 0:
            return None
        return self.n_accepted / self.n_steps
    @property
    @deprecated(
        """Please use the attribute `.acceptance` instead of
        `.acceptance_ratio`. The new attribute `.acceptance` returns the
        acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
        returning a percentage, which is a bug."""
    )
    def acceptance_ratio(self) -> float:
        """DEPRECATED: Please use the attribute `.acceptance` instead of
        `.acceptance_ratio`. The new attribute `.acceptance` returns the
        acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
        returning a percentage, which is a bug.
        The percentage of accepted moves across all chains and MPI processes.
        The rate is computed since the last reset of the sampler.
        Will return None if no sampling has been performed since then.
        """
        return self.acceptance * 100
    @property
    def n_steps(self) -> int:
        """Total number of moves performed across all processes since the last reset."""
        return self.n_steps_proc * n_nodes
    @property
    def n_accepted(self) -> int:
        """Total number of moves accepted across all processes since the last reset."""
        # Sums the per-process counter over all MPI ranks.
        return mpi_sum(self.n_accepted_proc)
    def __repr__(self):
        # Only show acceptance statistics once some sampling has happened.
        if self.n_steps > 0:
            acc_string = "# accepted = {}/{} ({}%), ".format(
                self.n_accepted, self.n_steps, self.acceptance * 100
            )
        else:
            acc_string = ""
        return f"MetropolisNumpySamplerState({acc_string}rng state={self.rng})"
@partial(jax.jit, static_argnums=0)
def apply_model(machine, pars, weights):
    """Jit-compiled forward pass of the model.

    `machine` is marked static so jax compiles one computation per model
    object instead of tracing it as data.
    """
    return machine.apply(pars, weights)
class MetropolisSamplerNumpy(MetropolisSampler):
    """
    Metropolis-Hastings sampler for an Hilbert space according to a specific transition
    rule executed on CPU through Numpy.

    This sampler is equivalent to :ref:`netket.sampler.MetropolisSampler` but instead of
    executing the whole sampling inside a jax-jitted function, only evaluates the forward
    pass inside a jax-jitted function, while proposing new steps and accepting/rejecting
    them is performed in numpy.

    Because of Jax dispatch cost, and especially for small system, this sampler performs
    poorly, while asymptotically it should have the same performance of standard Jax samplers.

    However, some transition rules don't work on GPU, and some samplers (Hamiltonian) work
    very poorly on jax so this is a good workaround.

    See :ref:`netket.sampler.MetropolisSampler` for more informations.
    """

    def _init_state(sampler, machine, parameters, key):
        """Build a fresh :class:`MetropolisNumpySamplerState`."""
        # Seed a numpy Generator from the jax PRNG key.
        rgen = np.random.default_rng(np.asarray(key))

        σ = np.zeros((sampler.n_batches, sampler.hilbert.size), dtype=sampler.dtype)

        # Determine the output dtype of the model without evaluating it.
        ma_out = jax.eval_shape(machine.apply, parameters, σ)

        state = MetropolisNumpySamplerState(
            σ=σ,
            σ1=np.copy(σ),
            log_values=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
            log_values_1=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
            log_prob_corr=np.zeros(
                sampler.n_batches, dtype=nkjax.dtype_real(ma_out.dtype)
            ),
            rng=rgen,
            rule_state=sampler.rule.init_state(sampler, machine, parameters, rgen),
        )

        # If chains are not reset at every `reset`, draw the initial
        # configurations once, here.
        if not sampler.reset_chains:
            # directly generate a PRNGKey which is a [2xuint32] array
            key = jnp.asarray(
                state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
            )
            state.σ = np.copy(
                sampler.rule.random_state(sampler, machine, parameters, state, key)
            )

        return state

    def _reset(sampler, machine, parameters, state):
        """Reset the chains (if configured), the rule state and the statistics."""
        if sampler.reset_chains:
            # directly generate a PRNGKey which is a [2xuint32] array
            key = jnp.asarray(
                state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
            )
            state.σ = np.copy(
                sampler.rule.random_state(sampler, machine, parameters, state, key)
            )

        state.rule_state = sampler.rule.reset(sampler, machine, parameters, state)
        state.log_values = np.copy(apply_model(machine, parameters, state.σ))

        # Reset the acceptance statistics. (Bug fix: this previously assigned
        # to unused `_accepted_samples`/`_total_samples` attributes, leaving
        # the real counters documented as "since the last reset" untouched.)
        state.n_accepted_proc = 0
        state.n_steps_proc = 0

        return state

    def _sample_next(sampler, machine, parameters, state):
        """Perform `n_sweeps` Metropolis sweeps and return the new samples."""
        σ = state.σ
        σ1 = state.σ1
        log_values = state.log_values
        log_values_1 = state.log_values_1
        log_prob_corr = state.log_prob_corr
        mpow = sampler.machine_pow

        rgen = state.rng

        accepted = 0

        for sweep in range(sampler.n_sweeps):
            # Propose a new state using the transition kernel.
            # NOTE(review): the rule presumably writes the proposal into
            # state.σ1 (and log_prob_corr) in place — confirm against the
            # numpy rule implementations.
            sampler.rule.transition(sampler, machine, parameters, state, state.rng, σ)

            log_values_1 = np.asarray(apply_model(machine, parameters, σ1))

            random_uniform = rgen.uniform(0, 1, size=σ.shape[0])

            # Acceptance Kernel: mutates σ and log_values in place.
            accepted += acceptance_kernel(
                σ,
                σ1,
                log_values,
                log_values_1,
                log_prob_corr,
                mpow,
                random_uniform,
            )

        state.n_steps_proc += sampler.n_sweeps * sampler.n_chains
        state.n_accepted_proc += accepted

        return state, state.σ

    def _sample_chain(
        sampler,
        machine: Callable,
        parameters: PyTree,
        state: MetropolisNumpySamplerState,
        chain_length: int,
    ) -> Tuple[jnp.ndarray, MetropolisNumpySamplerState]:
        """Sample `chain_length` batches of configurations.

        Returns:
            A (chain_length, n_chains, hilbert.size) array of samples and
            the updated state.
        """
        samples = np.empty(
            (chain_length, sampler.n_chains, sampler.hilbert.size), dtype=sampler.dtype
        )

        for i in range(chain_length):
            state, σ = sampler.sample_next(machine, parameters, state)
            samples[i] = σ

        return samples, state

    def __repr__(sampler):
        return (
            "MetropolisSamplerNumpy("
            + "\n  hilbert = {},".format(sampler.hilbert)
            + "\n  rule = {},".format(sampler.rule)
            + "\n  n_chains = {},".format(sampler.n_chains)
            + "\n  machine_power = {},".format(sampler.machine_pow)
            + "\n  reset_chains = {},".format(sampler.reset_chains)
            + "\n  n_sweeps = {},".format(sampler.n_sweeps)
            + "\n  dtype = {},".format(sampler.dtype)
            + ")"
        )

    def __str__(sampler):
        return (
            "MetropolisSamplerNumpy("
            + "rule = {}, ".format(sampler.rule)
            + "n_chains = {}, ".format(sampler.n_chains)
            + "machine_power = {}, ".format(sampler.machine_pow)
            + "n_sweeps = {}, ".format(sampler.n_sweeps)
            + "dtype = {})".format(sampler.dtype)
        )
@jit(nopython=True)
def acceptance_kernel(
    σ, σ1, log_values, log_values_1, log_prob_corr, machine_pow, random_uniform
):
    """Metropolis accept/reject step over a batch of chains, in place.

    For each chain i, the proposal σ1[i] is accepted with probability
    exp(machine_pow * Re(Δlog) + log_prob_corr[i]); on acceptance σ[i]
    and log_values[i] are overwritten. Returns the number of accepted
    moves.
    """
    n_accepted = 0
    for chain in range(σ.shape[0]):
        delta = (log_values_1[chain] - log_values[chain]).real
        prob = np.exp(machine_pow * delta + log_prob_corr[chain])
        assert not math.isnan(prob)
        if prob > random_uniform[chain]:
            log_values[chain] = log_values_1[chain]
            σ[chain] = σ1[chain]
            n_accepted += 1
    return n_accepted
def MetropolisLocalNumpy(hilbert: AbstractHilbert, *args, **kwargs):
    """Numpy Metropolis sampler over `hilbert` using the local update rule.

    Extra arguments are forwarded to :class:`MetropolisSamplerNumpy`.
    """
    from .rules import LocalRuleNumpy
    rule = LocalRuleNumpy()
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisHamiltonianNumpy(hilbert: AbstractHilbert, hamiltonian, *args, **kwargs):
    """Numpy Metropolis sampler whose transitions are generated by
    `hamiltonian` (see HamiltonianRuleNumpy).

    Extra arguments are forwarded to :class:`MetropolisSamplerNumpy`.
    """
    from .rules import HamiltonianRuleNumpy
    rule = HamiltonianRuleNumpy(hamiltonian)
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisCustomNumpy(
    hilbert: AbstractHilbert, move_operators, move_weights=None, *args, **kwargs
):
    """Numpy Metropolis sampler with custom transition operators
    `move_operators`, optionally weighted by `move_weights`.

    Extra arguments are forwarded to :class:`MetropolisSamplerNumpy`.
    """
    from .rules import CustomRuleNumpy
    rule = CustomRuleNumpy(move_operators, move_weights)
    return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
| 33.744262 | 95 | 0.643412 |
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Tuple, Callable
import numpy as np
from numba import jit
from jax import numpy as jnp
import jax
from netket.hilbert import AbstractHilbert
from netket.utils.mpi import mpi_sum, n_nodes
from netket.utils.types import PyTree
from netket.utils.deprecation import deprecated
import netket.jax as nkjax
from .metropolis import MetropolisSampler
@dataclass
class MetropolisNumpySamplerState:
σ: np.ndarray
σ1: np.ndarray
log_values: np.ndarray
log_values_1: np.ndarray
log_prob_corr: np.ndarray
rule_state: Any
rng: Any
n_steps_proc: int = 0
n_accepted_proc: int = 0
@property
def acceptance(self) -> float:
if self.n_steps == 0:
return None
return self.n_accepted / self.n_steps
@property
@deprecated(
"""Please use the attribute `.acceptance` instead of
`.acceptance_ratio`. The new attribute `.acceptance` returns the
acceptance ratio ∈ [0,1], instead of the current `acceptance_ratio`
returning a percentage, which is a bug."""
)
def acceptance_ratio(self) -> float:
return self.acceptance * 100
@property
def n_steps(self) -> int:
return self.n_steps_proc * n_nodes
@property
def n_accepted(self) -> int:
return mpi_sum(self.n_accepted_proc)
def __repr__(self):
if self.n_steps > 0:
acc_string = "# accepted = {}/{} ({}%), ".format(
self.n_accepted, self.n_steps, self.acceptance * 100
)
else:
acc_string = ""
return f"MetropolisNumpySamplerState({acc_string}rng state={self.rng})"
@partial(jax.jit, static_argnums=0)
def apply_model(machine, pars, weights):
return machine.apply(pars, weights)
class MetropolisSamplerNumpy(MetropolisSampler):
def _init_state(sampler, machine, parameters, key):
rgen = np.random.default_rng(np.asarray(key))
σ = np.zeros((sampler.n_batches, sampler.hilbert.size), dtype=sampler.dtype)
ma_out = jax.eval_shape(machine.apply, parameters, σ)
state = MetropolisNumpySamplerState(
σ=σ,
σ1=np.copy(σ),
log_values=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
log_values_1=np.zeros(sampler.n_batches, dtype=ma_out.dtype),
log_prob_corr=np.zeros(
sampler.n_batches, dtype=nkjax.dtype_real(ma_out.dtype)
),
rng=rgen,
rule_state=sampler.rule.init_state(sampler, machine, parameters, rgen),
)
if not sampler.reset_chains:
key = jnp.asarray(
state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
)
state.σ = np.copy(
sampler.rule.random_state(sampler, machine, parameters, state, key)
)
return state
def _reset(sampler, machine, parameters, state):
if sampler.reset_chains:
key = jnp.asarray(
state.rng.integers(0, 1 << 32, size=2, dtype=np.uint32), dtype=np.uint32
)
state.σ = np.copy(
sampler.rule.random_state(sampler, machine, parameters, state, key)
)
state.rule_state = sampler.rule.reset(sampler, machine, parameters, state)
state.log_values = np.copy(apply_model(machine, parameters, state.σ))
state._accepted_samples = 0
state._total_samples = 0
return state
def _sample_next(sampler, machine, parameters, state):
σ = state.σ
σ1 = state.σ1
log_values = state.log_values
log_values_1 = state.log_values_1
log_prob_corr = state.log_prob_corr
mpow = sampler.machine_pow
rgen = state.rng
accepted = 0
for sweep in range(sampler.n_sweeps):
sampler.rule.transition(sampler, machine, parameters, state, state.rng, σ)
log_values_1 = np.asarray(apply_model(machine, parameters, σ1))
random_uniform = rgen.uniform(0, 1, size=σ.shape[0])
accepted += acceptance_kernel(
σ,
σ1,
log_values,
log_values_1,
log_prob_corr,
mpow,
random_uniform,
)
state.n_steps_proc += sampler.n_sweeps * sampler.n_chains
state.n_accepted_proc += accepted
return state, state.σ
def _sample_chain(
sampler,
machine: Callable,
parameters: PyTree,
state: MetropolisNumpySamplerState,
chain_length: int,
) -> Tuple[jnp.ndarray, MetropolisNumpySamplerState]:
samples = np.empty(
(chain_length, sampler.n_chains, sampler.hilbert.size), dtype=sampler.dtype
)
for i in range(chain_length):
state, σ = sampler.sample_next(machine, parameters, state)
samples[i] = σ
return samples, state
def __repr__(sampler):
return (
"MetropolisSamplerNumpy("
+ "\n hilbert = {},".format(sampler.hilbert)
+ "\n rule = {},".format(sampler.rule)
+ "\n n_chains = {},".format(sampler.n_chains)
+ "\n machine_power = {},".format(sampler.machine_pow)
+ "\n reset_chains = {},".format(sampler.reset_chains)
+ "\n n_sweeps = {},".format(sampler.n_sweeps)
+ "\n dtype = {},".format(sampler.dtype)
+ ")"
)
def __str__(sampler):
return (
"MetropolisSamplerNumpy("
+ "rule = {}, ".format(sampler.rule)
+ "n_chains = {}, ".format(sampler.n_chains)
+ "machine_power = {}, ".format(sampler.machine_pow)
+ "n_sweeps = {}, ".format(sampler.n_sweeps)
+ "dtype = {})".format(sampler.dtype)
)
@jit(nopython=True)
def acceptance_kernel(
σ, σ1, log_values, log_values_1, log_prob_corr, machine_pow, random_uniform
):
accepted = 0
for i in range(σ.shape[0]):
prob = np.exp(
machine_pow * (log_values_1[i] - log_values[i]).real + log_prob_corr[i]
)
assert not math.isnan(prob)
if prob > random_uniform[i]:
log_values[i] = log_values_1[i]
σ[i] = σ1[i]
accepted += 1
return accepted
def MetropolisLocalNumpy(hilbert: AbstractHilbert, *args, **kwargs):
from .rules import LocalRuleNumpy
rule = LocalRuleNumpy()
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisHamiltonianNumpy(hilbert: AbstractHilbert, hamiltonian, *args, **kwargs):
from .rules import HamiltonianRuleNumpy
rule = HamiltonianRuleNumpy(hamiltonian)
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
def MetropolisCustomNumpy(
hilbert: AbstractHilbert, move_operators, move_weights=None, *args, **kwargs
):
from .rules import CustomRuleNumpy
rule = CustomRuleNumpy(move_operators, move_weights)
return MetropolisSamplerNumpy(hilbert, rule, *args, **kwargs)
| true | true |
f7214da5be355cbd7977e3d4b792fe2a9df91d2e | 393 | py | Python | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | 8 | 2019-05-29T09:07:25.000Z | 2021-12-28T13:53:50.000Z | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | null | null | null | LSTM/graphs/graph1.py | Anurag14/Inflow-prediction-Bhakra | d440ec552032084991878877ba5154ea2c452264 | [
"MIT"
] | 2 | 2019-06-02T17:48:48.000Z | 2022-03-13T01:29:16.000Z | import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('../data1.csv')
df=df.values
#time series vs reservoir levels(ft) graph
sns.set_style('darkgrid')
plt.plot(df[:,0],df[:,1],label="")
plt.plot(df[:,0],df[:,2])
plt.xlabel('Time Series')
plt.ylabel('Reservoir Levels(ft)')
plt.title('Dialy Bhakhra Reservoir Levels for past 20 years')
plt.show()
| 24.5625 | 61 | 0.725191 | import os
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('../data1.csv')
df=df.values
sns.set_style('darkgrid')
plt.plot(df[:,0],df[:,1],label="")
plt.plot(df[:,0],df[:,2])
plt.xlabel('Time Series')
plt.ylabel('Reservoir Levels(ft)')
plt.title('Dialy Bhakhra Reservoir Levels for past 20 years')
plt.show()
| true | true |
f7214e4b71ef6b1633236cc12a531f99e4afc41e | 7,828 | py | Python | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | Python_Discord_Bot_JE/venv/Lib/site-packages/discord/asset.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import io
from .errors import DiscordException
from .errors import InvalidArgument
from . import utils
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_AVATAR_FORMATS = VALID_STATIC_FORMATS | {"gif"}
class Asset:
"""Represents a CDN asset on Discord.
.. container:: operations
.. describe:: str(x)
Returns the URL of the CDN asset.
.. describe:: len(x)
Returns the length of the CDN asset's URL.
.. describe:: bool(x)
Checks if the Asset has a URL.
.. describe:: x == y
Checks if the asset is equal to another asset.
.. describe:: x != y
Checks if the asset is not equal to another asset.
.. describe:: hash(x)
Returns the hash of the asset.
"""
__slots__ = ('_state', '_url')
BASE = 'https://cdn.discordapp.com'
def __init__(self, state, url=None):
self._state = state
self._url = url
@classmethod
def _from_avatar(cls, state, user, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be None or one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not user.is_avatar_animated():
raise InvalidArgument("non animated avatars do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if user.avatar is None:
return user.default_avatar_url
if format is None:
format = 'gif' if user.is_avatar_animated() else static_format
return cls(state, '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(user, format, size))
@classmethod
def _from_icon(cls, state, object, path):
if object.icon is None:
return cls(state)
url = '/{0}-icons/{1.id}/{1.icon}.jpg'.format(path, object)
return cls(state, url)
@classmethod
def _from_cover_image(cls, state, obj):
if obj.cover_image is None:
return cls(state)
url = '/app-assets/{0.id}/store/{0.cover_image}.jpg'.format(obj)
return cls(state, url)
@classmethod
def _from_guild_image(cls, state, id, hash, key, *, format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_STATIC_FORMATS))
if hash is None:
return cls(state)
url = '/{key}/{0}/{1}.{2}?size={3}'
return cls(state, url.format(id, hash, format, size, key=key))
@classmethod
def _from_guild_icon(cls, state, guild, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not guild.is_icon_animated():
raise InvalidArgument("non animated guild icons do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if guild.icon is None:
return cls(state)
if format is None:
format = 'gif' if guild.is_icon_animated() else static_format
return cls(state, '/icons/{0.id}/{0.icon}.{1}?size={2}'.format(guild, format, size))
def __str__(self):
return self.BASE + self._url if self._url is not None else ''
def __len__(self):
if self._url:
return len(self.BASE + self._url)
return 0
def __bool__(self):
return self._url is not None
def __repr__(self):
return '<Asset url={0._url!r}>'.format(self)
def __eq__(self, other):
return isinstance(other, Asset) and self._url == other._url
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._url)
async def read(self):
"""|coro|
Retrieves the content of this asset as a :class:`bytes` object.
.. warning::
:class:`PartialEmoji` won't have a connection state if user created,
and a URL won't be present if a custom image isn't associated with
the asset, e.g. a guild with no custom icon.
.. versionadded:: 1.1
Raises
------
DiscordException
There was no valid URL or internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
-------
:class:`bytes`
The content of the asset.
"""
if not self._url:
raise DiscordException('Invalid asset (no URL provided)')
if self._state is None:
raise DiscordException('Invalid state (no ConnectionState provided)')
return await self._state.http.get_from_cdn(self.BASE + self._url)
async def save(self, fp, *, seek_begin=True):
"""|coro|
Saves this asset into a file-like object.
Parameters
----------
fp: Union[BinaryIO, :class:`os.PathLike`]
Same as in :meth:`Attachment.save`.
seek_begin: :class:`bool`
Same as in :meth:`Attachment.save`.
Raises
------
DiscordException
There was no valid URL or internal connection state.
HTTPException
Downloading the asset failed.
NotFound
The asset was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
data = await self.read()
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
| 33.452991 | 99 | 0.605646 |
import io
from .errors import DiscordException
from .errors import InvalidArgument
from . import utils
VALID_STATIC_FORMATS = frozenset({"jpeg", "jpg", "webp", "png"})
VALID_AVATAR_FORMATS = VALID_STATIC_FORMATS | {"gif"}
class Asset:
__slots__ = ('_state', '_url')
BASE = 'https://cdn.discordapp.com'
def __init__(self, state, url=None):
self._state = state
self._url = url
@classmethod
def _from_avatar(cls, state, user, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be None or one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not user.is_avatar_animated():
raise InvalidArgument("non animated avatars do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if user.avatar is None:
return user.default_avatar_url
if format is None:
format = 'gif' if user.is_avatar_animated() else static_format
return cls(state, '/avatars/{0.id}/{0.avatar}.{1}?size={2}'.format(user, format, size))
@classmethod
def _from_icon(cls, state, object, path):
if object.icon is None:
return cls(state)
url = '/{0}-icons/{1.id}/{1.icon}.jpg'.format(path, object)
return cls(state, url)
@classmethod
def _from_cover_image(cls, state, obj):
if obj.cover_image is None:
return cls(state)
url = '/app-assets/{0.id}/store/{0.cover_image}.jpg'.format(obj)
return cls(state, url)
@classmethod
def _from_guild_image(cls, state, id, hash, key, *, format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format not in VALID_STATIC_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_STATIC_FORMATS))
if hash is None:
return cls(state)
url = '/{key}/{0}/{1}.{2}?size={3}'
return cls(state, url.format(id, hash, format, size, key=key))
@classmethod
def _from_guild_icon(cls, state, guild, *, format=None, static_format='webp', size=1024):
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 4096")
if format is not None and format not in VALID_AVATAR_FORMATS:
raise InvalidArgument("format must be one of {}".format(VALID_AVATAR_FORMATS))
if format == "gif" and not guild.is_icon_animated():
raise InvalidArgument("non animated guild icons do not support gif format")
if static_format not in VALID_STATIC_FORMATS:
raise InvalidArgument("static_format must be one of {}".format(VALID_STATIC_FORMATS))
if guild.icon is None:
return cls(state)
if format is None:
format = 'gif' if guild.is_icon_animated() else static_format
return cls(state, '/icons/{0.id}/{0.icon}.{1}?size={2}'.format(guild, format, size))
def __str__(self):
return self.BASE + self._url if self._url is not None else ''
def __len__(self):
if self._url:
return len(self.BASE + self._url)
return 0
def __bool__(self):
return self._url is not None
def __repr__(self):
return '<Asset url={0._url!r}>'.format(self)
def __eq__(self, other):
return isinstance(other, Asset) and self._url == other._url
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._url)
async def read(self):
if not self._url:
raise DiscordException('Invalid asset (no URL provided)')
if self._state is None:
raise DiscordException('Invalid state (no ConnectionState provided)')
return await self._state.http.get_from_cdn(self.BASE + self._url)
async def save(self, fp, *, seek_begin=True):
data = await self.read()
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
| true | true |
f7214e57978a52886d37352d317afc38f1a60349 | 3,932 | py | Python | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_c23_award_financial_1.py | chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | 3de8cedf69d5a0c9fad8239734bd6291cf583936 | [
"CC0-1.0"
] | null | null | null | from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, AwardProcurementFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c23_award_financial_1'
def test_column_headers(database):
expected_subset = {"row_number", "transaction_obligated_amou_sum", "federal_action_obligation_sum"}
actual = set(query_columns(_FILE, database))
assert expected_subset <= actual
def test_success(database):
""" Test that a four digit object class with no flag is a success, and a three digit object class with a flag is a success"""
# Create a 12 character random piid
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_three = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid,
allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid,
allocation_transfer_agency = None)
# And add a row for a different piid
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two,
allocation_transfer_agency = None)
third_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = 123)
third_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
third_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1)
second_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -9999)
third_piid_ap_row = AwardProcurementFactory(piid = piid_three, federal_action_obligation = -9999)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one,
third_piid_row_one, first_ap_row, second_ap_row, third_ap_row, second_piid_ap_row, third_piid_ap_row,
third_piid_row_two])
assert errors == 0
def test_failure(database):
""" Test that a three digit object class with no flag is an error"""
# Create a 12 character random piid
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid, allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid, allocation_transfer_agency = None)
# And add a row that shouldn't be included
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two, allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
other_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -1111)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one, first_ap_row, second_ap_row, other_piid_ap_row])
assert errors == 2
| 65.533333 | 164 | 0.739827 | from random import choice
from string import ascii_uppercase, ascii_lowercase, digits
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory, AwardProcurementFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c23_award_financial_1'
def test_column_headers(database):
expected_subset = {"row_number", "transaction_obligated_amou_sum", "federal_action_obligation_sum"}
actual = set(query_columns(_FILE, database))
assert expected_subset <= actual
def test_success(database):
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_three = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid,
allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid,
allocation_transfer_agency = None)
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two,
allocation_transfer_agency = None)
third_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = 123)
third_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 8888, piid = piid_three,
allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
third_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1)
second_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -9999)
third_piid_ap_row = AwardProcurementFactory(piid = piid_three, federal_action_obligation = -9999)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one,
third_piid_row_one, first_ap_row, second_ap_row, third_ap_row, second_piid_ap_row, third_piid_ap_row,
third_piid_row_two])
assert errors == 0
def test_failure(database):
piid = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
piid_two = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for i in range(12))
first_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 1100, piid = piid, allocation_transfer_agency = None)
first_piid_row_two = AwardFinancialFactory(transaction_obligated_amou = 11, piid = piid, allocation_transfer_agency = None)
second_piid_row_one = AwardFinancialFactory(transaction_obligated_amou = 9999, piid = piid_two, allocation_transfer_agency = None)
first_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -1100)
second_ap_row = AwardProcurementFactory(piid = piid, federal_action_obligation = -10)
other_piid_ap_row = AwardProcurementFactory(piid = piid_two, federal_action_obligation = -1111)
errors = number_of_errors(_FILE, database, models=[first_piid_row_one, first_piid_row_two, second_piid_row_one, first_ap_row, second_ap_row, other_piid_ap_row])
assert errors == 2
| true | true |
f7214fd6196b206a0fc7264a73b8e0fd22653169 | 1,766 | py | Python | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null | src/main-ja.py | junjihashimoto/wav2vec-2-nix | f104280586cf78d0fc5f280ea013f6bc676cd05e | [
"BSD-3-Clause"
] | null | null | null |
# https://huggingface.co/vumichien/wav2vec2-large-xlsr-japanese
import torch
import torchaudio
import librosa
from datasets import load_dataset
import MeCab
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
# config
wakati = MeCab.Tagger("-Owakati")
chars_to_ignore_regex = '[\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\、\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\。\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\「\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\」\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\…\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\・]'
# load data, processor and model
test_dataset = load_dataset("common_voice", "ja", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
model = Wav2Vec2ForCTC.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)
# Preprocessing the datasets.
def speech_file_to_array_fn(batch):
batch["sentence"] = wakati.parse(batch["sentence"]).strip()
batch["sentence"] = re.sub(chars_to_ignore_regex,'', batch["sentence"]).strip()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
print(batch["sentence"])
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
| 47.72973 | 325 | 0.64043 |
import torch
import torchaudio
import librosa
from datasets import load_dataset
import MeCab
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
import re
wakati = MeCab.Tagger("-Owakati")
chars_to_ignore_regex = '[\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\,\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\、\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\。\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\.\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\「\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\」\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\…\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\?\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\・]'
test_dataset = load_dataset("common_voice", "ja", split="test[:2%]")
processor = Wav2Vec2Processor.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
model = Wav2Vec2ForCTC.from_pretrained("vumichien/wav2vec2-large-xlsr-japanese")
resampler = lambda sr, y: librosa.resample(y.numpy().squeeze(), sr, 16_000)
def speech_file_to_array_fn(batch):
batch["sentence"] = wakati.parse(batch["sentence"]).strip()
batch["sentence"] = re.sub(chars_to_ignore_regex,'', batch["sentence"]).strip()
speech_array, sampling_rate = torchaudio.load(batch["path"])
batch["speech"] = resampler(sampling_rate, speech_array).squeeze()
print(batch["sentence"])
return batch
test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
with torch.no_grad():
logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
print("Prediction:", processor.batch_decode(predicted_ids))
print("Reference:", test_dataset["sentence"][:2])
| true | true |
f72150878c28e84523ea2167e57b4bc5ae34cb23 | 1,024 | py | Python | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | educative/TreesBFS/zigzagTraversal.py | j-dags/Algos | 8201171c983bf8464b1d25526a16493966eb426f | [
"MIT"
] | null | null | null | def zigzagTraversal(root):
queue = [root] # initialize queue to root node
result = []
while queue: # iterate through loop while queue is not empty
arr = []
# levelSize prevents us from looping pasts current level in queue
levelSize = len(queue)
for _ in range(levelSize):
# these two lines act as .shift() method
curr = queue[0]
queue = queue[1:]
# .unshift() curr.val on odd levels
if len(result) % 2 == 0: arr.append(curr.val)
else: arr = [curr.val] + arr
# add child nodes to queue
if curr.left: queue.append(curr.left)
if curr.right: queue.append(curr.right)
result.append(arr)
return result
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six = Node(6)
seven = Node(7)
one.left = two
one.right = three
two.left = four
two.right = five
three.left = six
three.right = seven
print(zigzagTraversal(one))
| 20.897959 | 69 | 0.636719 | def zigzagTraversal(root):
queue = [root]
result = []
while queue:
arr = []
levelSize = len(queue)
for _ in range(levelSize):
curr = queue[0]
queue = queue[1:]
if len(result) % 2 == 0: arr.append(curr.val)
else: arr = [curr.val] + arr
if curr.left: queue.append(curr.left)
if curr.right: queue.append(curr.right)
result.append(arr)
return result
class Node:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
one = Node(1)
two = Node(2)
three = Node(3)
four = Node(4)
five = Node(5)
six = Node(6)
seven = Node(7)
one.left = two
one.right = three
two.left = four
two.right = five
three.left = six
three.right = seven
print(zigzagTraversal(one))
| true | true |
f7215088d8da5bd6c6c28282619051368571f5b9 | 1,260 | py | Python | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | 1 | 2020-11-16T17:11:43.000Z | 2020-11-16T17:11:43.000Z | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | fix_size.py | Dai-z/label-converters | 0da9bfa620c6ab222ed97b82c256401fcb5804cf | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
import cv2
classes = []
def convert_annotation(image, args):
if args.anno_dir:
anno_file = join(args.anno_dir, image.split('.')[0]) + '.xml'
if not os.path.isfile(anno_file):
return False
in_file = open(anno_file)
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
if (w <= 0 or h <= 0):
print('Fixing: '+anno_file)
img_file = anno_file.replace('Annotations', 'JPEGImages').replace('annotations', 'JPEGImages').replace('.xml','.jpg')
img = cv2.imread(img_file)
h, w, _ = img.shape
size.find('width').text=str(w)
size.find('height').text=str(h)
tree.write(anno_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
required=True,
help="Directory for VOC annotation xml files")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
anno_files.sort()
for anno in anno_files:
convert_annotation(anno, args)
| 28.636364 | 125 | 0.618254 | import xml.etree.ElementTree as ET
import os
from os import listdir, getcwd
from os.path import join
import argparse
import cv2
classes = []
def convert_annotation(image, args):
if args.anno_dir:
anno_file = join(args.anno_dir, image.split('.')[0]) + '.xml'
if not os.path.isfile(anno_file):
return False
in_file = open(anno_file)
tree = ET.parse(in_file)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
if (w <= 0 or h <= 0):
print('Fixing: '+anno_file)
img_file = anno_file.replace('Annotations', 'JPEGImages').replace('annotations', 'JPEGImages').replace('.xml','.jpg')
img = cv2.imread(img_file)
h, w, _ = img.shape
size.find('width').text=str(w)
size.find('height').text=str(h)
tree.write(anno_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--anno_dir",
required=True,
help="Directory for VOC annotation xml files")
args = parser.parse_args()
anno_files = listdir(args.anno_dir)
anno_files.sort()
for anno in anno_files:
convert_annotation(anno, args)
| true | true |
f721509f3f35df7cb0d1888befa5bc53be9b4653 | 422 | py | Python | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | 3 | 2022-03-02T19:30:26.000Z | 2022-03-04T10:55:10.000Z | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | 3 | 2022-03-08T12:25:16.000Z | 2022-03-16T22:30:55.000Z | tmm/apps/translation_management_tool/migrations/0006_auto_20211105_1444.py | 2567910/tmm | c36bbb508ed8ea4fa8e814af817c5d4f4ae69d4c | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-11-05 14:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translation_management_tool', '0005_auto_20211105_1418'),
]
operations = [
migrations.AlterField(
model_name='language',
name='languages',
field=models.CharField(blank=True, max_length=7),
),
]
| 22.210526 | 67 | 0.625592 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('translation_management_tool', '0005_auto_20211105_1418'),
]
operations = [
migrations.AlterField(
model_name='language',
name='languages',
field=models.CharField(blank=True, max_length=7),
),
]
| true | true |
f7215171b2591e8446240124dc5466d1022604c1 | 631 | py | Python | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | locations/items.py | wessport/allthecolleges | 7741ed0bef7359d6d871963c527a3d9e31303c7c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# class AddressItem(scrapy.Item):
# # define the fields for your item here like:
# # name = scrapy.Field()
# pass
class AddressItem(scrapy.Item):
# define the fields for your item here:
school_id = scrapy.Field()
name = scrapy.Field()
street_address = scrapy.Field()
city = scrapy.Field()
state = scrapy.Field()
postcode = scrapy.Field()
ref = scrapy.Field()
website = scrapy.Field()
extras = scrapy.Field()
| 22.535714 | 52 | 0.652932 |
import scrapy
eld()
name = scrapy.Field()
street_address = scrapy.Field()
city = scrapy.Field()
state = scrapy.Field()
postcode = scrapy.Field()
ref = scrapy.Field()
website = scrapy.Field()
extras = scrapy.Field()
| true | true |
f72152194d185954a947a244a1109ed7161112ed | 26,894 | py | Python | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | tensorflow_estimator/contrib/estimator/python/estimator/rnn.py | CheukNgai/estimator | 673a50bd5ffa70d0672ce47e40f5075f1cbe0a62 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import training_util
# The defaults are historical artifacts of the initial implementation, but seem
# reasonable choices.
_DEFAULT_LEARNING_RATE = 0.05
_DEFAULT_CLIP_NORM = 5.0
_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,
'lstm': rnn_cell.BasicLSTMCell,
'gru': rnn_cell.GRUCell}
# Indicates no value was provided by the user to a kwarg.
USE_DEFAULT = object()
def _single_rnn_cell(num_units, cell_type):
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):
raise ValueError('Supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):
"""Convenience function to create `rnn_cell_fn` for canned RNN Estimators.
Args:
num_units: Iterable of integer number of hidden units per RNN layer.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`.
Returns:
A function that takes a single argument, an instance of
`tf.estimator.ModeKeys`, and returns an instance derived from
`tf.nn.rnn_cell.RNNCell`.
Raises:
ValueError: If cell_type is not supported.
"""
def rnn_cell_fn(mode):
# Unused. Part of the rnn_cell_fn interface since user specified functions
# may need different behavior across modes (e.g. dropout).
del mode
cells = [_single_rnn_cell(n, cell_type) for n in num_units]
if len(cells) == 1:
return cells[0]
return rnn_cell.MultiRNNCell(cells)
return rnn_cell_fn
def _concatenate_context_input(sequence_input, context_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
def _select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
output_units = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
start_indices = math_ops.to_int64(
math_ops.range(batch_size) * padded_length)
last_indices = start_indices + sequence_lengths - 1
reshaped_activations = array_ops.reshape(
activations, [batch_size * padded_length, output_units])
last_activations = array_ops.gather(reshaped_activations, last_indices)
last_activations.set_shape([activations.shape[0], activations.shape[2]])
return last_activations
def _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,
context_feature_columns, input_layer_partitioner,
return_sequences=False):
"""Function builder for a rnn logit_fn.
Args:
output_units: An int indicating the dimension of the logit layer.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell`.
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input.
context_feature_columns: An iterable containing the `FeatureColumn`s
that represent contextual input.
input_layer_partitioner: Partitioner for input layer.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
Returns:
A logit_fn (see below).
Raises:
ValueError: If output_units is not an int.
"""
if not isinstance(output_units, int):
raise ValueError('output_units must be an int. Given type: {}'.format(
type(output_units)))
def rnn_logit_fn(features, mode):
"""Recurrent Neural Network logit_fn.
Args:
features: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
mode: Optional. Specifies if this training, evaluation or prediction. See
`ModeKeys`.
Returns:
A `Tensor` representing the logits.
"""
with variable_scope.variable_scope(
'sequence_input_layer',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
sequence_input, sequence_length = seq_fc.sequence_input_layer(
features=features, feature_columns=sequence_feature_columns)
summary.histogram('sequence_length', sequence_length)
if context_feature_columns:
context_input = feature_column_lib.input_layer(
features=features,
feature_columns=context_feature_columns)
sequence_input = _concatenate_context_input(sequence_input,
context_input)
cell = rnn_cell_fn(mode)
# Ignore output state.
rnn_outputs, _ = rnn.dynamic_rnn(
cell=cell,
inputs=sequence_input,
sequence_length=sequence_length,
dtype=dtypes.float32,
time_major=False)
if not return_sequences:
rnn_outputs = _select_last_activations(rnn_outputs, sequence_length)
with variable_scope.variable_scope('logits', values=(rnn_outputs,)):
logits = core_layers.dense(
rnn_outputs,
units=output_units,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer())
return logits
return rnn_logit_fn
def _rnn_model_fn(features,
labels,
mode,
head,
rnn_cell_fn,
sequence_feature_columns,
context_feature_columns,
return_sequences=False,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
"""Recurrent Neural Net model_fn.
Args:
features: dict of `Tensor` and `SparseTensor` objects returned from
`input_fn`.
labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
head: A `head_lib._Head` instance.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell`.
sequence_feature_columns: Iterable containing `FeatureColumn`s that
represent sequential model inputs.
context_feature_columns: Iterable containing `FeatureColumn`s that
represent model inputs not associated with a specific timestep.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
optimizer: String, `tf.Optimizer` object, or callable that creates the
optimizer to use for training. If not specified, will use the Adagrad
optimizer with a default learning rate of 0.05 and gradient clip norm of
5.0.
input_layer_partitioner: Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Returns:
An `EstimatorSpec` instance.
Raises:
ValueError: If mode or optimizer is invalid, or features has the wrong type.
"""
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
# If user does not provide an optimizer instance, use the optimizer specified
# by the string with default learning rate and gradient clipping.
if not isinstance(optimizer, optimizer_lib.Optimizer):
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'rnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
logit_fn = _rnn_logit_fn_builder(
output_units=head.logits_dimension,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
input_layer_partitioner=input_layer_partitioner,
return_sequences=return_sequences)
logits = logit_fn(features=features, mode=mode)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):
"""Assert arguments are valid and return rnn_cell_fn."""
if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):
raise ValueError(
'num_units and cell_type must not be specified when using rnn_cell_fn'
)
if not rnn_cell_fn:
if cell_type == USE_DEFAULT:
cell_type = 'basic_rnn'
rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)
return rnn_cell_fn
class RNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow RNN models.
Trains a recurrent neural network model to classify instances into one of
multiple classes.
Example:
```python
token_sequence = sequence_categorical_column_with_hash_bucket(...)
token_emb = embedding_column(categorical_column=token_sequence, ...)
estimator = RNNClassifier(
sequence_feature_columns=[token_emb],
num_units=[32, 16], cell_type='lstm')
# Input builders
def input_fn_train: # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval: # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict: # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column` is not `None`, a feature with
`key=weight_column` whose value is a `Tensor`.
* for each `column` in `sequence_feature_columns`:
- a feature with `key=column.name` whose `value` is a `SparseTensor`.
* for each `column` in `context_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss is calculated by using softmax cross entropy.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
input_layer_partitioner=None,
config=None):
"""Initializes a `RNNClassifier` instance.
Args:
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input. All items in the set should either be
sequence columns (e.g. `sequence_numeric_column`) or constructed from
one (e.g. `embedding_column` with `sequence_categorical_column_*` as
input).
context_feature_columns: An iterable containing the `FeatureColumn`s
for contextual input. The data represented by these columns will be
replicated and given to the RNN at each timestep. These columns must be
instances of classes derived from `_DenseColumn` such as
`numeric_column`, not the sequential variants.
num_units: Iterable of integer number of hidden units per RNN layer. If
set, `cell_type` must also be specified and `rnn_cell_fn` must be
`None`.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
must be `None`.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
construct the RNN. If set, `num_units` and `cell_type` cannot be set.
This is for advanced users who need additional customization beyond
`num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
needed for stacked RNNs.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
n_classes: Number of label classes. Defaults to 2, namely binary
classification. Must be > 1.
weight_column: A string or a `_NumericColumn` created by
`tf.feature_column.numeric_column` defining feature column representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example. If it is a string, it is
used as a key to fetch weight tensor from the `features`. If it is a
`_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
then weight_column.normalizer_fn is applied on it to get weight tensor.
label_vocabulary: A list of strings represents possible label values. If
given, labels must be string type and have any value in
`label_vocabulary`. If it is not given, that means labels are
already encoded as integer or float within [0, 1] for `n_classes=2` and
encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
Also there will be errors if vocabulary is not provided and labels are
string.
optimizer: An instance of `tf.Optimizer` or string specifying optimizer
type. Defaults to Adagrad optimizer.
loss_reduction: One of `tf.losses.Reduction` except `NONE`. Describes how
to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Raises:
ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
compatible.
"""
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss( # pylint: disable=protected-access
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss( # pylint: disable=protected-access
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=False,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class RNNEstimator(estimator.Estimator):
"""An Estimator for TensorFlow RNN models with user-specified head.
Example:
```python
token_sequence = sequence_categorical_column_with_hash_bucket(...)
token_emb = embedding_column(categorical_column=token_sequence, ...)
estimator = RNNEstimator(
head=tf.contrib.estimator.regression_head(),
sequence_feature_columns=[token_emb],
num_units=[32, 16], cell_type='lstm')
# Or with custom RNN cell:
def rnn_cell_fn(mode):
cells = [ tf.contrib.rnn.LSTMCell(size) for size in [32, 16] ]
if mode == tf.estimator.ModeKeys.TRAIN:
cells = [ tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.5)
for cell in cells ]
return tf.contrib.rnn.MultiRNNCell(cells)
estimator = RNNEstimator(
head=tf.contrib.estimator.regression_head(),
sequence_feature_columns=[token_emb],
rnn_cell_fn=rnn_cell_fn)
# Input builders
def input_fn_train: # returns x, y
pass
estimator.train(input_fn=input_fn_train, steps=100)
def input_fn_eval: # returns x, y
pass
metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
def input_fn_predict: # returns x, None
pass
predictions = estimator.predict(input_fn=input_fn_predict)
```
Input of `train` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if the head's `weight_column` is not `None`, a feature with
`key=weight_column` whose value is a `Tensor`.
* for each `column` in `sequence_feature_columns`:
- a feature with `key=column.name` whose `value` is a `SparseTensor`.
* for each `column` in `context_feature_columns`:
- if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `_WeightedCategoricalColumn`, two features: the first
with `key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `_DenseColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
Loss and predicted output are determined by the specified head.
@compatibility(eager)
Estimators are not compatible with eager execution.
@end_compatibility
"""
def __init__(self,
head,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
return_sequences=False,
model_dir=None,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
"""Initializes a `RNNEstimator` instance.
Args:
head: A `_Head` instance constructed with a method such as
`tf.contrib.estimator.multi_label_head`. This specifies the model's
output and loss function to be optimized.
sequence_feature_columns: An iterable containing the `FeatureColumn`s
that represent sequential input. All items in the set should either be
sequence columns (e.g. `sequence_numeric_column`) or constructed from
one (e.g. `embedding_column` with `sequence_categorical_column_*` as
input).
context_feature_columns: An iterable containing the `FeatureColumn`s
for contextual input. The data represented by these columns will be
replicated and given to the RNN at each timestep. These columns must be
instances of classes derived from `_DenseColumn` such as
`numeric_column`, not the sequential variants.
num_units: Iterable of integer number of hidden units per RNN layer. If
set, `cell_type` must also be specified and `rnn_cell_fn` must be
`None`.
cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
`'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
must be `None`.
rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
construct the RNN. If set, `num_units` and `cell_type` cannot be set.
This is for advanced users who need additional customization beyond
`num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
needed for stacked RNNs.
return_sequences: A boolean indicating whether to return the last output
in the output sequence, or the full sequence.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
optimizer: An instance of `tf.Optimizer` or string specifying optimizer
type. Defaults to Adagrad optimizer.
input_layer_partitioner: Optional. Partitioner for input layer. Defaults
to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
config: `RunConfig` object to configure the runtime settings.
Raises:
ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
compatible.
"""
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=return_sequences,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| 41.696124 | 112 | 0.704618 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow_estimator.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import training_util
_DEFAULT_LEARNING_RATE = 0.05
_DEFAULT_CLIP_NORM = 5.0
_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,
'lstm': rnn_cell.BasicLSTMCell,
'gru': rnn_cell.GRUCell}
USE_DEFAULT = object()
def _single_rnn_cell(num_units, cell_type):
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, rnn_cell.RNNCell):
raise ValueError('Supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):
def rnn_cell_fn(mode):
del mode
cells = [_single_rnn_cell(n, cell_type) for n in num_units]
if len(cells) == 1:
return cells[0]
return rnn_cell.MultiRNNCell(cells)
return rnn_cell_fn
def _concatenate_context_input(sequence_input, context_input):
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
def _select_last_activations(activations, sequence_lengths):
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
output_units = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
start_indices = math_ops.to_int64(
math_ops.range(batch_size) * padded_length)
last_indices = start_indices + sequence_lengths - 1
reshaped_activations = array_ops.reshape(
activations, [batch_size * padded_length, output_units])
last_activations = array_ops.gather(reshaped_activations, last_indices)
last_activations.set_shape([activations.shape[0], activations.shape[2]])
return last_activations
def _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,
context_feature_columns, input_layer_partitioner,
return_sequences=False):
if not isinstance(output_units, int):
raise ValueError('output_units must be an int. Given type: {}'.format(
type(output_units)))
def rnn_logit_fn(features, mode):
with variable_scope.variable_scope(
'sequence_input_layer',
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner):
sequence_input, sequence_length = seq_fc.sequence_input_layer(
features=features, feature_columns=sequence_feature_columns)
summary.histogram('sequence_length', sequence_length)
if context_feature_columns:
context_input = feature_column_lib.input_layer(
features=features,
feature_columns=context_feature_columns)
sequence_input = _concatenate_context_input(sequence_input,
context_input)
cell = rnn_cell_fn(mode)
rnn_outputs, _ = rnn.dynamic_rnn(
cell=cell,
inputs=sequence_input,
sequence_length=sequence_length,
dtype=dtypes.float32,
time_major=False)
if not return_sequences:
rnn_outputs = _select_last_activations(rnn_outputs, sequence_length)
with variable_scope.variable_scope('logits', values=(rnn_outputs,)):
logits = core_layers.dense(
rnn_outputs,
units=output_units,
activation=None,
kernel_initializer=init_ops.glorot_uniform_initializer())
return logits
return rnn_logit_fn
def _rnn_model_fn(features,
labels,
mode,
head,
rnn_cell_fn,
sequence_feature_columns,
context_feature_columns,
return_sequences=False,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
if not isinstance(features, dict):
raise ValueError('features should be a dictionary of `Tensor`s. '
'Given type: {}'.format(type(features)))
if not isinstance(optimizer, optimizer_lib.Optimizer):
optimizer = optimizers.get_optimizer_instance(
optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)
num_ps_replicas = config.num_ps_replicas if config else 0
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
'rnn',
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = input_layer_partitioner or (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=64 << 20))
logit_fn = _rnn_logit_fn_builder(
output_units=head.logits_dimension,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=sequence_feature_columns,
context_feature_columns=context_feature_columns,
input_layer_partitioner=input_layer_partitioner,
return_sequences=return_sequences)
logits = logit_fn(features=features, mode=mode)
def _train_op_fn(loss):
return optimizer.minimize(
loss,
global_step=training_util.get_global_step())
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
def _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):
if rnn_cell_fn and (num_units or cell_type != USE_DEFAULT):
raise ValueError(
'num_units and cell_type must not be specified when using rnn_cell_fn'
)
if not rnn_cell_fn:
if cell_type == USE_DEFAULT:
cell_type = 'basic_rnn'
rnn_cell_fn = _make_rnn_cell_fn(num_units, cell_type)
return rnn_cell_fn
class RNNClassifier(estimator.Estimator):
def __init__(self,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
model_dir=None,
n_classes=2,
weight_column=None,
label_vocabulary=None,
optimizer='Adagrad',
loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE,
input_layer_partitioner=None,
config=None):
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
if n_classes == 2:
head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
else:
head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(
n_classes,
weight_column=weight_column,
label_vocabulary=label_vocabulary,
loss_reduction=loss_reduction)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=False,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNClassifier, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
class RNNEstimator(estimator.Estimator):
def __init__(self,
head,
sequence_feature_columns,
context_feature_columns=None,
num_units=None,
cell_type=USE_DEFAULT,
rnn_cell_fn=None,
return_sequences=False,
model_dir=None,
optimizer='Adagrad',
input_layer_partitioner=None,
config=None):
rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)
def _model_fn(features, labels, mode, config):
return _rnn_model_fn(
features=features,
labels=labels,
mode=mode,
head=head,
rnn_cell_fn=rnn_cell_fn,
sequence_feature_columns=tuple(sequence_feature_columns or []),
context_feature_columns=tuple(context_feature_columns or []),
return_sequences=return_sequences,
optimizer=optimizer,
input_layer_partitioner=input_layer_partitioner,
config=config)
super(RNNEstimator, self).__init__(
model_fn=_model_fn, model_dir=model_dir, config=config)
| true | true |
f72153ee7a89b3a20eafa2cc82f8c626ef1eda68 | 688 | py | Python | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-12-28T09:28:43.000Z | 2021-05-11T02:01:47.000Z | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | null | null | null | uniseg/graphemeclustertest.py | olivier-compilatio/uniseg-python | 8513a59e9c701a173c8001655e592c18f8840b16 | [
"MIT"
] | 2 | 2019-07-23T09:11:55.000Z | 2019-10-02T17:13:53.000Z | #!/usr/bin/env python
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import doctest
import unittest
from . import graphemecluster
from .db import iter_grapheme_cluster_break_tests
from .test import implement_break_tests
@implement_break_tests(graphemecluster.grapheme_cluster_boundaries,
                       iter_grapheme_cluster_break_tests())
class GraphemeClusterTest(unittest.TestCase):
    """Unicode grapheme-cluster boundary conformance tests.

    The individual test methods are generated by the `implement_break_tests`
    class decorator from the GraphemeBreakTest data supplied by
    `iter_grapheme_cluster_break_tests()`, hence the empty body.
    """
    pass
def load_tests(loader, tests, ignore):
    """unittest `load_tests` protocol hook.

    Adds the doctests embedded in the `graphemecluster` module to the
    generated conformance test suite.
    """
    tests.addTests(doctest.DocTestSuite(graphemecluster))
    return tests


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 22.933333 | 67 | 0.694767 |
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import doctest
import unittest
from . import graphemecluster
from .db import iter_grapheme_cluster_break_tests
from .test import implement_break_tests
@implement_break_tests(graphemecluster.grapheme_cluster_boundaries,
iter_grapheme_cluster_break_tests())
class GraphemeClusterTest(unittest.TestCase):
pass
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(graphemecluster))
return tests
if __name__ == '__main__':
unittest.main()
| true | true |
f721549c945034a707dcd61c8eb272e55a908d06 | 6,155 | py | Python | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 213 | 2018-07-05T21:21:21.000Z | 2022-03-22T04:54:53.000Z | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 259 | 2018-06-22T16:46:33.000Z | 2022-03-23T19:39:15.000Z | tests/pytest_extension/meta/test_all.py | broglep-work/python-pytest-cases | 4976c0073a2fad5fbe5de34a5d1199efda0b7da9 | [
"BSD-3-Clause"
] | 27 | 2019-03-26T12:46:49.000Z | 2022-02-21T16:56:23.000Z | import ast
import os
import re
import shlex
import sys

from os.path import join, dirname, isdir, exists

import pytest

from pytest_cases.common_mini_six import string_types
# Make the list of all tests that we will have to execute (each in an independent pytest runner).
THIS_DIR = dirname(__file__)
tests_raw_folder = join(THIS_DIR, 'raw')
# Every file/folder under 'raw' that does not start with '_' is a meta-test case.
test_files = [f for f in os.listdir(tests_raw_folder) if not f.startswith('_')]

# Matches the '# META\n# <dict>\n# END META' header at the top of each raw test
# file; group 'asserts_dct' captures the dict literal of expected pytest outcomes.
META_REGEX = re.compile(
    """^(# META
# )(?P<asserts_dct>.*)(
# END META)
.*""")
@pytest.mark.parametrize('test_to_run', test_files, ids=str)
def test_run_all_tests(test_to_run, testdir):
    """
    This is a meta-test. It is executed for each test file in the 'raw' folder.

    For each of them, the file is retrieved and the expected test results are read from its
    '# META' header. Then a dedicated pytest runner is run on this file, and the results are
    compared with the expected ones.

    See https://docs.pytest.org/en/latest/writing_plugins.html

    :param test_to_run: file or folder name under 'raw' to execute in a fresh pytest session.
    :param testdir: pytest's built-in fixture that runs an isolated session in a temp dir.
    :return:
    """
    cmdargs = []
    conf_file_path = None
    test_to_run_path = join(tests_raw_folder, test_to_run)
    if isdir(test_to_run_path):
        # A folder-based case may ship extra command-line args and a conftest.
        test_folder_path = test_to_run_path

        # check if there is a cmdargs file
        cmdargs_file_path = join(test_folder_path, "cmdargs.txt")
        if exists(cmdargs_file_path):
            with open(cmdargs_file_path) as c:
                cmdargs = c.read()
            cmdargs = process_cmdargs(cmdargs)

        # check if there is a conf file
        conf_file_path = join(test_folder_path, "conf.py")
        if exists(conf_file_path):
            with open(conf_file_path) as c:
                cfg_contents = c.read()

            # Create a temporary conftest.py file in the isolated test dir
            print("\nConfig contents: %s" % cfg_contents)
            testdir.makeconftest(cfg_contents)

        # the test file should have the same name than the dir
        test_to_run = test_to_run + ".py"
        test_to_run_path = join(test_folder_path, test_to_run)
        if not exists(test_to_run_path):
            raise ValueError("Test file %s not found in folder %s" % (test_to_run, test_folder_path))

    with open(test_to_run_path) as f:
        # create a temporary pytest test file
        test_file_contents = f.read()
        testdir.makepyfile(test_file_contents)

    # Grab the expected outcomes declared in the '# META' header
    m = META_REGEX.match(test_file_contents)
    if m is None:
        raise ValueError("test file '%s' does not contain the META-header" % test_to_run)
    asserts_dct_str = m.groupdict()['asserts_dct']
    asserts_dct = ast.literal_eval(asserts_dct_str)

    # Here we run pytest
    print("\nTesting that running pytest on file %s with config file %s results in %s."
          "" % (test_to_run, conf_file_path, str(asserts_dct)))
    print("For debug, temp dir is: %s" % testdir.tmpdir)

    # protect against pycharm fiddling with the config: restore a stock
    # _prepareconfig if PyCharm's runner has monkeypatched it
    from _pytest import config
    jb_prepareconfig = config._prepareconfig
    if jb_prepareconfig.__module__ != config.get_config.__module__:
        # we are in pycharm ! Fix that
        config._prepareconfig = get_pytest_prepare_config()

    # ********* RUN *********
    result = testdir.runpytest(*cmdargs)  # ("-q")

    # put back the PyCharm hack
    config._prepareconfig = jb_prepareconfig

    # Here we check that everything is ok
    try:
        result.assert_outcomes(**asserts_dct)
    except Exception as e:
        err = Exception("Error while asserting that %s results in %s. Actual results: %s"
                        "" % (test_to_run, str(asserts_dct), result.parseoutcomes()))
        err.__cause__ = e
        raise err
def get_pytest_prepare_config(dynamic=False):
    """Returns a clean replacement for pytest's `_prepareconfig` function.

    PyCharm's test runner monkeypatches `_pytest.config._prepareconfig`; the
    callable returned here restores the stock behavior so `testdir.runpytest`
    behaves as if launched from a plain command line.

    :param dynamic: if True, the genuine `_prepareconfig` source is read from
        the installed `_pytest.config` module and exec'd; if False (default),
        a vendored copy of that function is used instead.
    :return: a `_prepareconfig(args=None, plugins=None)` callable.
    """
    import py
    import shlex
    if dynamic:
        from _pytest import config
        with open(config.__file__) as cfg_file_original:
            # Capture the source of `_prepareconfig`: start at its `def` line
            # and stop at the first subsequent non-indented line.
            _capture = False
            all_lines = []
            for l in cfg_file_original.readlines():
                if l.startswith("def _prepareconfig"):
                    _capture = True
                    all_lines.append(l)
                elif _capture:
                    if l.startswith(" "):
                        all_lines.append(l)
                    else:
                        break
        from _pytest.config import get_config
        g = globals()
        l = locals()
        prepare_cfg_code = "".join(all_lines)
        # NOTE(review): `exec(code, l, g)` passes locals as the globals
        # mapping and vice versa -- presumably intentional so the new
        # function lands in `g`; confirm before changing.
        exec(prepare_cfg_code, l, g)
        real_prepare_config = g['_prepareconfig']
    else:
        import sys
        from _pytest.config import get_config

        def real_prepare_config(args=None, plugins=None):
            # Vendored copy of pytest's `_prepareconfig`.
            if args is None:
                args = sys.argv[1:]
            elif isinstance(args, py.path.local):
                args = [str(args)]
            elif not isinstance(args, (tuple, list)):
                if not isinstance(args, string_types):
                    raise ValueError("not a string or argument list: %r" % (args,))
                # Windows paths contain backslashes, hence posix=False there.
                args = shlex.split(args, posix=sys.platform != "win32")
            config = get_config()
            pluginmanager = config.pluginmanager
            try:
                if plugins:
                    for plugin in plugins:
                        if isinstance(plugin, py.builtin._basestring):
                            pluginmanager.consider_pluginarg(plugin)
                        else:
                            pluginmanager.register(plugin)
                return pluginmanager.hook.pytest_cmdline_parse(
                    pluginmanager=pluginmanager, args=args)
            except BaseException:
                # Undo partial configuration before propagating the error.
                config._ensure_unconfigure()
                raise
    return real_prepare_config
def process_cmdargs(cmdargs):
    """Splits a raw command-line string (read from a 'cmdargs.txt' file) into
    an argument list suitable for `testdir.runpytest(*args)`.

    Uses the same platform rule as pytest itself: POSIX quoting everywhere
    except on Windows, where backslashes in paths must not be treated as
    escape characters. This keeps behavior consistent with
    `real_prepare_config` above, which also splits with
    ``posix=sys.platform != "win32"``.
    """
    return shlex.split(cmdargs, posix=sys.platform != "win32")
| 36.420118 | 108 | 0.605199 | import ast
import os
import shlex
import re
from os.path import join, dirname, isdir, exists
import pytest
from pytest_cases.common_mini_six import string_types
THIS_DIR = dirname(__file__)
tests_raw_folder = join(THIS_DIR, 'raw')
test_files = [f for f in os.listdir(tests_raw_folder) if not f.startswith('_')]
META_REGEX = re.compile(
"""^(# META
# )(?P<asserts_dct>.*)(
# END META)
.*""")
@pytest.mark.parametrize('test_to_run', test_files, ids=str)
def test_run_all_tests(test_to_run, testdir):
cmdargs = []
conf_file_path = None
test_to_run_path = join(tests_raw_folder, test_to_run)
if isdir(test_to_run_path):
test_folder_path = test_to_run_path
cmdargs_file_path = join(test_folder_path, "cmdargs.txt")
if exists(cmdargs_file_path):
with open(cmdargs_file_path) as c:
cmdargs = c.read()
cmdargs = process_cmdargs(cmdargs)
conf_file_path = join(test_folder_path, "conf.py")
if exists(conf_file_path):
with open(conf_file_path) as c:
cfg_contents = c.read()
print("\nConfig contents: %s" % cfg_contents)
testdir.makeconftest(cfg_contents)
test_to_run = test_to_run + ".py"
test_to_run_path = join(test_folder_path, test_to_run)
if not exists(test_to_run_path):
raise ValueError("Test file %s not found in folder %s" % (test_to_run, test_folder_path))
with open(test_to_run_path) as f:
test_file_contents = f.read()
testdir.makepyfile(test_file_contents)
m = META_REGEX.match(test_file_contents)
if m is None:
raise ValueError("test file '%s' does not contain the META-header" % test_to_run)
asserts_dct_str = m.groupdict()['asserts_dct']
asserts_dct = ast.literal_eval(asserts_dct_str)
print("\nTesting that running pytest on file %s with config file %s results in %s."
"" % (test_to_run, conf_file_path, str(asserts_dct)))
print("For debug, temp dir is: %s" % testdir.tmpdir)
from _pytest import config
jb_prepareconfig = config._prepareconfig
if jb_prepareconfig.__module__ != config.get_config.__module__:
config._prepareconfig = get_pytest_prepare_config()
result = testdir.runpytest(*cmdargs)
config._prepareconfig = jb_prepareconfig
try:
result.assert_outcomes(**asserts_dct)
except Exception as e:
err = Exception("Error while asserting that %s results in %s. Actual results: %s"
"" % (test_to_run, str(asserts_dct), result.parseoutcomes()))
err.__cause__ = e
raise err
def get_pytest_prepare_config(dynamic=False):
import py
import shlex
if dynamic:
from _pytest import config
with open(config.__file__) as cfg_file_original:
_capture = False
all_lines = []
for l in cfg_file_original.readlines():
if l.startswith("def _prepareconfig"):
_capture = True
all_lines.append(l)
elif _capture:
if l.startswith(" "):
all_lines.append(l)
else:
break
from _pytest.config import get_config
g = globals()
l = locals()
prepare_cfg_code = "".join(all_lines)
exec(prepare_cfg_code, l, g)
real_prepare_config = g['_prepareconfig']
else:
import sys
from _pytest.config import get_config
def real_prepare_config(args=None, plugins=None):
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, string_types):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args, posix=sys.platform != "win32")
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, py.builtin._basestring):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args)
except BaseException:
config._ensure_unconfigure()
raise
return real_prepare_config
def process_cmdargs(cmdargs):
return shlex.split(cmdargs)
| true | true |
f72155b9712b48098172994163d1909c6bd06e2b | 959 | py | Python | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | django_mc2p/__init__.py | mc2p/mc2p-django | a8a245d0a2783a0199e74d2d0396c397c056f0f6 | [
"BSD-2-Clause"
] | null | null | null | from mc2p import MC2PClient as MC2PClientPython
# Package metadata
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'

# Version synonym
VERSION = __version__

# Header encoding (see RFC5987)
HTTP_HEADER_ENCODING = 'iso-8859-1'

# Default datetime input and output formats
ISO_8601 = 'iso-8601'

# Django AppConfig entry point for this package (pre-3.2 style autodiscovery)
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
    """Django wrapper of the Python `MC2PClient`.

    Reads the API key pair from the `MC2PConfig` row stored in the database
    instead of requiring the caller to pass credentials explicitly.
    """

    def __init__(self):
        """Initializes a MC2PClient getting key and secret key from DB.

        Falls back to empty credentials when the configuration row is missing
        or the database is not reachable (e.g. during migrations).
        """
        from .models import MC2PConfig  # local import avoids app-loading cycles
        try:
            mc2p_config = MC2PConfig.objects.get()
            key = mc2p_config.key
            secret_key = mc2p_config.secret_key
        except Exception:
            # Was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; Exception still covers DoesNotExist,
            # MultipleObjectsReturned and database errors.
            key = ''
            secret_key = ''
        super(MC2PClient, self).__init__(key, secret_key)
| 23.975 | 67 | 0.665276 | from mc2p import MC2PClient as MC2PClientPython
__title__ = 'MyChoice2Pay Django'
__version__ = '0.1.3'
__author__ = 'MyChoice2Pay'
__license__ = 'BSD 2-Clause'
__copyright__ = 'Copyright 2017 MyChoice2Pay'
VERSION = __version__
HTTP_HEADER_ENCODING = 'iso-8859-1'
ISO_8601 = 'iso-8601'
default_app_config = 'django_mc2p.apps.DjangoMC2PConfig'
class MC2PClient(MC2PClientPython):
def __init__(self):
from .models import MC2PConfig
try:
mc2p_config = MC2PConfig.objects.get()
key = mc2p_config.key
secret_key = mc2p_config.secret_key
except:
key = ''
secret_key = ''
super(MC2PClient, self).__init__(key, secret_key)
| true | true |
f721568c58cfd884ff05085466a0d5440678835f | 19,025 | py | Python | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 12 | 2021-07-22T15:08:13.000Z | 2022-03-10T08:15:56.000Z | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 1 | 2022-03-07T13:21:42.000Z | 2022-03-07T13:21:42.000Z | nnunet/inference/pretrained_models/download_pretrained_model.py | Jiawei-Yang/TumorCP | 6053c75642fcbc0fb0424320ab3d758f24883b0e | [
"Apache-2.0"
] | 3 | 2021-11-26T06:26:24.000Z | 2022-02-14T01:23:44.000Z | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
    """Returns the catalog of downloadable pretrained nnU-Net models.

    Returns:
        dict: maps a task identifier (e.g. ``"Task004_Hippocampus"``) to a
        dict with exactly two keys: ``'description'`` (human-readable summary
        of segmentation targets, input modalities and the data source) and
        ``'url'`` (direct download link; empty string if not uploaded yet).
    """
    available_models = {
        "Task001_BrainTumour": {
            'description': "Brain Tumor Segmentation. \n"
                           "Segmentation targets are edema, enhancing tumor and necrosis, \n"
                           "input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
        },
        "Task002_Heart": {
            'description': "Left Atrium Segmentation. \n"
                           "Segmentation target is the left atrium, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
        },
        "Task003_Liver": {
            'description': "Liver and Liver Tumor Segmentation. \n"
                           "Segmentation targets are liver and tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
        },
        "Task004_Hippocampus": {
            'description': "Hippocampus Segmentation. \n"
                           "Segmentation targets posterior and anterior parts of the hippocampus, \n"
                           "input modalities are 0: MRI. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
        },
        "Task005_Prostate": {
            'description': "Prostate Segmentation. \n"
                           "Segmentation targets are peripheral and central zone, \n"
                           "input modalities are 0: T2, 1: ADC. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
        },
        "Task006_Lung": {
            'description': "Lung Nodule Segmentation. \n"
                           "Segmentation target are lung nodules, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
        },
        "Task007_Pancreas": {
            'description': "Pancreas Segmentation. \n"
                           "Segmentation targets are pancras and pancreas tumor, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
        },
        "Task008_HepaticVessel": {
            'description': "Hepatic Vessel Segmentation. \n"
                           "Segmentation targets are hepatic vesels and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
        },
        "Task009_Spleen": {
            'description': "Spleen Segmentation. \n"
                           "Segmentation target is the spleen, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
        },
        "Task010_Colon": {
            'description': "Colon Cancer Segmentation. \n"
                           "Segmentation target are colon caner primaries, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
            'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
        },
        "Task017_AbdominalOrganSegmentation": {
            'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
                           "Segmentation targets are thirteen different abdominal organs, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
            'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
        },
        "Task024_Promise": {
            'description': "Prostate MR Image Segmentation 2012. \n"
                           "Segmentation target is the prostate, \n"
                           "input modalities are 0: T2. \n"
                           "Also see https://promise12.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
        },
        "Task027_ACDC": {
            'description': "Automatic Cardiac Diagnosis Challenge. \n"
                           "Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
                           "input modalities are 0: cine MRI. \n"
                           "Also see https://acdc.creatis.insa-lyon.fr/",
            'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
        },
        "Task029_LiTS": {
            'description': "Liver and Liver Tumor Segmentation Challenge. \n"
                           "Segmentation targets are liver and liver tumors, \n"
                           "input modalities are 0: abdominal CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/17094",
            'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
        },
        "Task035_ISBILesionSegmentation": {
            'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
                           "Segmentation target is MS lesions, \n"
                           "input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
                           "Also see https://smart-stats-tools.org/lesion-challenge",
            'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
        },
        "Task038_CHAOS_Task_3_5_Variant2": {
            'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
                           "Segmentation targets are left and right kidney, liver, spleen, \n"
                           "input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
                           "Also see https://chaos.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
        },
        "Task048_KiTS_clean": {
            'description': "Kidney and Kidney Tumor Segmentation Challenge. "
                           "Segmentation targets kidney and kidney tumors, "
                           "input modalities are 0: abdominal CT scan. "
                           "Also see https://kits19.grand-challenge.org/",
            'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
        },
        "Task055_SegTHOR": {
            'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
                           "Segmentation targets are aorta, esophagus, heart and trachea, \n"
                           "input modalities are 0: CT scan. \n"
                           "Also see https://competitions.codalab.org/competitions/21145",
            'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
        },
        "Task061_CREMI": {
            'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
                           "Segmentation target is synaptic clefts, \n"
                           "input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
                           "Also see https://cremi.org/",
            'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
        },
        "Task075_Fluo_C3DH_A549_ManAndSim": {
            'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
        },
        "Task076_Fluo_N3DH_SIM": {
            # BUGFIX: a stray comma previously ended 'description' after the
            # "Also see" line, so the remaining note strings implicitly
            # concatenated with the 'url' key into one bogus dict key and this
            # entry had no 'url' key at all (KeyError when downloading).
            'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: fluorescence_microscopy\n"
                           "Also see http://celltrackingchallenge.net/\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py",
            'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
        },
        "Task089_Fluo-N2DH-SIM_thickborder_time": {
            'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
                           "input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
                           "Note that the input channels are different time steps from a time series acquisition\n"
                           "Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
                           "Also see http://celltrackingchallenge.net/",
            'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
        },
        "Task114_heart_MNMs": {
            'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
                           "input modalities are 0: MRI \n"
                           "See also https://www.ub.edu/mnms/ \n"
                           "Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
                           "See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
            'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
        },
    }
    return available_models
def print_available_pretrained_models():
    """Prints the identifier and description of every downloadable model."""
    print('The following pretrained models are available:\n')
    for task_name, task_info in get_available_models().items():
        print('')
        print(task_name)
        print(task_info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Looks up *taskname* in the model catalog and downloads/installs it.

    Raises:
        RuntimeError: if the task is unknown, or its weights have not been
            uploaded yet (empty download URL).
    """
    catalog = get_available_models()
    if taskname not in catalog:
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    url = catalog[taskname]['url']
    if not url:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    download_and_install_from_url(url)
def download_and_install_from_url(url):
    """Downloads a pretrained-model zip from *url* and installs it.

    The archive is streamed to a temporary file in the user's home directory
    and removed again afterwards, whether or not installation succeeds.
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                    "set (RESULTS_FOLDER missing as environment variable, see " \
                                                    "Installation instructions)"

    import http.client
    # Force HTTP/1.0 on the underlying connection -- presumably a workaround
    # for servers misbehaving with chunked HTTP/1.1 streaming; confirm before
    # removing.
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

    import os
    home = os.path.expanduser('~')
    random_number = int(time() * 1e7)
    # Renamed from `tempfile` so the stdlib `tempfile` module name is not
    # shadowed.
    download_target = join(home, '.nnunetdownload_%s' % str(random_number))

    try:
        with open(download_target, 'wb') as f:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                # Stream in chunks so arbitrarily large checkpoints never
                # have to fit in memory.
                for chunk in r.iter_content(chunk_size=8192 * 16):
                    f.write(chunk)
        print("Download finished. Extracting...")
        install_model_from_zip_file(download_target)
        print("Done")
    finally:
        # The previous `except Exception as e: raise e` added nothing;
        # a plain try/finally keeps the cleanup and the original traceback.
        if isfile(download_target):
            os.remove(download_target)
def download_file(url, local_filename):
    """Streams *url* into *local_filename* and returns that path.

    Borrowed from
    https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
    """
    # NOTE the stream=True parameter below: the response body is consumed
    # incrementally instead of being loaded into memory at once.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as f:
            # chunk_size=None writes the data in whatever chunk sizes the
            # connection delivers them.
            for chunk in r.iter_content(chunk_size=None):
                f.write(chunk)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Extracts a pretrained-model archive into the results folder
    (`network_training_output_dir`)."""
    archive = zipfile.ZipFile(zip_file, 'r')
    try:
        archive.extractall(network_training_output_dir)
    finally:
        archive.close()
def print_license_warning():
    """Prints a banner reminding users about dataset license restrictions."""
    separator = '######################################################'
    notice = ("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
              "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
              "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    for line in ('',
                 separator,
                 '!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!',
                 separator,
                 notice,
                 separator,
                 ''):
        print(line)
def download_by_name():
    """Console entry point: download a pretrained model by its task name."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. CAREFUL: This script will "
                    "overwrite "
                    "existing models (if they share the same trainer class and plans as "
                    "the pretrained model")
    arg_parser.add_argument(
        "task_name", type=str,
        help='Task name of the pretrained model. To see '
             'available task names, run nnUNet_print_available_'
             'pretrained_models')
    parsed = arg_parser.parse_args()
    print_license_warning()
    download_and_install_pretrained_model_by_name(parsed.task_name)
def download_by_url():
    """Console entry point: download a pretrained model from an explicit URL."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. This script is intended to download models via url only. "
                    "If you want to download one of our pretrained models, please use nnUNet_download_pretrained_model. "
                    "CAREFUL: This script will overwrite "
                    "existing models (if they share the same trainer class and plans as "
                    "the pretrained model.")
    arg_parser.add_argument("url", type=str, help='URL of the pretrained model')
    parsed = arg_parser.parse_args()
    download_and_install_from_url(parsed.url)
def install_from_zip_entry_point():
    """Console entry point: install a local pretrained-model zip file."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # Renamed local (was `zip`) so the builtin `zip` is not shadowed.
    zip_path = args.zip
    install_model_from_zip_file(zip_path)
def print_pretrained_model_requirements():
    """Console entry point: print a model's description, including the input
    modalities it requires."""
    import argparse
    arg_parser = argparse.ArgumentParser(
        description="Use this to see the properties of a pretrained model, especially "
                    "what input modalities it requires")
    arg_parser.add_argument(
        "task_name", type=str,
        help='Task name of the pretrained model. To see '
             'available task names, run nnUNet_print_available_'
             'pretrained_models')
    requested_task = arg_parser.parse_args().task_name
    catalog = get_available_models()
    if requested_task not in catalog:
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(catalog[requested_task]['description'])
if __name__ == '__main__':
url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1' | 57.304217 | 191 | 0.595848 |
import zipfile
from time import time
import requests
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
def get_available_models():
available_models = {
"Task001_BrainTumour": {
'description': "Brain Tumor Segmentation. \n"
"Segmentation targets are edema, enhancing tumor and necrosis, \n"
"input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
},
"Task002_Heart": {
'description': "Left Atrium Segmentation. \n"
"Segmentation target is the left atrium, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
},
"Task003_Liver": {
'description': "Liver and Liver Tumor Segmentation. \n"
"Segmentation targets are liver and tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
},
"Task004_Hippocampus": {
'description': "Hippocampus Segmentation. \n"
"Segmentation targets posterior and anterior parts of the hippocampus, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
},
"Task005_Prostate": {
'description': "Prostate Segmentation. \n"
"Segmentation targets are peripheral and central zone, \n"
"input modalities are 0: T2, 1: ADC. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4485926/files/Task005_Prostate.zip?download=1"
},
"Task006_Lung": {
'description': "Lung Nodule Segmentation. \n"
"Segmentation target are lung nodules, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
},
"Task007_Pancreas": {
'description': "Pancreas Segmentation. \n"
"Segmentation targets are pancras and pancreas tumor, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
},
"Task008_HepaticVessel": {
'description': "Hepatic Vessel Segmentation. \n"
"Segmentation targets are hepatic vesels and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
},
"Task009_Spleen": {
'description': "Spleen Segmentation. \n"
"Segmentation target is the spleen, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
},
"Task010_Colon": {
'description': "Colon Cancer Segmentation. \n"
"Segmentation target are colon caner primaries, \n"
"input modalities are 0: CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
},
"Task017_AbdominalOrganSegmentation": {
'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
"Segmentation targets are thirteen different abdominal organs, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
},
"Task024_Promise": {
'description': "Prostate MR Image Segmentation 2012. \n"
"Segmentation target is the prostate, \n"
"input modalities are 0: T2. \n"
"Also see https://promise12.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
},
"Task027_ACDC": {
'description': "Automatic Cardiac Diagnosis Challenge. \n"
"Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
"input modalities are 0: cine MRI. \n"
"Also see https://acdc.creatis.insa-lyon.fr/",
'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
},
"Task029_LiTS": {
'description': "Liver and Liver Tumor Segmentation Challenge. \n"
"Segmentation targets are liver and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://competitions.codalab.org/competitions/17094",
'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
},
"Task035_ISBILesionSegmentation": {
'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
"Segmentation target is MS lesions, \n"
"input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
"Also see https://smart-stats-tools.org/lesion-challenge",
'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
},
"Task038_CHAOS_Task_3_5_Variant2": {
'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
"Segmentation targets are left and right kidney, liver, spleen, \n"
"input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
"Also see https://chaos.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
},
"Task048_KiTS_clean": {
'description': "Kidney and Kidney Tumor Segmentation Challenge. "
"Segmentation targets kidney and kidney tumors, "
"input modalities are 0: abdominal CT scan. "
"Also see https://kits19.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
},
"Task055_SegTHOR": {
'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
"Segmentation targets are aorta, esophagus, heart and trachea, \n"
"input modalities are 0: CT scan. \n"
"Also see https://competitions.codalab.org/competitions/21145",
'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
},
"Task061_CREMI": {
'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
"Segmentation target is synaptic clefts, \n"
"input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
"Also see https://cremi.org/",
'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
},
"Task075_Fluo_C3DH_A549_ManAndSim": {
'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
},
"Task076_Fluo_N3DH_SIM": {
'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/\n",
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py"
'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
},
"Task089_Fluo-N2DH-SIM_thickborder_time": {
'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
"Note that the input channels are different time steps from a time series acquisition\n"
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
},
"Task114_heart_MNMs": {
'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
"input modalities are 0: MRI \n"
"See also https://www.ub.edu/mnms/ \n"
"Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
},
}
return available_models
def print_available_pretrained_models():
    """List every downloadable pretrained model together with its description."""
    print('The following pretrained models are available:\n')
    for task_name, model_info in get_available_models().items():
        print('')
        print(task_name)
        print(model_info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Look up *taskname* in the model registry, then download and install it.

    Raises RuntimeError when the task is unknown or its download URL is empty.
    """
    available = get_available_models()
    if taskname not in available:
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    url = available[taskname]['url']
    if len(url) == 0:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    download_and_install_from_url(url)
def download_and_install_from_url(url):
    """Stream the zip archive at *url* into a temporary file, install it into
    network_training_output_dir, and always remove the temporary file.

    Raises:
        requests.HTTPError: if the server answers with an error status.
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                    "set (RESULTS_FOLDER missing as environment variable, see " \
                                                    "Installation instructions)"
    # NOTE(review): this globally downgrades every http.client connection in the
    # process to HTTP/1.0 (presumably to sidestep chunked-transfer issues with
    # the download host) -- confirm the process-wide side effect is intended.
    import http.client
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
    import os
    home = os.path.expanduser('~')
    # Timestamp-derived suffix so concurrent downloads use distinct temp files.
    # ('temp_path' rather than 'tempfile' to avoid shadowing the stdlib module.)
    random_number = int(time() * 1e7)
    temp_path = join(home, '.nnunetdownload_%s' % str(random_number))
    try:
        with open(temp_path, 'wb') as f:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                # 128 KiB chunks: stream to disk without holding the zip in memory.
                for chunk in r.iter_content(chunk_size=8192 * 16):
                    f.write(chunk)
        print("Download finished. Extracting...")
        install_model_from_zip_file(temp_path)
        print("Done")
    finally:
        # The zip is only an installation vehicle; remove it on success and error
        # alike. (The former 'except Exception as e: raise e' was a no-op re-raise
        # and has been dropped -- exceptions still propagate unchanged.)
        if isfile(temp_path):
            os.remove(temp_path)
def download_file(url, local_filename):
    """Download *url* into *local_filename* (streamed) and return that path."""
    response = requests.get(url, stream=True)
    with response as r:
        r.raise_for_status()
        with open(local_filename, 'wb') as target:
            for piece in r.iter_content(chunk_size=None):
                target.write(piece)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Extract a pretrained-model zip archive into network_training_output_dir."""
    archive = zipfile.ZipFile(zip_file, 'r')
    try:
        archive.extractall(network_training_output_dir)
    finally:
        # Equivalent to the with-statement form: always release the file handle.
        archive.close()
def print_license_warning():
    """Print a prominent reminder that pretrained weights inherit the license
    of the dataset they were trained on."""
    print('')
    print('######################################################')
    print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!')
    print('######################################################')
    print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
          "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
          "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    # The last source line here was corrupted ("print('ownload_and_install_from_url(url)");
    # restored as the closing banner of the warning box.
    print('######################################################')
    print('')
def install_from_zip_entry_point():
    """CLI entry point: install a pretrained model from a local zip file."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # Local name 'zip_file' avoids shadowing the builtin zip().
    zip_file = args.zip
    install_model_from_zip_file(zip_file)
def print_pretrained_model_requirements():
    """CLI entry point: print the description (incl. required input modalities)
    of one pretrained model."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to see the properties of a pretrained model, especially "
                                                 "what input modalities it requires")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    requested_task = parser.parse_args().task_name
    registry = get_available_models()
    if requested_task not in registry:
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(registry[requested_task]['description'])
if __name__ == '__main__':
url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1' | true | true |
f72157380ef02e33e2ef0f6f19e81eebfeeb2a1a | 5,318 | py | Python | optimizers/bohb_one_shot/plots/util.py | Mirofil/nasbench-1shot1 | 46637e259691ea2b1ab3b2f1cbbd309068f02cde | [
"Apache-2.0"
] | 65 | 2019-12-20T12:20:22.000Z | 2022-03-12T07:34:08.000Z | optimizers/bohb_one_shot/plots/util.py | crwhite14/nasbench-1shot1 | c34bf9c0222f07a30ba1518b3e52e120a3560aa4 | [
"Apache-2.0"
] | 8 | 2020-01-29T07:49:31.000Z | 2021-10-20T08:58:29.000Z | optimizers/bohb_one_shot/plots/util.py | crwhite14/nasbench-1shot1 | c34bf9c0222f07a30ba1518b3e52e120a3560aa4 | [
"Apache-2.0"
] | 18 | 2020-01-26T08:40:18.000Z | 2021-09-20T15:13:00.000Z | import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
# Line color used for each optimizer when plotting trajectories.
colors={
    'BOHB-PC-DARTS': 'darkorange',
    'BOHB-DARTS': 'dodgerblue',
    'BOHB-GDAS' : 'forestgreen',
    'RE': 'crimson',
    'RS': 'darkorchid',
    'RL': 'sienna',
    'TPE': 'deepskyblue',
    'SMAC': 'violet',
    'HB': 'darkgray',
    'BOHB': 'gold'
}
# Matplotlib marker symbol used for each optimizer.
markers={
    'BOHB-DARTS': '^',
    'BOHB-PC-DARTS': 'v',
    'BOHB-GDAS' : 'x',
    'RS': 'D',
    'RE': 'o',
    'RL': 's',
    'SMAC': 'h',
    'HB': '>',
    'BOHB': '*',
    'TPE': '<'
}
def get_incumbent(losses, time_stamps):
    """Turn a sequence of (loss, time) observations into an incumbent
    (best-so-far) trajectory.

    Args:
        losses: loss of each evaluated configuration, in evaluation order.
        time_stamps: cumulative wall-clock time of each evaluation.

    Returns:
        (time_stamps, incumbent_losses) as a tuple of two lists of equal
        length; entry i holds the best loss seen up to evaluation i.
        (The old version returned dict.values() with the same ordering and
        carried an unused 'incumbent_budget' local; both cleaned up.)
    """
    incumbent_times = []
    incumbent_losses = []
    best_so_far = float('inf')
    for loss, stamp in zip(losses, time_stamps):
        if loss < best_so_far:
            best_so_far = loss
        incumbent_losses.append(best_so_far)
        incumbent_times.append(stamp)
    return incumbent_times, incumbent_losses
def get_trajectories(args, global_min, path='regularized_evolution',
                     methods=['RE', 'RS']):
    """Load per-seed optimizer runs from pickled files and build incumbent
    trajectories (regret over cumulative wall-clock time) per method.

    Regret per evaluation is (1 - test_accuracy - global_min).  Seeds are read
    consecutively from 0 until the first missing file.
    NOTE(review): the mutable default ``methods=['RE', 'RS']`` is only iterated,
    never mutated, so it is harmless -- a tuple default would still be safer.

    Returns:
        dict mapping method name -> {'time_stamps': 1d array,
        'losses': 2d array (seeds x time steps)}; methods with no readable
        runs are omitted.
    """
    all_trajectories = {}
    for m in methods:
        dfs = []
        for seed in range(500):
            filename = os.path.join(path, m,
                                    'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,
                                                                          seed))
            try:
                with open(filename, 'rb') as f:
                    data = pickle.load(f)
                    losses = [1 - x.test_accuracy - global_min for x in data]
                    # cumulative training time at each evaluation
                    times = np.array([x.training_time for x in data])
                    times = [np.sum(times[:i+1]) for i in range(len(times))]
                    if m in ['HB', 'BOHB']:
                        # multi-fidelity methods: truncate to a total budget of
                        # 280 full evaluations of 108 epochs each
                        costs = np.array([x.budget for x in data])
                        costs = np.array(
                            [np.sum(costs[:i+1]) for i in range(len(costs))]
                        )
                        n = len(np.where(costs <= 280*108)[0])
                        times, losses = get_incumbent(losses[:n], times[:n])
                    else:
                        times, losses = get_incumbent(losses, times)
                    print(seed, ' MIN: ', min(losses))
                    df = pd.DataFrame({str(seed): losses}, index=times)
                    dfs.append(df)
            except FileNotFoundError:
                # seeds are stored consecutively; the first missing file ends the scan
                break
        df = merge_and_fill_trajectories(dfs, default_value=None)
        if df.empty:
            continue
        print(m, df.shape)
        all_trajectories[m] = {
            'time_stamps': np.array(df.index),
            'losses': np.array(df.T)
        }
    return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
# merge all tracjectories keeping all time steps
df = pd.DataFrame().join(pandas_data_frames, how='outer')
# forward fill to make it a propper step function
df=df.fillna(method='ffill')
if default_value is None:
# backward fill to replace the NaNs for the early times by
# the performance of a random configuration
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
                incumbent=None, show=True, linewidth=3, marker_size=10,
                xscale='log', xlabel='wall clock time [s]', yscale='log',
                ylabel=None, legend_loc = 'best', xlim=None, ylim=None,
                plot_mean=True, labels={}, markers=markers, colors=colors,
                figsize=(16,9)):
    """Plot the mean (or median) incumbent trajectory of each method on *ax*.

    incumbent_trajectories maps method name -> {'time_stamps', 'losses'} as
    produced by get_trajectories.  When *regret* is True, losses are shifted
    by the best final value across all methods (or the supplied *incumbent*).
    NOTE(review): several parameters (show, xscale, xlabel, ..., figsize)
    are accepted but never used in this body -- axis styling appears to be
    left to the caller.  ``labels={}`` is a mutable default but is only read.

    Returns:
        (fig, ax) unchanged, for chaining.
    """
    if regret:
        if ylabel is None: ylabel = 'regret'
        # find lowest performance in the data to update incumbent
        if incumbent is None:
            incumbent = np.inf
            for tr in incumbent_trajectories.values():
                incumbent = min(tr['losses'][:,-1].min(), incumbent)
            print('incumbent value: ', incumbent)
    for m,tr in incumbent_trajectories.items():
        trajectory = np.copy(tr['losses'])
        if (trajectory.shape[0] == 0): continue
        if regret: trajectory -= incumbent
        # standard error of the mean across seeds
        sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])
        if plot_mean:
            mean = trajectory.mean(axis=0)
        else:
            mean = np.median(trajectory,axis=0)
            # 1.253 ~= sqrt(pi/2): scales the SEM into the asymptotic
            # standard error of the median
            sem *= 1.253
        if 'DARTS' in m or 'GDAS' in m:
            # shaded +-2 SEM band only for the one-shot (DARTS/GDAS) variants
            ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,
                            color=colors[m], alpha=0.2)
        ax.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
        if axins is not None:
            # mirror the curve into the inset axes when one is supplied
            axins.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
    return (fig, ax)
| 33.446541 | 95 | 0.531403 | import os
import pickle
import collections
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython import embed
# Line color used for each optimizer when plotting trajectories.
colors={
    'BOHB-PC-DARTS': 'darkorange',
    'BOHB-DARTS': 'dodgerblue',
    'BOHB-GDAS' : 'forestgreen',
    'RE': 'crimson',
    'RS': 'darkorchid',
    'RL': 'sienna',
    'TPE': 'deepskyblue',
    'SMAC': 'violet',
    'HB': 'darkgray',
    'BOHB': 'gold'
}
# Matplotlib marker symbol used for each optimizer.
markers={
    'BOHB-DARTS': '^',
    'BOHB-PC-DARTS': 'v',
    'BOHB-GDAS' : 'x',
    'RS': 'D',
    'RE': 'o',
    'RL': 's',
    'SMAC': 'h',
    'HB': '>',
    'BOHB': '*',
    'TPE': '<'
}
def get_incumbent(losses, time_stamps):
    """Turn a sequence of (loss, time) observations into an incumbent
    (best-so-far) trajectory.

    Args:
        losses: loss of each evaluated configuration, in evaluation order.
        time_stamps: cumulative wall-clock time of each evaluation.

    Returns:
        (time_stamps, incumbent_losses) as a tuple of two lists of equal
        length; entry i holds the best loss seen up to evaluation i.
        (The old version returned dict.values() with the same ordering and
        carried an unused 'incumbent_budget' local; both cleaned up.)
    """
    incumbent_times = []
    incumbent_losses = []
    best_so_far = float('inf')
    for loss, stamp in zip(losses, time_stamps):
        if loss < best_so_far:
            best_so_far = loss
        incumbent_losses.append(best_so_far)
        incumbent_times.append(stamp)
    return incumbent_times, incumbent_losses
def get_trajectories(args, global_min, path='regularized_evolution',
                     methods=['RE', 'RS']):
    """Load per-seed optimizer runs from pickled files and build incumbent
    trajectories (regret over cumulative wall-clock time) per method.

    Regret per evaluation is (1 - test_accuracy - global_min).  Seeds are read
    consecutively from 0 until the first missing file.
    NOTE(review): the mutable default ``methods=['RE', 'RS']`` is only iterated,
    never mutated, so it is harmless -- a tuple default would still be safer.
    """
    all_trajectories = {}
    for m in methods:
        dfs = []
        for seed in range(500):
            filename = os.path.join(path, m,
                                    'algo_{}_0_ssp_{}_seed_{}.obj'.format(m, args.space,
                                                                          seed))
            try:
                with open(filename, 'rb') as f:
                    data = pickle.load(f)
                    losses = [1 - x.test_accuracy - global_min for x in data]
                    # cumulative training time at each evaluation
                    times = np.array([x.training_time for x in data])
                    times = [np.sum(times[:i+1]) for i in range(len(times))]
                    if m in ['HB', 'BOHB']:
                        # multi-fidelity methods: truncate to a total budget of
                        # 280 full evaluations of 108 epochs each
                        costs = np.array([x.budget for x in data])
                        costs = np.array(
                            [np.sum(costs[:i+1]) for i in range(len(costs))]
                        )
                        n = len(np.where(costs <= 280*108)[0])
                        times, losses = get_incumbent(losses[:n], times[:n])
                    else:
                        times, losses = get_incumbent(losses, times)
                    print(seed, ' MIN: ', min(losses))
                    df = pd.DataFrame({str(seed): losses}, index=times)
                    dfs.append(df)
            except FileNotFoundError:
                # seeds are stored consecutively; the first missing file ends the scan
                break
        df = merge_and_fill_trajectories(dfs, default_value=None)
        if df.empty:
            continue
        print(m, df.shape)
        all_trajectories[m] = {
            'time_stamps': np.array(df.index),
            'losses': np.array(df.T)
        }
    return all_trajectories
def merge_and_fill_trajectories(pandas_data_frames, default_value=None):
df = pd.DataFrame().join(pandas_data_frames, how='outer')
df=df.fillna(method='ffill')
if default_value is None:
df=df.fillna(method='bfill')
else:
df=df.fillna(default_value)
return(df)
def plot_losses(fig, ax, axins, incumbent_trajectories, regret=True,
                incumbent=None, show=True, linewidth=3, marker_size=10,
                xscale='log', xlabel='wall clock time [s]', yscale='log',
                ylabel=None, legend_loc = 'best', xlim=None, ylim=None,
                plot_mean=True, labels={}, markers=markers, colors=colors,
                figsize=(16,9)):
    """Plot the mean (or median) incumbent trajectory of each method on *ax*.

    When *regret* is True, losses are shifted by the best final value across
    all methods (or the supplied *incumbent*).
    NOTE(review): several parameters (show, xscale, xlabel, ..., figsize) are
    accepted but never used in this body; ``labels={}`` is a mutable default
    but is only read.

    Returns:
        (fig, ax) unchanged, for chaining.
    """
    if regret:
        if ylabel is None: ylabel = 'regret'
        # determine the incumbent from the best final value across methods
        if incumbent is None:
            incumbent = np.inf
            for tr in incumbent_trajectories.values():
                incumbent = min(tr['losses'][:,-1].min(), incumbent)
            print('incumbent value: ', incumbent)
    for m,tr in incumbent_trajectories.items():
        trajectory = np.copy(tr['losses'])
        if (trajectory.shape[0] == 0): continue
        if regret: trajectory -= incumbent
        # standard error of the mean across seeds
        sem = np.sqrt(trajectory.var(axis=0, ddof=1)/tr['losses'].shape[0])
        if plot_mean:
            mean = trajectory.mean(axis=0)
        else:
            mean = np.median(trajectory,axis=0)
            # 1.253 ~= sqrt(pi/2): scales the SEM into the asymptotic
            # standard error of the median
            sem *= 1.253
        if 'DARTS' in m or 'GDAS' in m:
            # shaded +-2 SEM band only for the one-shot (DARTS/GDAS) variants
            ax.fill_between(tr['time_stamps'], mean-2*sem, mean+2*sem,
                            color=colors[m], alpha=0.2)
        ax.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
        if axins is not None:
            # mirror the curve into the inset axes when one is supplied
            axins.plot(tr['time_stamps'],mean,
                label=labels.get(m, m), color=colors.get(m, None),linewidth=linewidth,
                marker=markers.get(m,None), markersize=marker_size, markevery=(0.1,0.1))
    return (fig, ax)
| true | true |
f721587995d4a908abf3416954f163ffd9986c6f | 5,391 | py | Python | tools/PolicyAnalysis/Businesses.py | Randal1936/FinancialSupervision | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | 1 | 2021-08-16T08:47:53.000Z | 2021-08-16T08:47:53.000Z | tools/PolicyAnalysis/Businesses.py | Randal1936/FSML | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | 16 | 2021-08-02T14:34:52.000Z | 2021-08-04T12:48:06.000Z | tools/PolicyAnalysis/Businesses.py | Randal1936/FinancialSupervision | 3d78b1cc662a2c0675ace880a772cc38eaf7672f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import xlwings as xw
from PolicyAnalysis import cptj as cj
"""
————————————————————
以下是使用 re 检索+ DFC 映射的数据处理写法 (below: processing variant using re retrieval + DFC mapping)
————————————————————
"""
class businesses_re:
    """Tally regulated-business categories in policy documents using regex
    retrieval (cj.words_docs_freq) plus a DFC keyword -> class mapping."""

    def __init__(self, Data, userdict):
        self.Data = Data
        self.userdict = userdict
        data = Data.copy()
        # First load the keyword dictionary
        n = cj.txt_to_list(self.userdict)
        # Classify the regulated businesses by building a keyword -> class-id map.
        # Start with a list marking the slice boundaries of each keyword group.
        loc = [(0, 4), (4, 10), (10, 15), (15, 19), (19, 22), (22, 26), (26, 29), (29, 31), (31, 40),
               (40, 41), (41, 42), (42, 43), (43, 44), (44, 45)]
        # Then walk that list, slice the keywords at the marked positions and
        # map keywords in the same slice to the same numeric class id.
        i = 0
        keymap = {}
        for rank in loc:
            lst = n[rank[0]: rank[1]]
            for item in lst:
                keymap[item] = i
            i += 1
        # Case 1: search the full body text
        result1 = cj.words_docs_freq(n, data)
        dfc1 = result1['DFC']
        dtm1_class = result1['DTM']
        dtm1_final = cj.dfc_sort_filter(dfc1, keymap, '被监管业务-正文分类统计.xlsx')
        # Case 2: search only the first ten sentences of the body text.
        # Build a sample matrix whose text column holds just those sentences.
        # NOTE(review): tf aliases data (no copy), so this overwrites the text
        # column in place -- business_jieba uses data.copy() here; confirm intended.
        tf = data
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
        result2 = cj.words_docs_freq(n, tf)
        dfc2 = result2['DFC']
        dtm2_class = result2['DTM']
        dtm2_final = cj.dfc_sort_filter(dfc2, keymap, '被监管业务-前十句话分类统计.xlsx')
        # Case 3: search only the titles.
        # First shape the sample the same way; assign via a loop of iloc writes --
        # assigning through a slice of the original DataFrame raises confusing errors.
        tf3 = data
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]
        # Produce the word-frequency statistics
        result3 = cj.words_docs_freq(n, tf3)
        dfc3 = result3['DFC']
        dtm3_class = result3['DTM']
        dtm3_final = cj.dfc_sort_filter(dfc3, keymap, '被监管业务-标题分类统计.xlsx')
        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务数'])
        self.DTM_aver = dtm_aver_class  # mean of the business counts from DTM 1/2/3
        self.DTM_final = dtm_final  # per-case business-category counts side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from full-text retrieval
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from first-ten-sentences retrieval
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from title retrieval
"""
——————————————————————
以下是使用 jieba 检索+ DTM 映射的数据处理写法 (below: processing variant using jieba retrieval + DTM mapping)
——————————————————————
"""
class business_jieba:
    """Tally regulated-business categories using jieba tokenisation
    (cj.jieba_vectorizer) plus a DTM keyword -> class mapping read from Excel."""

    def __init__(self, Data, userdict, indifile, indisheet, stopwords):
        self.Data = Data
        self.userdict = userdict
        self.indifile = indifile
        self.indisheet = indisheet
        self.stopwords = stopwords
        data = Data.copy()
        # Import the indicator (keyword) workbook via a headless Excel instance
        app = xw.App(visible=False, add_book=False)
        app.screen_updating = False
        app.display_alerts = False
        try:
            wb = app.books.open(self.indifile)
            sht = wb.sheets[self.indisheet]
            df_indi = sht.used_range.value
            df_indi = pd.DataFrame(df_indi)
            # first spreadsheet row holds the column headers
            df_indi.columns = df_indi.loc[0]
            df_indi.drop(0, axis=0, inplace=True)
            df_indi.dropna(axis=0, how='all', inplace=True)
        finally:
            app.quit()
        # Build the business classification dict, {'Institution': [keyword1, keyword2, keyword3, ....], ....}
        keymap = {}
        for i in range(df_indi.shape[1]):
            keymap[df_indi.columns[i]] = list(df_indi.iloc[:, i].dropna(''))
        # Case 1: search the full body text
        dtm1 = cj.jieba_vectorizer(data, self.userdict, self.stopwords).DTM
        dtm1_result = cj.dtm_sort_filter(dtm1, keymap, '被监管业务-正文分类统计.xlsx')
        dtm1_class = dtm1_result['DTM_class']
        dtm1_final = dtm1_result['DTM_final']
        # Case 2: search only the first ten sentences of the body text.
        # Build a sample matrix whose text column holds just those sentences.
        tf = data.copy()
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
        dtm2 = cj.jieba_vectorizer(tf, self.userdict, self.stopwords).DTM
        dtm2_result = cj.dtm_sort_filter(dtm2, keymap, '被监管业务-前十句话分类统计.xlsx')
        dtm2_class = dtm2_result['DTM_class']
        dtm2_final = dtm2_result['DTM_final']
        # Case 3: search only the titles.
        # First shape the sample the same way; assign via a loop of iloc writes --
        # assigning through a slice of the original DataFrame raises confusing errors.
        tf3 = data.copy()
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]
        # Produce the word-frequency statistics
        dtm3 = cj.jieba_vectorizer(tf3, self.userdict, self.stopwords).DTM
        # NOTE(review): unlike cases 1 and 2, no output filename is passed here --
        # confirm the title statistics are intentionally not exported to Excel.
        dtm3_result = cj.dtm_sort_filter(dtm3, keymap)
        dtm3_class = dtm3_result['DTM_class']
        dtm3_final = dtm3_result['DTM_final']
        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务种类数'])
        self.DTM_aver = dtm_aver_class  # mean of the business counts from DTM 1/2/3
        self.DTM_final = dtm_final  # per-case business-category counts side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from full-text retrieval
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from first-ten-sentences retrieval
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from title retrieval
| 34.120253 | 101 | 0.584122 | import pandas as pd
import numpy as np
import xlwings as xw
from PolicyAnalysis import cptj as cj
class businesses_re:
    """Tally regulated-business categories in policy documents using regex
    retrieval (cj.words_docs_freq) plus a DFC keyword -> class mapping."""

    def __init__(self, Data, userdict):
        self.Data = Data
        self.userdict = userdict
        data = Data.copy()
        # load the keyword dictionary
        n = cj.txt_to_list(self.userdict)
        # slice boundaries of each keyword group in the dictionary
        loc = [(0, 4), (4, 10), (10, 15), (15, 19), (19, 22), (22, 26), (26, 29), (29, 31), (31, 40),
               (40, 41), (41, 42), (42, 43), (43, 44), (44, 45)]
        # map keywords in the same slice to the same numeric class id
        i = 0
        keymap = {}
        for rank in loc:
            lst = n[rank[0]: rank[1]]
            for item in lst:
                keymap[item] = i
            i += 1
        # case 1: search the full body text
        result1 = cj.words_docs_freq(n, data)
        dfc1 = result1['DFC']
        dtm1_class = result1['DTM']
        dtm1_final = cj.dfc_sort_filter(dfc1, keymap, '被监管业务-正文分类统计.xlsx')
        # case 2: search only the first ten sentences of the body text
        # NOTE(review): tf aliases data (no copy), so this overwrites the text
        # column in place -- business_jieba uses data.copy() here; confirm intended.
        tf = data
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
        result2 = cj.words_docs_freq(n, tf)
        dfc2 = result2['DFC']
        dtm2_class = result2['DTM']
        dtm2_final = cj.dfc_sort_filter(dfc2, keymap, '被监管业务-前十句话分类统计.xlsx')
        # case 3: search only the titles
        tf3 = data
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]
        result3 = cj.words_docs_freq(n, tf3)
        dfc3 = result3['DFC']
        dtm3_class = result3['DTM']
        dtm3_final = cj.dfc_sort_filter(dfc3, keymap, '被监管业务-标题分类统计.xlsx')
        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务数'])
        self.DTM_aver = dtm_aver_class  # mean of the business counts from DTM 1/2/3
        self.DTM_final = dtm_final  # per-case business-category counts side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from full-text retrieval
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from first-ten-sentences retrieval
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from title retrieval
class business_jieba:
    """Tally regulated-business categories using jieba tokenisation
    (cj.jieba_vectorizer) plus a DTM keyword -> class mapping read from Excel."""

    def __init__(self, Data, userdict, indifile, indisheet, stopwords):
        self.Data = Data
        self.userdict = userdict
        self.indifile = indifile
        self.indisheet = indisheet
        self.stopwords = stopwords
        data = Data.copy()
        # read the indicator (keyword) workbook via a headless Excel instance
        app = xw.App(visible=False, add_book=False)
        app.screen_updating = False
        app.display_alerts = False
        try:
            wb = app.books.open(self.indifile)
            sht = wb.sheets[self.indisheet]
            df_indi = sht.used_range.value
            df_indi = pd.DataFrame(df_indi)
            # first spreadsheet row holds the column headers
            df_indi.columns = df_indi.loc[0]
            df_indi.drop(0, axis=0, inplace=True)
            df_indi.dropna(axis=0, how='all', inplace=True)
        finally:
            app.quit()
        # build the classification dict: {category: [keyword, ...], ...}
        keymap = {}
        for i in range(df_indi.shape[1]):
            keymap[df_indi.columns[i]] = list(df_indi.iloc[:, i].dropna(''))
        # case 1: search the full body text
        dtm1 = cj.jieba_vectorizer(data, self.userdict, self.stopwords).DTM
        dtm1_result = cj.dtm_sort_filter(dtm1, keymap, '被监管业务-正文分类统计.xlsx')
        dtm1_class = dtm1_result['DTM_class']
        dtm1_final = dtm1_result['DTM_final']
        # case 2: search only the first ten sentences of the body text
        tf = data.copy()
        for i in range(0, data.shape[0]):
            tf.iloc[i, 2] = cj.top_n_sent(10, data.iloc[i, 2])
        dtm2 = cj.jieba_vectorizer(tf, self.userdict, self.stopwords).DTM
        dtm2_result = cj.dtm_sort_filter(dtm2, keymap, '被监管业务-前十句话分类统计.xlsx')
        dtm2_class = dtm2_result['DTM_class']
        dtm2_final = dtm2_result['DTM_final']
        # case 3: search only the titles
        tf3 = data.copy()
        for i in range(0, data.shape[0]):
            tf3.iloc[i, 2] = data.iloc[i, 1]
        dtm3 = cj.jieba_vectorizer(tf3, self.userdict, self.stopwords).DTM
        # NOTE(review): unlike cases 1 and 2, no output filename is passed here --
        # confirm the title statistics are intentionally not exported to Excel.
        dtm3_result = cj.dtm_sort_filter(dtm3, keymap)
        dtm3_class = dtm3_result['DTM_class']
        dtm3_final = dtm3_result['DTM_final']
        dtm_final = pd.concat([dtm1_final, dtm2_final, dtm3_final], axis=1)
        dtm_final.columns = ['被监管业务数(正文)', '被监管业务数(前十句)', '被监管业务数(标题)']
        dtm_aver_class = dtm_final.agg(np.mean, axis=1)
        dtm_aver_class = pd.DataFrame(dtm_aver_class, columns=['被监管业务种类数'])
        self.DTM_aver = dtm_aver_class  # mean of the business counts from DTM 1/2/3
        self.DTM_final = dtm_final  # per-case business-category counts side by side
        self.DTM1_class = dtm1_class  # Doc-Term Matrix from full-text retrieval
        self.DTM2_class = dtm2_class  # Doc-Term Matrix from first-ten-sentences retrieval
        self.DTM3_class = dtm3_class  # Doc-Term Matrix from title retrieval
| true | true |
f7215945d58449184be2ce4c38342a88d9dfe3a5 | 1,847 | py | Python | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | 15 | 2020-03-09T11:56:13.000Z | 2022-02-10T15:03:01.000Z | accounts/migrations/0002_auto_20200522_2023.py | codertimeless/StudentAssociation | 3f6caf2b362623d4f8cf82bab9529951a375fe6a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.8 on 2020-05-22 20:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.8, 2020-05-22 20:23): adds the
    relational fields of ClubUserProfile and StudentClubUser."""
    initial = True
    # Must run after the first migrations of 'management' and 'accounts',
    # and after Django's auth proxy-permission update.
    dependencies = [
        ('management', '0001_initial'),
        ('accounts', '0001_initial'),
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # ClubUserProfile -> Club / StudentClass / Unit foreign keys
        # (all optional, DO_NOTHING on delete).
        migrations.AddField(
            model_name='clubuserprofile',
            name='club',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Club', verbose_name='社团'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='student_class',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.StudentClass', verbose_name='班级'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='unit',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Unit', verbose_name='单位'),
        ),
        # Standard Django auth M2M fields for the custom user model.
        migrations.AddField(
            model_name='studentclubuser',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='studentclubuser',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
| 41.977273 | 256 | 0.650785 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.8): adds the relational fields of
    ClubUserProfile and StudentClubUser."""
    initial = True
    # Must run after the first migrations of 'management' and 'accounts',
    # and after Django's auth proxy-permission update.
    dependencies = [
        ('management', '0001_initial'),
        ('accounts', '0001_initial'),
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # ClubUserProfile -> Club / StudentClass / Unit foreign keys
        # (all optional, DO_NOTHING on delete).
        migrations.AddField(
            model_name='clubuserprofile',
            name='club',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Club', verbose_name='社团'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='student_class',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.StudentClass', verbose_name='班级'),
        ),
        migrations.AddField(
            model_name='clubuserprofile',
            name='unit',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='management.Unit', verbose_name='单位'),
        ),
        # Standard Django auth M2M fields for the custom user model.
        migrations.AddField(
            model_name='studentclubuser',
            name='groups',
            field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
        ),
        migrations.AddField(
            model_name='studentclubuser',
            name='user_permissions',
            field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
        ),
    ]
| true | true |
f72159a07dd981d68fbdfa405294da73b86fbb56 | 593 | py | Python | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | null | null | null | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | null | null | null | hashmaps/hash_map_tests.py | informramiz/data-structures-and-algorithms | 7038c8becc4cbad82867c9c8bca42637ca27c8d7 | [
"Apache-2.0"
] | 1 | 2020-09-24T22:54:52.000Z | 2020-09-24T22:54:52.000Z | from hash_map import HashMap
from asserts.asserts import assert_
def test_hash_map():
    """Exercise HashMap put/get, size, delete and rehashing via assert_ checks."""
    table = HashMap(2)
    # put/get round-trip
    table.put("abcde", "ramiz")
    assert_("ramiz", table.get("abcde"))
    # size reflects the single stored entry
    assert_(1, table.size())
    # deleting the entry empties the map again
    table.delete("abcde")
    assert_(0, table.size())
    # filling a capacity-2 map should trigger rehashing
    table.put("mine", "mine")
    table.put("hi", "hi")
    assert_(2, table.size())
    print("All Tests Passed!")
test_hash_map()
| 19.129032 | 35 | 0.637437 | from hash_map import HashMap
from asserts.asserts import assert_
def test_hash_map():
    """Exercise HashMap put/get, size, delete and rehashing via assert_ checks."""
    hash_map = HashMap(2)
    # put/get round-trip
    key = "abcde"
    value = "ramiz"
    hash_map.put(key, value)
    output = hash_map.get(key)
    assert_(value, output)
    # size reflects the single stored entry
    assert_(1, hash_map.size())
    hash_map.delete("abcde")
    assert_(0, hash_map.size())
    # filling a capacity-2 map should trigger rehashing
    hash_map.put("mine", "mine")
    hash_map.put("hi", "hi")
    assert_(2, hash_map.size())
    print("All Tests Passed!")
test_hash_map()
| true | true |
f7215b16f3948f7d90fe03e4471250973c15ca0c | 639 | py | Python | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 387 | 2021-01-02T07:50:15.000Z | 2022-03-31T04:30:03.000Z | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 70 | 2021-05-04T18:28:18.000Z | 2022-03-31T14:14:52.000Z | davarocr/davarocr/davar_rcg/tools/__init__.py | icedream2/DAVAR-Lab-OCR | c8b82f45516850eeadcab2739fb2a4292f2fdca1 | [
"Apache-2.0"
] | 83 | 2021-01-05T08:28:26.000Z | 2022-03-31T07:14:03.000Z | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : __init__.py
# Abstract :
# Current Version: 1.0.0
# Date : 2021-05-01
##################################################################################################
"""
from .test_utils import filter_punctuation, make_paths, show_result_table, results2json, eval_json
# Public API of this package: the test utilities re-exported from test_utils.
__all__ = [
    "filter_punctuation",
    "make_paths",
    "show_result_table",
    "results2json",
    "eval_json"
]
| 31.95 | 98 | 0.450704 | from .test_utils import filter_punctuation, make_paths, show_result_table, results2json, eval_json
# Public API of this package: the test utilities re-exported from test_utils.
__all__ = [
    "filter_punctuation",
    "make_paths",
    "show_result_table",
    "results2json",
    "eval_json"
]
| true | true |
f7215baa85a5f4afcd2f4643ea78bfc425bfefa5 | 2,586 | py | Python | app/back/settings/routes/api/__init__.py | jgphilpott/polyplot | c46861174ee5881dadffbfb2278d555462523547 | [
"MIT"
] | 5 | 2021-05-17T14:17:14.000Z | 2021-12-14T12:54:32.000Z | app/back/settings/routes/api/__init__.py | jgphilpott/iGraph | 2a91ba57e4950856a83d3a109753f8f2badee829 | [
"MIT"
] | 8 | 2020-02-09T02:48:41.000Z | 2021-05-16T04:57:02.000Z | app/back/settings/routes/api/__init__.py | jgphilpott/iGraph | 2a91ba57e4950856a83d3a109753f8f2badee829 | [
"MIT"
] | 2 | 2016-09-12T03:48:16.000Z | 2019-05-04T14:15:19.000Z | from front.tree.home.api.route import register_api_route
from front.tree.home.api.airports.route import register_api_airports_route
from front.tree.home.api.airports.airport.route import register_api_airport_route
from front.tree.home.api.cities.route import register_api_cities_route
from front.tree.home.api.cities.city.route import register_api_city_route
from front.tree.home.api.countries.route import register_api_countries_route
from front.tree.home.api.countries.country.route import register_api_country_route
from front.tree.home.api.graticules.route import register_api_graticules_route
from front.tree.home.api.graticules.graticule.route import register_api_graticule_route
from front.tree.home.api.indicators.route import register_api_indicators_route
from front.tree.home.api.indicators.indicator.route import register_api_indicator_route
from front.tree.home.api.lakes.route import register_api_lakes_route
from front.tree.home.api.lakes.lake.route import register_api_lake_route
from front.tree.home.api.maps.route import register_api_maps_route
from front.tree.home.api.maps.map.route import register_api_map_route
from front.tree.home.api.ports.route import register_api_ports_route
from front.tree.home.api.ports.port.route import register_api_port_route
from front.tree.home.api.railroads.route import register_api_railroads_route
from front.tree.home.api.railroads.railroad.route import register_api_railroad_route
from front.tree.home.api.rivers.route import register_api_rivers_route
from front.tree.home.api.rivers.river.route import register_api_river_route
from front.tree.home.api.roads.route import register_api_roads_route
from front.tree.home.api.roads.road.route import register_api_road_route
def register_api_routes(app):
    """Attach every API endpoint to the given application object.

    Registers the API index route, then each resource's collection route
    followed by its single-item route (airports, cities, countries,
    graticules, indicators, lakes, maps, ports, railroads, rivers, roads).
    """
    # Registrars listed in the exact order the routes were originally
    # registered: index first, then collection/item pairs per resource.
    registrars = (
        register_api_route,
        register_api_airports_route,
        register_api_airport_route,
        register_api_cities_route,
        register_api_city_route,
        register_api_countries_route,
        register_api_country_route,
        register_api_graticules_route,
        register_api_graticule_route,
        register_api_indicators_route,
        register_api_indicator_route,
        register_api_lakes_route,
        register_api_lake_route,
        register_api_maps_route,
        register_api_map_route,
        register_api_ports_route,
        register_api_port_route,
        register_api_railroads_route,
        register_api_railroad_route,
        register_api_rivers_route,
        register_api_river_route,
        register_api_roads_route,
        register_api_road_route,
    )
    for register in registrars:
        register(app)
| 35.916667 | 87 | 0.842614 | from front.tree.home.api.route import register_api_route
from front.tree.home.api.airports.route import register_api_airports_route
from front.tree.home.api.airports.airport.route import register_api_airport_route
from front.tree.home.api.cities.route import register_api_cities_route
from front.tree.home.api.cities.city.route import register_api_city_route
from front.tree.home.api.countries.route import register_api_countries_route
from front.tree.home.api.countries.country.route import register_api_country_route
from front.tree.home.api.graticules.route import register_api_graticules_route
from front.tree.home.api.graticules.graticule.route import register_api_graticule_route
from front.tree.home.api.indicators.route import register_api_indicators_route
from front.tree.home.api.indicators.indicator.route import register_api_indicator_route
from front.tree.home.api.lakes.route import register_api_lakes_route
from front.tree.home.api.lakes.lake.route import register_api_lake_route
from front.tree.home.api.maps.route import register_api_maps_route
from front.tree.home.api.maps.map.route import register_api_map_route
from front.tree.home.api.ports.route import register_api_ports_route
from front.tree.home.api.ports.port.route import register_api_port_route
from front.tree.home.api.railroads.route import register_api_railroads_route
from front.tree.home.api.railroads.railroad.route import register_api_railroad_route
from front.tree.home.api.rivers.route import register_api_rivers_route
from front.tree.home.api.rivers.river.route import register_api_river_route
from front.tree.home.api.roads.route import register_api_roads_route
from front.tree.home.api.roads.road.route import register_api_road_route
def register_api_routes(app):
register_api_route(app)
register_api_airports_route(app)
register_api_airport_route(app)
register_api_cities_route(app)
register_api_city_route(app)
register_api_countries_route(app)
register_api_country_route(app)
register_api_graticules_route(app)
register_api_graticule_route(app)
register_api_indicators_route(app)
register_api_indicator_route(app)
register_api_lakes_route(app)
register_api_lake_route(app)
register_api_maps_route(app)
register_api_map_route(app)
register_api_ports_route(app)
register_api_port_route(app)
register_api_railroads_route(app)
register_api_railroad_route(app)
register_api_rivers_route(app)
register_api_river_route(app)
register_api_roads_route(app)
register_api_road_route(app)
| true | true |
f7215cee8919dee9ba60b53f2fdaa5fd496bb91f | 656 | py | Python | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | 1 | 2022-02-28T15:38:57.000Z | 2022-02-28T15:38:57.000Z | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | null | null | null | testing.py | vuthalab/spectrum-awg | 5edd7eb3b06f877bb6f77359773c9ba2d727c52d | [
"MIT"
] | null | null | null | import time
from M4i6622 import *
from Functions.functions import *
#4 functions to be used
# Waveform callbacks handed to the AWG card, one per output channel.
# `sin` comes from Functions.functions; its `f` kwarg presumably sets the
# frequency — TODO confirm against Functions.functions.
def f0(x):
    # Channel 0 waveform; the commented call is an earlier experiment kept
    # for reference.
    return sin(x)#sin_for_time(60000000, 40000000, 20000,10000, x)
def f1(x):
    # Channel 1 waveform: plain sine.
    return sin(x)
def f2(x):
    # Channel 2 waveform: sine with an explicit f=1000 parameter.
    return sin(x,f=1000)
def f3(x):
    # Identity ramp; defined but not passed to setupCard in this script.
    return x
# Time how long card initialisation + buffer setup takes.
t0 = time.perf_counter()
# Bring up the Spectrum M4i.6622 AWG: 3 channels, 625 MS/s sample rate,
# clock output enabled, internal (non-reference) clock.
M4i = M4i6622(channelNum=3,sampleRate=625,clockOut=True,referenceClock=False)
r = M4i.setSoftwareBuffer()
# Load one waveform callback per active channel (f3 is intentionally unused).
M4i.setupCard( (f0,f1,f2) )
tf = time.perf_counter() - t0
print("Done")
print("Time elapsed: {0: 10f} s".format(tf))
# Start output, then immediately stop and report the driver's error code.
M4i.startCard()
r = M4i.stop()
print("Card has been stopped with error code: ",str(r))
| 15.255814 | 78 | 0.637195 | import time
from M4i6622 import *
from Functions.functions import *
def f0(x):
return sin(x)
def f1(x):
return sin(x)
def f2(x):
return sin(x,f=1000)
def f3(x):
return x
t0 = time.perf_counter()
M4i = M4i6622(channelNum=3,sampleRate=625,clockOut=True,referenceClock=False)
r = M4i.setSoftwareBuffer()
M4i.setupCard( (f0,f1,f2) )
tf = time.perf_counter() - t0
print("Done")
print("Time elapsed: {0: 10f} s".format(tf))
M4i.startCard()
r = M4i.stop()
print("Card has been stopped with error code: ",str(r))
| true | true |
f7215cf985916a431b4772d83a2ecae8b5f0c458 | 4,513 | py | Python | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | experiments/exp_movie_5and7.py | Leaflowave/PrivCQ | 8acc6ad0888793fb7fa190a1bd5b4f9eb1140514 | [
"MIT"
] | null | null | null | import group_frequency_oracle as freq
import linecache
import random
def query_on_adult_dim2(oraclePath,oracleInterval,queryPath,trueOraclePath,aggregation="count"):
    """Answer 500 queries against randomly sampled noisy frequency oracles
    and compare the estimates with the ground-truth oracle.

    Each of the 500 queries is answered 10 times, each time against a
    freshly sampled oracle (index drawn uniformly from 1..500), and the
    10 answers are averaged before computing errors.

    Args:
        oraclePath: file holding the serialized noisy oracles.
        oracleInterval: line stride between consecutive oracles in the file.
        queryPath: file whose first line is a Python literal list of queries.
        trueOraclePath: file whose first line is the ground-truth oracle
            dict, mapping group value -> {query_key: frequency}.
        aggregation: "count" sums raw frequencies; "sum" weights each
            frequency by its group value (any other value yields all zeros).

    Returns:
        (answer, TrueAnswer, mean_signed_relative_error, mean_signed_error)
        where answer/TrueAnswer are lists of the 500 averaged estimates
        and ground-truth values.
    """
    # WARNING: eval() on file contents -- acceptable only because these are
    # locally generated experiment artifacts, never untrusted input.
    queries = eval(linecache.getline(queryPath, 1))
    trueOracle = eval(linecache.getline(trueOraclePath, 1))
    # Total population size; used to floor the relative-error denominator
    # so near-zero true answers do not blow up the ratio.
    n = sum(sum(trueOracle[k].values()) for k in trueOracle)
    answer = [0] * 500
    TrueAnswer = [0] * 500
    relativeError = 0
    averageError = 0
    for i in range(1, 501):
        for _ in range(10):
            # Sample one of the 500 independently-noised oracles per trial.
            kthoracle = random.randint(1, 500)
            oracle = freq.group_frequency_oracle(oraclePath, oracleInterval,
                                                 k_th_oracle=kthoracle)
            est = 0
            true_val = 0
            for j in oracle.keys():
                if aggregation == "count":
                    est += oracle[j][queries[i - 1]]
                    true_val += trueOracle[j][queries[i - 1]]
                elif aggregation == "sum":
                    # Weight frequency by the group value j.
                    est += j * oracle[j][queries[i - 1]]
                    true_val += j * trueOracle[j][queries[i - 1]]
            answer[i - 1] += est
            TrueAnswer[i - 1] += true_val
        # Average the 10 repeated trials.
        answer[i - 1] /= 10.0
        TrueAnswer[i - 1] /= 10.0
        # Signed errors (positive and negative deviations can cancel).
        relativeError += (answer[i - 1] - TrueAnswer[i - 1]) \
            / max(0.001 * n, float(TrueAnswer[i - 1]))
        averageError += answer[i - 1] - TrueAnswer[i - 1]
    return answer, TrueAnswer, relativeError / 500, averageError / 500
if __name__ == '__main__':
    # Experiment inputs: noisy oracle results, oracle stride, the query
    # list, and the ground-truth oracle for the movie dataset.
    oraclePath = "experiments//movie_2_results.txt"
    oracleInterval = 18
    queryPath = "experiments//movie_query_5_7_9.txt"
    trueOraclePath = "movie//movie5.txt"
    # Run the COUNT-aggregation experiment and persist the results.
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="count")
    print(relativeError)
    with open("experiments//final_movie_2_count.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans"+str(Trueans)+"\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
        # f.write("absrelativeError:" + str(absrelativeError) + "\n")
        # f.write("absaverageError:" + str(absaverageError) + "\n")
    # Repeat with SUM aggregation, written to a separate results file.
    ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
                                                                   queryPath,
                                                                   trueOraclePath,
                                                                   aggregation="sum")
    print(relativeError)
    with open("experiments//final_movie_2_sum.txt", "w+") as f:
        f.write(str(ans) + "\n")
        f.write("true ans" + str(Trueans) + "\n")
        f.write("relativeError:" + str(relativeError) + "\n")
        f.write("averageError:" + str(averageError) + "\n")
        # f.write("absrelativeError:" + str(absrelativeError) + "\n")
        # f.write("absaverageError:" + str(absaverageError) + "\n")
| 48.010638 | 114 | 0.534899 | import group_frequency_oracle as freq
import linecache
import random
def query_on_adult_dim2(oraclePath,oracleInterval,queryPath,trueOraclePath,aggregation="count"):
queriesStr=linecache.getline(queryPath,1)
queries=eval(queriesStr)
answer=[0]*500
trueOracleStr=linecache.getline(trueOraclePath,1)
trueOracle= eval(trueOracleStr)
n=sum([sum(trueOracle[k].values()) for k in trueOracle.keys()])
TrueAnswer=[0]*500
relativeError = 0
averageError=0
absrelativeError=0
absaverageError=0
for i in range(1,501):
for _ in range(10):
kthoracle=random.randint(1,500)
oracle=freq.group_frequency_oracle(oraclePath, oracleInterval,k_th_oracle=kthoracle)
if aggregation=="count":
count_value=0
true_count_value=0
for j in oracle.keys():
count_value+=oracle[j][queries[i-1]]
true_count_value += trueOracle[j][queries[i - 1]]
answer[i-1]+=count_value
TrueAnswer[i-1]+=true_count_value
elif aggregation=="sum":
sum_value = 0
true_sum_value = 0
for j in oracle.keys():
sum_value += j*oracle[j][queries[i-1]]
true_sum_value += j*trueOracle[j][queries[i - 1]]
answer[i-1]+=sum_value
TrueAnswer[i-1]+=true_sum_value
answer[i - 1] /= 10.0
TrueAnswer[i - 1] /= 10.0
relativeError += (answer[i - 1] - TrueAnswer[i - 1]) / max(0.001 * n, float(TrueAnswer[i - 1]))
averageError += answer[i - 1] - TrueAnswer[i - 1]
return answer,TrueAnswer,relativeError/500,averageError/500
if __name__ == '__main__':
oraclePath = "experiments//movie_2_results.txt"
oracleInterval = 18
queryPath = "experiments//movie_query_5_7_9.txt"
trueOraclePath = "movie//movie5.txt"
ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
queryPath,
trueOraclePath,
aggregation="count")
print(relativeError)
with open("experiments//final_movie_2_count.txt", "w+") as f:
f.write(str(ans) + "\n")
f.write("true ans"+str(Trueans)+"\n")
f.write("relativeError:" + str(relativeError) + "\n")
f.write("averageError:" + str(averageError) + "\n")
ans, Trueans,relativeError, averageError = query_on_adult_dim2(oraclePath, oracleInterval,
queryPath,
trueOraclePath,
aggregation="sum")
print(relativeError)
with open("experiments//final_movie_2_sum.txt", "w+") as f:
f.write(str(ans) + "\n")
f.write("true ans" + str(Trueans) + "\n")
f.write("relativeError:" + str(relativeError) + "\n")
f.write("averageError:" + str(averageError) + "\n")
| true | true |
f7215e55cf136f7e2d8b5021a9fd804c6a6a0820 | 42,837 | py | Python | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | pytests/epengine/bucket_level_durability.py | cgghali/TAF | 1de8dec77ad781c373e18d9c285befd534ac203a | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
    """Bucket-creation tests covering every minimum-durability level,
    exercised once through couchbase-cli and once through the REST API.
    Fixtures (cluster, bucket_util, summary, ...) come from
    BucketDurabilityBase.
    """
    def setUp(self):
        # All cluster/bucket fixture setup lives in the base class.
        super(CreateBucketTests, self).setUp()
    def tearDown(self):
        super(CreateBucketTests, self).tearDown()
    def test_create_bucket_using_cli(self):
        """
        Create Bucket with all possible durability_levels and make sure
        durability levels are honored for document CRUDs
        - Will test for all bucket types (Couchbase, Ephemeral, Memcached)
        - With all possible d_levels for bucket_durability
        - Perform doc insert for each bucket to validate the sync_writes
        """
        # Create cb_cli session object (shell on the master node)
        shell = self.vbs_in_node[self.cluster.master]["shell"]
        cb_cli = CbCli(shell)
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Remove unsupported replica string in case if MC bucket
            if self.bucket_type == Bucket.Type.MEMCACHED:
                del bucket_dict[Bucket.replicaNumber]
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            output = cb_cli.create_bucket(bucket_dict, wait=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            if "SUCCESS: Bucket created" not in str(output):
                create_failed = True
                # Failure is only a test failure when the level is
                # actually supported for this bucket type.
                if d_level in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create failed for %s bucket "
                                     "with min_durability_level %s"
                                     % (self.bucket_type, d_level))
            self.bucket_util.buckets = [bucket_obj]
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate_CRUD_operation")
                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)
            # Delete via CLI and check the message matches the create
            # outcome (bucket absent when creation had failed).
            output = cb_cli.delete_bucket(bucket_obj.name)
            if create_failed:
                if "ERROR: Bucket not found" not in str(output):
                    self.log_failure("Mismatch in bucket-delete output")
            elif "SUCCESS: Bucket deleted" not in str(output):
                self.log_failure("Mismatch in bucket-delete output")
            self.summary.add_step("Delete bucket")
    def test_create_bucket_using_rest(self):
        """Same durability-level matrix as the CLI test, but bucket
        creation/deletion goes through the REST API; unsupported levels
        are expected to raise from create_bucket."""
        for d_level in self.bucket_util.get_supported_durability_levels():
            create_failed = False
            test_step = "Creating %s bucket with level %s" \
                        % (self.bucket_type, d_level)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            try:
                self.bucket_util.create_bucket(bucket_obj,
                                               wait_for_warmup=True)
                self.get_vbucket_type_mapping(bucket_obj.name)
                # Creation succeeding for an unsupported level is a bug.
                if d_level not in self.possible_d_levels[self.bucket_type]:
                    self.log_failure("Create succeeded for %s bucket for "
                                     "unsupported durability %s"
                                     % (self.bucket_type, d_level))
            except Exception as rest_exception:
                create_failed = True
                self.log.info(rest_exception)
            self.bucket_util.print_bucket_stats()
            self.summary.add_step(test_step)
            # Perform CRUDs to validate bucket_creation with durability
            if not create_failed:
                verification_dict = self.get_cb_stat_verification_dict()
                self.validate_durability_with_crud(bucket_obj, d_level,
                                                   verification_dict)
                self.summary.add_step("Validate CRUD operation")
                # Cbstats vbucket-details validation
                self.cb_stat_verify(verification_dict)
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
    def setUp(self):
        # Cluster/bucket fixtures come entirely from BucketDurabilityBase.
        super(BucketDurabilityTests, self).setUp()
    def tearDown(self):
        # Base class handles bucket cleanup and summary reporting.
        super(BucketDurabilityTests, self).tearDown()
    def test_durability_with_bucket_level_none(self):
        """
        Create Buckets with NONE durability level.
        Attempts sync_write with different durability_levels and validate
        CRUDs are honored with respective durability_levels set from clients
        """
        create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
        b_durability = Bucket.DurabilityLevel.NONE
        # Running totals of expected cbstats counters, updated by
        # validate_durability_with_crud as ops are performed.
        verification_dict = self.get_cb_stat_verification_dict()
        bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)
        self.log.info(create_desc)
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        # Index for doc_gen to avoid creating/deleting same docs across d_level
        index = 0
        for d_level in self.get_supported_durability_for_bucket():
            # Bucket stays at NONE; only the per-document durability varies.
            self.validate_durability_with_crud(bucket_obj, b_durability,
                                               verification_dict,
                                               doc_durability=d_level,
                                               doc_start_index=index)
            self.summary.add_step("CRUD with doc_durability %s" % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            index += 10
    def test_ops_only_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        CRUDs from client without explicitly setting the durability and
        validate the ops to make sure respective durability is honored
        """
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(step_desc)
            # No doc_durability passed: the bucket's minimum level must
            # be applied implicitly to these writes.
            self.validate_durability_with_crud(bucket_obj, d_level,
                                               verification_dict)
            self.summary.add_step("Async write with bucket durability %s"
                                  % d_level)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_sub_doc_op_with_bucket_level_durability(self):
        """
        Create Buckets with durability_levels set and perform
        Sub_doc CRUDs from client without durability settings and
        validate the ops to make sure respective durability is honored
        """
        # Single test document reused for every durability level.
        # (.next() => this suite runs under Python 2.)
        key, value = doc_generator("test_key", 0, 1).next()
        sub_doc_key = "sub_doc_key"
        sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
        for d_level in self.get_supported_durability_for_bucket():
            # Avoid creating bucket with durability=None
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            step_desc = "Creating %s bucket with level '%s'" \
                        % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(step_desc)
            # Object to support performing CRUDs and create Bucket
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.summary.add_step(step_desc)
            # SDK client to perform sub_doc ops
            client = SDKClient([self.cluster.master], bucket_obj)
            result = client.crud("create", key, value)
            # Full-doc create counts as one op and one sync-write commit.
            verification_dict["ops_create"] += 1
            verification_dict["sync_write_committed_count"] += 1
            if result["status"] is False:
                self.log_failure("Doc insert failed for key: %s" % key)
            # Perform sub_doc CRUD
            for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
                               "subdoc_replace"]:
                sub_doc_val = choice(sub_doc_vals)
                _, fail = client.crud(sub_doc_op, key,
                                      [sub_doc_key, sub_doc_val])
                if fail:
                    self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                     % (sub_doc_op, key,
                                        sub_doc_key, sub_doc_val, result))
                else:
                    # Each sub-doc mutation shows up as an update + commit.
                    verification_dict["ops_update"] += 1
                    verification_dict["sync_write_committed_count"] += 1
                # Read back and confirm the sub-doc value landed.
                success, fail = client.crud("subdoc_read", key, sub_doc_key)
                if fail or str(success[key]["value"].get(0)) != sub_doc_val:
                    self.log_failure("%s failed. Expected: %s, Actual: %s"
                                     % (sub_doc_op, sub_doc_val,
                                        success[key]["value"].get(0)))
                self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Subdoc_delete and verify
            sub_doc_op = "subdoc_delete"
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if fail:
                self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
                                 % (sub_doc_op, key,
                                    sub_doc_key, sub_doc_val, result))
            verification_dict["ops_update"] += 1
            verification_dict["sync_write_committed_count"] += 1
            # A second delete of the same path must report PathNotFound.
            _, fail = client.crud(sub_doc_op, key, sub_doc_key)
            if SDKException.PathNotFoundException \
                    not in str(fail[key]["error"]):
                self.log_failure("Invalid error after sub_doc_delete")
            self.summary.add_step("%s for key %s" % (sub_doc_op, key))
            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(1)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Close SDK client
            client.close()
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_higher_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level > the bucket's durability_level and validate
        """
        d_level_order_len = len(self.d_level_order)
        supported_d_levels = self.get_supported_durability_for_bucket()
        for d_level in supported_d_levels:
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all possible higher durability levels
            # (d_level_order is sorted weakest -> strongest, so walk up).
            index = 0
            op_type = "create"
            durability_index = self.d_level_order.index(d_level) + 1
            while durability_index < d_level_order_len:
                # Ephemeral case: skip levels the bucket type cannot honor
                if self.d_level_order[durability_index] not in supported_d_levels:
                    durability_index += 1
                    continue
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index += 1
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_lower_durability_level_from_client(self):
        """
        Create bucket with durability_levels set and perform CRUDs using
        durability_level < the bucket's d_level and validate
        """
        for d_level in self.get_supported_durability_for_bucket():
            create_desc = "Creating %s bucket with level '%s'" \
                          % (self.bucket_type, d_level)
            verification_dict = self.get_cb_stat_verification_dict()
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            # Perform doc_ops using all possible lower durability levels
            # (walk down the ordered d_level list from just below d_level).
            index = 0
            op_type = "create"
            durability_index = self.d_level_order.index(d_level) - 1
            while durability_index >= 0:
                self.validate_durability_with_crud(
                    bucket_obj,
                    d_level,
                    verification_dict,
                    op_type=op_type,
                    doc_durability=self.d_level_order[durability_index],
                    doc_start_index=index)
                self.summary.add_step("%s with doc_level_durability %s"
                                      % (op_type,
                                         self.d_level_order[durability_index]))
                durability_index -= 1
                index += 10
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_level(self):
        """
        Create buckets with None durability levels and perform doc_ops.
        Update bucket_durability using diag-eval with/without doc_ops in
        parallel and validate the doc_ops results.
        """
        # update_during_ops=True flips durability while load tasks run;
        # False waits for each task to finish first.
        update_during_ops = self.input.param("update_during_ops", False)
        supported_d_levels = self.get_supported_durability_for_bucket()
        supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]
        create_gen_1 = doc_generator(self.key, 0, self.num_items)
        create_gen_2 = doc_generator("random_keys", self.num_items,
                                     self.num_items*2)
        update_gen = doc_generator(self.key, 0, self.num_items/2)
        delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)
        # Override sdk_timeout to max value to avoid TimeoutExceptions
        self.sdk_timeout = 60
        # sample(.., len(..)) == a random permutation of the levels.
        for bucket_durability in sample(supported_bucket_d_levels,
                                        len(supported_bucket_d_levels)):
            # NOTE(review): set(bucket_durability) is a set of the string's
            # CHARACTERS, not {bucket_durability}, so the current level is
            # likely not excluded here. The later per-task pop() relies on
            # this list's length, so "fixing" it would change behavior --
            # confirm intent before touching.
            b_durability_to_update = list(set(supported_bucket_d_levels)
                                          - set(bucket_durability))
            create_desc = "Create %s bucket with durability level '%s'" \
                          % (self.bucket_type, bucket_durability)
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type,
                                               bucket_durability)
            # Object to support performing CRUDs and create Bucket
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj,
                                           wait_for_warmup=True)
            self.get_vbucket_type_mapping(bucket_obj.name)
            self.summary.add_step(create_desc)
            self.bucket_util.print_bucket_stats()
            # Load basic docs to support other CRUDs
            self.log.info("Performing initial doc_load")
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_1, "create",
                exp=self.maxttl,
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=8,
                batch_size=200,
                sdk_client_pool=self.sdk_client_pool)
            self.task_manager.get_task_result(create_task)
            if create_task.fail:
                self.log_failure("Failures seen during initial creates")
            self.summary.add_step("Initial doc_loading")
            # Initiate CRUD task objects (start_task=False: created now,
            # started one-by-one in the loop below)
            create_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, create_gen_2, "create",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            update_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "update",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            read_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, update_gen, "read",
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            delete_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, delete_gen, "delete",
                exp=self.maxttl,
                durability=choice(supported_d_levels),
                compression=self.sdk_compression,
                timeout_secs=self.sdk_timeout,
                process_concurrency=2,
                batch_size=100,
                start_task=False,
                print_ops_rate=False,
                sdk_client_pool=self.sdk_client_pool)
            # Start CRUD and update bucket-durability as specified
            # by config param 'update_during_ops'
            tasks_to_run = [create_task, update_task,
                            read_task, delete_task]
            if self.bucket_type == Bucket.Type.EPHEMERAL:
                # Ephemeral runs only create + one of update/delete.
                tasks_to_run = [create_task,
                                choice([update_task, delete_task])]
                clients = read_task.clients
                # Close clients in unused tasks
                if tasks_to_run[1].op_type == "delete":
                    clients += update_task.clients
                else:
                    clients += delete_task.clients
                for client in clients:
                    client.close()
            for task in tasks_to_run:
                # Pop the next level to switch the bucket to for this task.
                new_d_level = BucketDurability[b_durability_to_update.pop()]
                self.log.info("Starting %s task" % task.op_type)
                self.task_manager.add_new_task(task)
                if update_during_ops:
                    self.sleep(5, "Wait for load_task to start before "
                                  "setting durability=%s" % new_d_level)
                else:
                    self.task_manager.get_task_result(task)
                # Update bucket durability
                self.bucket_util.update_bucket_property(
                    bucket_obj,
                    bucket_durability=new_d_level)
                buckets = self.bucket_util.get_all_buckets()
                if buckets[0].durability_level != new_d_level:
                    self.log_failure("Failed to update bucket_d_level to %s"
                                     % new_d_level)
                self.summary.add_step("Set bucket-durability=%s"
                                      % new_d_level)
                self.bucket_util.print_bucket_stats()
                if update_during_ops:
                    self.task_manager.get_task_result(task)
                if task.fail:
                    self.log_failure("Failures seen during %s"
                                     % task.op_type)
                self.summary.add_step("Doc op %s during bucket durability"
                                      % task.op_type)
            # Delete the bucket on server
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_update_durability_between_doc_op(self):
        """
        1. Create Bucket with durability level set.
        2. Bring down a node such that durability CRUD will wait
        3. Perform doc_op and update bucket_level_durability
        4. Revert scenario induced in step#2, such that doc_op will complete
        5. Make sure doc_ops in step#3 went through using prev. d-level
        """
        # Starting from max_durability levels because to iterate
        # all lower levels for doc_ops with level update
        supported_d_levels = deepcopy(self.d_level_order)
        if self.bucket_type == Bucket.Type.EPHEMERAL:
            supported_d_levels = supported_d_levels[0:2]
        supported_d_levels.reverse()
        # Re-append the (now last) strongest level so the final iteration
        # also gets exercised against the first level.
        supported_d_levels += [supported_d_levels[0]]
        create_desc = "Creating %s bucket with level '%s'" \
                      % (self.bucket_type, supported_d_levels[0])
        self.log.info(create_desc)
        bucket_dict = self.get_bucket_dict(self.bucket_type,
                                           supported_d_levels[0])
        # Object to support performing CRUDs and create Bucket
        bucket_obj = Bucket(bucket_dict)
        self.bucket_util.create_bucket(bucket_obj,
                                       wait_for_warmup=True)
        self.get_vbucket_type_mapping(bucket_obj.name)
        self.summary.add_step(create_desc)
        self.bucket_util.print_bucket_stats()
        # Loop to update all other durability levels
        prev_d_level = supported_d_levels[0]
        for bucket_durability in supported_d_levels[1:]:
            target_vb_type, simulate_error = \
                self.durability_helper.get_vb_and_error_type(bucket_durability)
            # Pick a random node to perform error sim and load
            # (choice() on dict.keys() relies on Python 2 returning a list)
            random_node = choice(self.vbs_in_node.keys())
            error_sim = CouchbaseError(
                self.log,
                self.vbs_in_node[random_node]["shell"])
            target_vbs = self.vbs_in_node[random_node][target_vb_type]
            # One doc targeted at a vbucket owned by the disrupted node.
            doc_gen = doc_generator(self.key, 0, 1,
                                    target_vbucket=target_vbs)
            doc_load_task = self.task.async_load_gen_docs(
                self.cluster, bucket_obj, doc_gen, "update",
                durability=Bucket.DurabilityLevel.NONE,
                timeout_secs=60,
                start_task=False,
                sdk_client_pool=self.sdk_client_pool)
            # Simulate target error condition
            error_sim.create(simulate_error)
            self.sleep(5, "Wait before starting doc_op")
            self.task_manager.add_new_task(doc_load_task)
            new_d_level = BucketDurability[bucket_durability]
            self.sleep(5, "Wait before updating bucket level "
                          "durability=%s" % new_d_level)
            self.bucket_util.update_bucket_property(
                bucket_obj,
                bucket_durability=new_d_level)
            self.bucket_util.print_bucket_stats()
            buckets = self.bucket_util.get_all_buckets()
            if buckets[0].durability_level != new_d_level:
                self.log_failure("Failed to update bucket_d_level to %s"
                                 % new_d_level)
            self.summary.add_step("Set bucket-durability=%s" % new_d_level)
            # With previous level NONE the write must have completed
            # already; with any sync level it must still be blocked.
            if prev_d_level == Bucket.DurabilityLevel.NONE:
                if not doc_load_task.completed:
                    self.log_failure("Doc-op still pending for d_level 'NONE'")
            elif doc_load_task.completed:
                self.log_failure("Doc-op completed before reverting the "
                                 "error condition: %s" % simulate_error)
            # Revert the induced error condition
            error_sim.revert(simulate_error)
            self.task_manager.get_task_result(doc_load_task)
            if doc_load_task.fail:
                self.log_failure("Doc_op failed")
            self.summary.add_step("Doc_op with previous d_level %s"
                                  % prev_d_level)
            prev_d_level = bucket_durability
        # Delete the bucket on server
        self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
        self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sync_write_in_progress(self):
"""
Test to simulate sync_write_in_progress error and validate the behavior
This will validate failure in majority of nodes, where durability will
surely fail for all CRUDs
1. Select nodes to simulate the error which will affect the durability
2. Enable the specified error_scenario on the selected nodes
3. Perform individual CRUDs and verify sync_write_in_progress errors
4. Validate the end results
"""
def test_scenario(bucket, doc_ops,
with_sync_write_val=None):
# Set crud_batch_size
crud_batch_size = 4
simulate_error = CouchbaseError.STOP_MEMCACHED
# Fetch target_vbs for CRUDs
node_vb_info = self.vbs_in_node
target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
if len(target_nodes) > 1:
index = 1
while index < len(target_nodes):
target_vbuckets = list(
set(target_vbuckets).intersection(
set(node_vb_info[target_nodes[index]]["replica"]))
)
index += 1
# Variable to hold one of the doc_generator objects
gen_loader_1 = None
gen_loader_2 = None
# Initialize doc_generators to use for testing
self.log.info("Creating doc_generators")
gen_create = doc_generator(
self.key, self.num_items, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets)
gen_update = doc_generator(
self.key, 0, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets, mutate=1)
gen_delete = doc_generator(
self.key, 0, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets)
self.log.info("Done creating doc_generators")
# Start CRUD operation based on the given 'doc_op' type
if doc_ops[0] == "create":
self.num_items += crud_batch_size
gen_loader_1 = gen_create
elif doc_ops[0] in ["update", "replace", "touch"]:
gen_loader_1 = gen_update
elif doc_ops[0] == "delete":
gen_loader_1 = gen_delete
self.num_items -= crud_batch_size
if doc_ops[1] == "create":
gen_loader_2 = gen_create
elif doc_ops[1] in ["update", "replace", "touch"]:
gen_loader_2 = gen_update
elif doc_ops[1] == "delete":
gen_loader_2 = gen_delete
# Load required docs for doc_op_1 in case of type != create
if doc_op[2] == "load_initial_docs":
doc_loading_task = self.task.async_load_gen_docs(
self.cluster, bucket, gen_loader_1, "create", 0,
batch_size=crud_batch_size, process_concurrency=1,
timeout_secs=10,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
self.task_manager.get_task_result(doc_loading_task)
if doc_loading_task.fail:
self.log_failure("Failure while loading initial docs")
self.summary.add_step("Create docs for %s" % doc_op[0])
verification_dict["ops_create"] += crud_batch_size
verification_dict["sync_write_committed_count"] \
+= crud_batch_size
# Initialize tasks and store the task objects
doc_loader_task = self.task.async_load_gen_docs(
self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
batch_size=crud_batch_size, process_concurrency=8,
timeout_secs=60,
print_ops_rate=False,
start_task=False,
sdk_client_pool=self.sdk_client_pool)
# SDK client for performing individual ops
client = SDKClient([self.cluster.master], bucket)
# Perform specified action
for node in target_nodes:
error_sim = CouchbaseError(self.log,
self.vbs_in_node[node]["shell"])
error_sim.create(simulate_error,
bucket_name=bucket.name)
self.sleep(5, "Wait for error simulation to take effect")
self.task_manager.add_new_task(doc_loader_task)
self.sleep(5, "Wait for task_1 CRUDs to reach server")
# Perform specified CRUD operation on sync_write docs
tem_gen = deepcopy(gen_loader_2)
while tem_gen.has_next():
key, value = tem_gen.next()
for fail_fast in [True, False]:
if with_sync_write_val:
fail = client.crud(doc_ops[1], key, value=value,
exp=0,
durability=with_sync_write_val,
timeout=3, time_unit="seconds",
fail_fast=fail_fast)
else:
fail = client.crud(doc_ops[1], key, value=value,
exp=0,
timeout=3, time_unit="seconds",
fail_fast=fail_fast)
expected_exception = SDKException.AmbiguousTimeoutException
retry_reason = \
SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
if fail_fast:
expected_exception = \
SDKException.RequestCanceledException
retry_reason = \
SDKException.RetryReason \
.KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES
# Validate the returned error from the SDK
if expected_exception not in str(fail["error"]):
self.log_failure("Invalid exception for {0}: {1}"
.format(key, fail["error"]))
if retry_reason not in str(fail["error"]):
self.log_failure("Invalid retry reason for {0}: {1}"
.format(key, fail["error"]))
# Try reading the value in SyncWrite in-progress state
fail = client.crud("read", key)
if doc_ops[0] == "create":
# Expected KeyNotFound in case of CREATE operation
if fail["status"] is True:
self.log_failure(
"%s returned value during SyncWrite state: %s"
% (key, fail))
else:
# Expects prev value in case of other operations
if fail["status"] is False:
self.log_failure(
"Key %s read failed for previous value: %s"
% (key, fail))
# Revert the introduced error condition
for node in target_nodes:
error_sim = CouchbaseError(self.log,
self.vbs_in_node[node]["shell"])
error_sim.revert(simulate_error,
bucket_name=bucket.name)
# Wait for doc_loader_task to complete
self.task.jython_task_manager.get_task_result(doc_loader_task)
verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
verification_dict["sync_write_committed_count"] \
+= crud_batch_size
# Disconnect the client
client.close()
crud_variations = [
["create", "create", ""],
["update", "update", "load_initial_docs"],
["update", "delete", ""],
["update", "touch", ""],
["update", "replace", ""],
["delete", "delete", ""],
["delete", "update", "load_initial_docs"],
["delete", "touch", "load_initial_docs"],
["delete", "replace", "load_initial_docs"]
]
# Select nodes to affect and open required shell_connections
target_nodes = self.getTargetNodes()
for b_d_level in self.possible_d_levels[self.bucket_type]:
# Skip of Bucket durability level 'None'
if b_d_level == Bucket.DurabilityLevel.NONE:
continue
verification_dict = self.get_cb_stat_verification_dict()
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, b_d_level)
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
# Object to support performing CRUDs and create Bucket
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
for doc_op in crud_variations:
test_scenario(bucket_obj, doc_op)
self.summary.add_step("SyncWriteInProgress for [%s, %s]"
% (doc_op[0], doc_op[1]))
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Bucket deletion
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_observe_scenario(self):
"""
Creates bucket with bucket level durability.
Perform CRUD operations and make sure all the operations are
done as sync_write in server.
Note: Passing persistTo/replicateTo will test the observe scenarios
"""
def perform_crud_ops():
old_cas = 0
client = SDKClient([self.cluster.master], bucket_obj)
for op_type in ["create", "update", "read", "replace", "delete"]:
crud_desc = "Key %s, doc_op: %s" % (key, op_type)
self.log.info(crud_desc)
result = client.crud(op_type, key, value,
replicate_to=self.replicate_to,
persist_to=self.persist_to)
if op_type != "read":
if op_type != "replace":
dict_key = "ops_%s" % op_type
else:
dict_key = "ops_update"
verification_dict[dict_key] += 1
verification_dict["sync_write_committed_count"] += 1
if result["cas"] == old_cas:
self.log_failure("CAS didn't get updated: %s"
% result["cas"])
elif op_type == "read":
if result["cas"] != old_cas:
self.log_failure("CAS updated for read operation: %s"
% result["cas"])
self.summary.add_step(crud_desc)
old_cas = result["cas"]
client.close()
doc_gen = doc_generator("test_key", 0, 1, mutate=0)
key, value = doc_gen.next()
for d_level in self.possible_d_levels[self.bucket_type]:
if d_level == Bucket.DurabilityLevel.NONE:
continue
create_desc = "Create bucket with durability %s" % d_level
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Object to support performing CRUDs
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
self.summary.add_step(create_desc)
verification_dict = self.get_cb_stat_verification_dict()
# Test CRUD operations
perform_crud_ops()
# Validate doc_count
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(0)
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the created bucket
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete bucket with d_level %s" % d_level)
def test_durability_impossible(self):
"""
Create bucket with replica > num_kv_nodes.
Perform doc insert to make sure we get TimeoutException due to
durability_impossible from the server.
"""
verification_dict = self.get_cb_stat_verification_dict()
key, value = doc_generator("test_key", 0, 1).next()
for d_level in self.possible_d_levels[self.bucket_type]:
if d_level == Bucket.DurabilityLevel.NONE:
continue
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
# Object to support performing CRUDs
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
self.summary.add_step("Create bucket with durability %s"
% d_level)
client = SDKClient([self.cluster.master], bucket_obj)
result = client.crud("create", key, value, timeout=3)
if result["status"] is True \
or SDKException.DurabilityImpossibleException \
not in result["error"]:
self.log_failure("Indirect sync_write succeeded "
"without enough nodes")
client.close()
# Cbstats vbucket-details validation
self.cb_stat_verify(verification_dict)
# Delete the created bucket
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete bucket with d_level %s" % d_level)
| 44.855497 | 82 | 0.572682 | from copy import deepcopy
from random import sample, choice
from BucketLib.bucket import Bucket
from cb_tools.cb_cli import CbCli
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import BucketDurability
from epengine.durability_base import BucketDurabilityBase
from error_simulation.cb_error import CouchbaseError
from sdk_client3 import SDKClient
from sdk_exceptions import SDKException
class CreateBucketTests(BucketDurabilityBase):
def setUp(self):
super(CreateBucketTests, self).setUp()
def tearDown(self):
super(CreateBucketTests, self).tearDown()
def test_create_bucket_using_cli(self):
shell = self.vbs_in_node[self.cluster.master]["shell"]
cb_cli = CbCli(shell)
for d_level in self.bucket_util.get_supported_durability_levels():
create_failed = False
test_step = "Creating %s bucket with level %s" \
% (self.bucket_type, d_level)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
if self.bucket_type == Bucket.Type.MEMCACHED:
del bucket_dict[Bucket.replicaNumber]
bucket_obj = Bucket(bucket_dict)
output = cb_cli.create_bucket(bucket_dict, wait=True)
self.get_vbucket_type_mapping(bucket_obj.name)
if "SUCCESS: Bucket created" not in str(output):
create_failed = True
if d_level in self.possible_d_levels[self.bucket_type]:
self.log_failure("Create failed for %s bucket "
"with min_durability_level %s"
% (self.bucket_type, d_level))
self.bucket_util.buckets = [bucket_obj]
self.bucket_util.print_bucket_stats()
self.summary.add_step(test_step)
if not create_failed:
verification_dict = self.get_cb_stat_verification_dict()
self.validate_durability_with_crud(bucket_obj, d_level,
verification_dict)
self.summary.add_step("Validate_CRUD_operation")
self.cb_stat_verify(verification_dict)
output = cb_cli.delete_bucket(bucket_obj.name)
if create_failed:
if "ERROR: Bucket not found" not in str(output):
self.log_failure("Mismatch in bucket-delete output")
elif "SUCCESS: Bucket deleted" not in str(output):
self.log_failure("Mismatch in bucket-delete output")
self.summary.add_step("Delete bucket")
def test_create_bucket_using_rest(self):
for d_level in self.bucket_util.get_supported_durability_levels():
create_failed = False
test_step = "Creating %s bucket with level %s" \
% (self.bucket_type, d_level)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
try:
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
if d_level not in self.possible_d_levels[self.bucket_type]:
self.log_failure("Create succeeded for %s bucket for "
"unsupported durability %s"
% (self.bucket_type, d_level))
except Exception as rest_exception:
create_failed = True
self.log.info(rest_exception)
self.bucket_util.print_bucket_stats()
self.summary.add_step(test_step)
if not create_failed:
verification_dict = self.get_cb_stat_verification_dict()
self.validate_durability_with_crud(bucket_obj, d_level,
verification_dict)
self.summary.add_step("Validate CRUD operation")
self.cb_stat_verify(verification_dict)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Bucket deletion")
class BucketDurabilityTests(BucketDurabilityBase):
def setUp(self):
super(BucketDurabilityTests, self).setUp()
def tearDown(self):
super(BucketDurabilityTests, self).tearDown()
def test_durability_with_bucket_level_none(self):
create_desc = "Creating %s bucket with level 'None'" % self.bucket_type
b_durability = Bucket.DurabilityLevel.NONE
verification_dict = self.get_cb_stat_verification_dict()
bucket_dict = self.get_bucket_dict(self.bucket_type, b_durability)
self.log.info(create_desc)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
index = 0
for d_level in self.get_supported_durability_for_bucket():
self.validate_durability_with_crud(bucket_obj, b_durability,
verification_dict,
doc_durability=d_level,
doc_start_index=index)
self.summary.add_step("CRUD with doc_durability %s" % d_level)
self.cb_stat_verify(verification_dict)
index += 10
def test_ops_only_with_bucket_level_durability(self):
for d_level in self.get_supported_durability_for_bucket():
if d_level == Bucket.DurabilityLevel.NONE:
continue
step_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(step_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(step_desc)
self.validate_durability_with_crud(bucket_obj, d_level,
verification_dict)
self.summary.add_step("Async write with bucket durability %s"
% d_level)
self.cb_stat_verify(verification_dict)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sub_doc_op_with_bucket_level_durability(self):
key, value = doc_generator("test_key", 0, 1).next()
sub_doc_key = "sub_doc_key"
sub_doc_vals = ["val_1", "val_2", "val_3", "val_4", "val_5"]
for d_level in self.get_supported_durability_for_bucket():
if d_level == Bucket.DurabilityLevel.NONE:
continue
step_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(step_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.summary.add_step(step_desc)
client = SDKClient([self.cluster.master], bucket_obj)
result = client.crud("create", key, value)
verification_dict["ops_create"] += 1
verification_dict["sync_write_committed_count"] += 1
if result["status"] is False:
self.log_failure("Doc insert failed for key: %s" % key)
for sub_doc_op in ["subdoc_insert", "subdoc_upsert",
"subdoc_replace"]:
sub_doc_val = choice(sub_doc_vals)
_, fail = client.crud(sub_doc_op, key,
[sub_doc_key, sub_doc_val])
if fail:
self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
% (sub_doc_op, key,
sub_doc_key, sub_doc_val, result))
else:
verification_dict["ops_update"] += 1
verification_dict["sync_write_committed_count"] += 1
success, fail = client.crud("subdoc_read", key, sub_doc_key)
if fail or str(success[key]["value"].get(0)) != sub_doc_val:
self.log_failure("%s failed. Expected: %s, Actual: %s"
% (sub_doc_op, sub_doc_val,
success[key]["value"].get(0)))
self.summary.add_step("%s for key %s" % (sub_doc_op, key))
sub_doc_op = "subdoc_delete"
_, fail = client.crud(sub_doc_op, key, sub_doc_key)
if fail:
self.log_failure("%s failure. Key %s, sub_doc (%s, %s): %s"
% (sub_doc_op, key,
sub_doc_key, sub_doc_val, result))
verification_dict["ops_update"] += 1
verification_dict["sync_write_committed_count"] += 1
_, fail = client.crud(sub_doc_op, key, sub_doc_key)
if SDKException.PathNotFoundException \
not in str(fail[key]["error"]):
self.log_failure("Invalid error after sub_doc_delete")
self.summary.add_step("%s for key %s" % (sub_doc_op, key))
self.bucket_util._wait_for_stats_all_buckets()
self.bucket_util.verify_stats_all_buckets(1)
self.cb_stat_verify(verification_dict)
client.close()
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_higher_durability_level_from_client(self):
d_level_order_len = len(self.d_level_order)
supported_d_levels = self.get_supported_durability_for_bucket()
for d_level in supported_d_levels:
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
index = 0
op_type = "create"
durability_index = self.d_level_order.index(d_level) + 1
while durability_index < d_level_order_len:
if self.d_level_order[durability_index] not in supported_d_levels:
durability_index += 1
continue
self.validate_durability_with_crud(
bucket_obj,
d_level,
verification_dict,
op_type=op_type,
doc_durability=self.d_level_order[durability_index],
doc_start_index=index)
self.summary.add_step("%s with doc_level_durability %s"
% (op_type,
self.d_level_order[durability_index]))
durability_index += 1
index += 10
self.cb_stat_verify(verification_dict)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_lower_durability_level_from_client(self):
for d_level in self.get_supported_durability_for_bucket():
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, d_level)
verification_dict = self.get_cb_stat_verification_dict()
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
index = 0
op_type = "create"
durability_index = self.d_level_order.index(d_level) - 1
while durability_index >= 0:
self.validate_durability_with_crud(
bucket_obj,
d_level,
verification_dict,
op_type=op_type,
doc_durability=self.d_level_order[durability_index],
doc_start_index=index)
self.summary.add_step("%s with doc_level_durability %s"
% (op_type,
self.d_level_order[durability_index]))
durability_index -= 1
index += 10
self.cb_stat_verify(verification_dict)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_level(self):
update_during_ops = self.input.param("update_during_ops", False)
supported_d_levels = self.get_supported_durability_for_bucket()
supported_bucket_d_levels = self.possible_d_levels[self.bucket_type]
create_gen_1 = doc_generator(self.key, 0, self.num_items)
create_gen_2 = doc_generator("random_keys", self.num_items,
self.num_items*2)
update_gen = doc_generator(self.key, 0, self.num_items/2)
delete_gen = doc_generator(self.key, self.num_items/2, self.num_items)
self.sdk_timeout = 60
for bucket_durability in sample(supported_bucket_d_levels,
len(supported_bucket_d_levels)):
b_durability_to_update = list(set(supported_bucket_d_levels)
- set(bucket_durability))
create_desc = "Create %s bucket with durability level '%s'" \
% (self.bucket_type, bucket_durability)
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type,
bucket_durability)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
self.bucket_util.print_bucket_stats()
self.log.info("Performing initial doc_load")
create_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, create_gen_1, "create",
exp=self.maxttl,
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
process_concurrency=8,
batch_size=200,
sdk_client_pool=self.sdk_client_pool)
self.task_manager.get_task_result(create_task)
if create_task.fail:
self.log_failure("Failures seen during initial creates")
self.summary.add_step("Initial doc_loading")
create_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, create_gen_2, "create",
exp=self.maxttl,
durability=choice(supported_d_levels),
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
process_concurrency=2,
batch_size=100,
start_task=False,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
update_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, update_gen, "update",
exp=self.maxttl,
durability=choice(supported_d_levels),
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
process_concurrency=2,
batch_size=100,
start_task=False,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
read_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, update_gen, "read",
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
process_concurrency=2,
batch_size=100,
start_task=False,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
delete_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, delete_gen, "delete",
exp=self.maxttl,
durability=choice(supported_d_levels),
compression=self.sdk_compression,
timeout_secs=self.sdk_timeout,
process_concurrency=2,
batch_size=100,
start_task=False,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
tasks_to_run = [create_task, update_task,
read_task, delete_task]
if self.bucket_type == Bucket.Type.EPHEMERAL:
tasks_to_run = [create_task,
choice([update_task, delete_task])]
clients = read_task.clients
if tasks_to_run[1].op_type == "delete":
clients += update_task.clients
else:
clients += delete_task.clients
for client in clients:
client.close()
for task in tasks_to_run:
new_d_level = BucketDurability[b_durability_to_update.pop()]
self.log.info("Starting %s task" % task.op_type)
self.task_manager.add_new_task(task)
if update_during_ops:
self.sleep(5, "Wait for load_task to start before "
"setting durability=%s" % new_d_level)
else:
self.task_manager.get_task_result(task)
self.bucket_util.update_bucket_property(
bucket_obj,
bucket_durability=new_d_level)
buckets = self.bucket_util.get_all_buckets()
if buckets[0].durability_level != new_d_level:
self.log_failure("Failed to update bucket_d_level to %s"
% new_d_level)
self.summary.add_step("Set bucket-durability=%s"
% new_d_level)
self.bucket_util.print_bucket_stats()
if update_during_ops:
self.task_manager.get_task_result(task)
if task.fail:
self.log_failure("Failures seen during %s"
% task.op_type)
self.summary.add_step("Doc op %s during bucket durability"
% task.op_type)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_update_durability_between_doc_op(self):
supported_d_levels = deepcopy(self.d_level_order)
if self.bucket_type == Bucket.Type.EPHEMERAL:
supported_d_levels = supported_d_levels[0:2]
supported_d_levels.reverse()
supported_d_levels += [supported_d_levels[0]]
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, supported_d_levels[0])
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type,
supported_d_levels[0])
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
self.bucket_util.print_bucket_stats()
prev_d_level = supported_d_levels[0]
for bucket_durability in supported_d_levels[1:]:
target_vb_type, simulate_error = \
self.durability_helper.get_vb_and_error_type(bucket_durability)
random_node = choice(self.vbs_in_node.keys())
error_sim = CouchbaseError(
self.log,
self.vbs_in_node[random_node]["shell"])
target_vbs = self.vbs_in_node[random_node][target_vb_type]
doc_gen = doc_generator(self.key, 0, 1,
target_vbucket=target_vbs)
doc_load_task = self.task.async_load_gen_docs(
self.cluster, bucket_obj, doc_gen, "update",
durability=Bucket.DurabilityLevel.NONE,
timeout_secs=60,
start_task=False,
sdk_client_pool=self.sdk_client_pool)
error_sim.create(simulate_error)
self.sleep(5, "Wait before starting doc_op")
self.task_manager.add_new_task(doc_load_task)
new_d_level = BucketDurability[bucket_durability]
self.sleep(5, "Wait before updating bucket level "
"durability=%s" % new_d_level)
self.bucket_util.update_bucket_property(
bucket_obj,
bucket_durability=new_d_level)
self.bucket_util.print_bucket_stats()
buckets = self.bucket_util.get_all_buckets()
if buckets[0].durability_level != new_d_level:
self.log_failure("Failed to update bucket_d_level to %s"
% new_d_level)
self.summary.add_step("Set bucket-durability=%s" % new_d_level)
if prev_d_level == Bucket.DurabilityLevel.NONE:
if not doc_load_task.completed:
self.log_failure("Doc-op still pending for d_level 'NONE'")
elif doc_load_task.completed:
self.log_failure("Doc-op completed before reverting the "
"error condition: %s" % simulate_error)
error_sim.revert(simulate_error)
self.task_manager.get_task_result(doc_load_task)
if doc_load_task.fail:
self.log_failure("Doc_op failed")
self.summary.add_step("Doc_op with previous d_level %s"
% prev_d_level)
prev_d_level = bucket_durability
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
def test_sync_write_in_progress(self):
def test_scenario(bucket, doc_ops,
with_sync_write_val=None):
crud_batch_size = 4
simulate_error = CouchbaseError.STOP_MEMCACHED
node_vb_info = self.vbs_in_node
target_vbuckets = node_vb_info[target_nodes[0]]["replica"]
if len(target_nodes) > 1:
index = 1
while index < len(target_nodes):
target_vbuckets = list(
set(target_vbuckets).intersection(
set(node_vb_info[target_nodes[index]]["replica"]))
)
index += 1
gen_loader_1 = None
gen_loader_2 = None
self.log.info("Creating doc_generators")
gen_create = doc_generator(
self.key, self.num_items, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets)
gen_update = doc_generator(
self.key, 0, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets, mutate=1)
gen_delete = doc_generator(
self.key, 0, crud_batch_size,
vbuckets=self.cluster_util.vbuckets,
target_vbucket=target_vbuckets)
self.log.info("Done creating doc_generators")
if doc_ops[0] == "create":
self.num_items += crud_batch_size
gen_loader_1 = gen_create
elif doc_ops[0] in ["update", "replace", "touch"]:
gen_loader_1 = gen_update
elif doc_ops[0] == "delete":
gen_loader_1 = gen_delete
self.num_items -= crud_batch_size
if doc_ops[1] == "create":
gen_loader_2 = gen_create
elif doc_ops[1] in ["update", "replace", "touch"]:
gen_loader_2 = gen_update
elif doc_ops[1] == "delete":
gen_loader_2 = gen_delete
if doc_op[2] == "load_initial_docs":
doc_loading_task = self.task.async_load_gen_docs(
self.cluster, bucket, gen_loader_1, "create", 0,
batch_size=crud_batch_size, process_concurrency=1,
timeout_secs=10,
print_ops_rate=False,
sdk_client_pool=self.sdk_client_pool)
self.task_manager.get_task_result(doc_loading_task)
if doc_loading_task.fail:
self.log_failure("Failure while loading initial docs")
self.summary.add_step("Create docs for %s" % doc_op[0])
verification_dict["ops_create"] += crud_batch_size
verification_dict["sync_write_committed_count"] \
+= crud_batch_size
doc_loader_task = self.task.async_load_gen_docs(
self.cluster, bucket, gen_loader_1, doc_ops[0], 0,
batch_size=crud_batch_size, process_concurrency=8,
timeout_secs=60,
print_ops_rate=False,
start_task=False,
sdk_client_pool=self.sdk_client_pool)
client = SDKClient([self.cluster.master], bucket)
for node in target_nodes:
error_sim = CouchbaseError(self.log,
self.vbs_in_node[node]["shell"])
error_sim.create(simulate_error,
bucket_name=bucket.name)
self.sleep(5, "Wait for error simulation to take effect")
self.task_manager.add_new_task(doc_loader_task)
self.sleep(5, "Wait for task_1 CRUDs to reach server")
tem_gen = deepcopy(gen_loader_2)
while tem_gen.has_next():
key, value = tem_gen.next()
for fail_fast in [True, False]:
if with_sync_write_val:
fail = client.crud(doc_ops[1], key, value=value,
exp=0,
durability=with_sync_write_val,
timeout=3, time_unit="seconds",
fail_fast=fail_fast)
else:
fail = client.crud(doc_ops[1], key, value=value,
exp=0,
timeout=3, time_unit="seconds",
fail_fast=fail_fast)
expected_exception = SDKException.AmbiguousTimeoutException
retry_reason = \
SDKException.RetryReason.KV_SYNC_WRITE_IN_PROGRESS
if fail_fast:
expected_exception = \
SDKException.RequestCanceledException
retry_reason = \
SDKException.RetryReason \
.KV_SYNC_WRITE_IN_PROGRESS_NO_MORE_RETRIES
if expected_exception not in str(fail["error"]):
self.log_failure("Invalid exception for {0}: {1}"
.format(key, fail["error"]))
if retry_reason not in str(fail["error"]):
self.log_failure("Invalid retry reason for {0}: {1}"
.format(key, fail["error"]))
fail = client.crud("read", key)
if doc_ops[0] == "create":
if fail["status"] is True:
self.log_failure(
"%s returned value during SyncWrite state: %s"
% (key, fail))
else:
if fail["status"] is False:
self.log_failure(
"Key %s read failed for previous value: %s"
% (key, fail))
for node in target_nodes:
error_sim = CouchbaseError(self.log,
self.vbs_in_node[node]["shell"])
error_sim.revert(simulate_error,
bucket_name=bucket.name)
self.task.jython_task_manager.get_task_result(doc_loader_task)
verification_dict["ops_%s" % doc_op[0]] += crud_batch_size
verification_dict["sync_write_committed_count"] \
+= crud_batch_size
client.close()
crud_variations = [
["create", "create", ""],
["update", "update", "load_initial_docs"],
["update", "delete", ""],
["update", "touch", ""],
["update", "replace", ""],
["delete", "delete", ""],
["delete", "update", "load_initial_docs"],
["delete", "touch", "load_initial_docs"],
["delete", "replace", "load_initial_docs"]
]
target_nodes = self.getTargetNodes()
for b_d_level in self.possible_d_levels[self.bucket_type]:
if b_d_level == Bucket.DurabilityLevel.NONE:
continue
verification_dict = self.get_cb_stat_verification_dict()
create_desc = "Creating %s bucket with level '%s'" \
% (self.bucket_type, b_d_level)
self.log.info(create_desc)
bucket_dict = self.get_bucket_dict(self.bucket_type, b_d_level)
bucket_obj = Bucket(bucket_dict)
self.bucket_util.create_bucket(bucket_obj,
wait_for_warmup=True)
self.get_vbucket_type_mapping(bucket_obj.name)
self.summary.add_step(create_desc)
for doc_op in crud_variations:
test_scenario(bucket_obj, doc_op)
self.summary.add_step("SyncWriteInProgress for [%s, %s]"
% (doc_op[0], doc_op[1]))
self.cb_stat_verify(verification_dict)
self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
self.summary.add_step("Delete %s bucket" % self.bucket_type)
    def test_observe_scenario(self):
        """Exercise plain CRUDs with observe-style durability settings.

        For every durability level supported by the bucket type (NONE is
        skipped), create a bucket, run a create/update/read/replace/delete
        cycle on one key using ``replicate_to``/``persist_to``, validate
        cbstats counters and doc count, then delete the bucket.
        """
        def perform_crud_ops():
            # One full CRUD cycle against ``bucket_obj`` (closure vars:
            # key, value, verification_dict, bucket_obj).  Tracks the CAS
            # returned by each op to verify mutation/read semantics.
            old_cas = 0
            client = SDKClient([self.cluster.master], bucket_obj)
            for op_type in ["create", "update", "read", "replace", "delete"]:
                crud_desc = "Key %s, doc_op: %s" % (key, op_type)
                self.log.info(crud_desc)
                result = client.crud(op_type, key, value,
                                     replicate_to=self.replicate_to,
                                     persist_to=self.persist_to)
                if op_type != "read":
                    # "replace" is accounted under the same cbstats key as
                    # "update"; every mutation also bumps the committed count.
                    if op_type != "replace":
                        dict_key = "ops_%s" % op_type
                    else:
                        dict_key = "ops_update"
                    verification_dict[dict_key] += 1
                    verification_dict["sync_write_committed_count"] += 1
                    # A successful mutation must yield a fresh CAS value.
                    if result["cas"] == old_cas:
                        self.log_failure("CAS didn't get updated: %s"
                                         % result["cas"])
                elif op_type == "read":
                    # Reads must leave the CAS unchanged.
                    if result["cas"] != old_cas:
                        self.log_failure("CAS updated for read operation: %s"
                                         % result["cas"])
                self.summary.add_step(crud_desc)
                old_cas = result["cas"]
            client.close()
        doc_gen = doc_generator("test_key", 0, 1, mutate=0)
        key, value = doc_gen.next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            create_desc = "Create bucket with durability %s" % d_level
            self.log.info(create_desc)
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
            self.summary.add_step(create_desc)
            verification_dict = self.get_cb_stat_verification_dict()
            # Test CRUD operations
            perform_crud_ops()
            # Validate doc_count
            self.bucket_util._wait_for_stats_all_buckets()
            self.bucket_util.verify_stats_all_buckets(0)
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
    def test_durability_impossible(self):
        """Verify durable writes fail fast when durability cannot be met.

        For each durability level (NONE skipped), create a bucket whose
        replica requirement cannot be satisfied by the current cluster and
        assert a durable create fails with DurabilityImpossibleException
        rather than succeeding.
        """
        verification_dict = self.get_cb_stat_verification_dict()
        key, value = doc_generator("test_key", 0, 1).next()
        for d_level in self.possible_d_levels[self.bucket_type]:
            if d_level == Bucket.DurabilityLevel.NONE:
                continue
            bucket_dict = self.get_bucket_dict(self.bucket_type, d_level)
            # Object to support performing CRUDs
            bucket_obj = Bucket(bucket_dict)
            self.bucket_util.create_bucket(bucket_obj, wait_for_warmup=True)
            self.summary.add_step("Create bucket with durability %s"
                                  % d_level)
            client = SDKClient([self.cluster.master], bucket_obj)
            # This create must fail with DurabilityImpossible since there
            # are not enough nodes to honour the bucket's level.
            result = client.crud("create", key, value, timeout=3)
            if result["status"] is True \
                    or SDKException.DurabilityImpossibleException \
                    not in result["error"]:
                self.log_failure("Indirect sync_write succeeded "
                                 "without enough nodes")
            client.close()
            # Cbstats vbucket-details validation
            self.cb_stat_verify(verification_dict)
            # Delete the created bucket
            self.bucket_util.delete_bucket(self.cluster.master, bucket_obj)
            self.summary.add_step("Delete bucket with d_level %s" % d_level)
| true | true |
f7215eaffbd11774be2c8286cbada91c47ac6b09 | 1,136 | py | Python | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | tools/getTask.py | Sh4der/adventofcode | 1823d7bac33778ef850e384d914843a0c0ded869 | [
"MIT"
] | null | null | null | import html2markdown as h2m
import urllib.request
import re
from sys import argv, exit


def extract_task_html(page_html):
    """Return the inner HTML of the first task <article> on a puzzle page.

    Raises ValueError (from str.index) if the page has no task section.
    """
    task_begin = '<article class="day-desc">'
    task_end = '</article>'
    start = page_html.index(task_begin) + len(task_begin)
    end = page_html.index(task_end, start)
    return page_html[start:end]


def polish_markdown(markdown_task):
    """Clean html2markdown output: bold emphasis, absolute links, no tags."""
    # The puzzle highlights key phrases with <em>; render them as bold.
    markdown_task = re.sub(r'<em.*?>', '**', markdown_task)
    markdown_task = re.sub(r'</em>', '**', markdown_task)
    # Turn site-relative markdown links into absolute ones.
    markdown_task = re.sub(r'(\[.*?\]\()/(.*?\))',
                           r'\1https://adventofcode.com/\2', markdown_task)
    # Strip any remaining HTML tags.
    markdown_task = re.sub(r'<.*?>', '', markdown_task)
    return markdown_task


if __name__ == '__main__':
    # Exactly two arguments are required: the puzzle day and the output
    # path.  (The original accepted len(argv) == 2 and then crashed with
    # IndexError reading argv[2].)
    if len(argv) != 3:
        exit(1)
    day = argv[1]
    # Advent of Code publishes puzzles for days 1..25; the original
    # range(1, 24) wrongly rejected days 24 and 25.
    if int(day) not in range(1, 26):
        exit(1)
    destination_file_path = argv[2]
    url = "https://adventofcode.com/2020/day/" + day
    with urllib.request.urlopen(url) as response:
        # Decode properly instead of str(bytes), which produced literal
        # "\n" escape sequences that then needed hand patching.
        content = response.read().decode('utf-8')
    html_task = extract_task_html(content)
    print(html_task)
    markdown_task = h2m.convert(html_task)
    markdown_task = polish_markdown(markdown_task)
    with open(destination_file_path, "w") as destination_file:
        destination_file.write(markdown_task)
| 28.4 | 96 | 0.626761 | import html2markdown as h2m
import urllib.request
import re
from sys import argv, exit
if __name__ == '__main__':
if len(argv) not in (2, 3): exit(1)
day = argv[1]
if int(day) not in range(1, 24):
exit(1)
destinationFilePath = argv[2]
url = "https://adventofcode.com/2020/day/" + day
response = urllib.request.urlopen(url)
content = str(response.read())
taskBegin = '<article class="day-desc">'
taskEnd = '</article>'
htmlTask = content[content.index(taskBegin) + len(taskBegin):content.index(taskEnd)]
print(htmlTask)
markdownTask = h2m.convert(htmlTask)
markdownTask = markdownTask.replace('\\n', '\n\t')
markdownTask = markdownTask.replace("\\\\'", "'")
markdownTask = re.sub('<em.*?>', '**', markdownTask)
markdownTask = re.sub('</em>', '**', markdownTask)
markdownTask = re.sub('(\[.*?\]\()/(.*?\))', r'\1https://adventofcode.com/\2', markdownTask)
markdownTask = re.sub('<.*?>', '', markdownTask)
destinationFile = open(destinationFilePath, "w")
destinationFile.write(format(markdownTask))
destinationFile.close()
| true | true |
f7215eeae590fcac6dd15874392f9bd3361b29e0 | 1,294 | py | Python | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | wikum-env3/lib/python3.7/site-packages/sumy/models/dom/_sentence.py | xuericlin/wikum | f0171f1697efa91d6957f976f473c9201db85648 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ...utils import cached_property
from ..._compat import to_unicode, to_string, unicode_compatible
@unicode_compatible
class Sentence(object):
    """A single sentence (or heading) of a parsed document.

    Instances behave as immutable value objects: equality and hashing are
    based on the heading flag and the normalized (stripped) text.
    """
    # _cached_property_words is the storage slot used by @cached_property.
    __slots__ = ("_text", "_cached_property_words", "_tokenizer", "_is_heading",)

    def __init__(self, text, tokenizer, is_heading=False):
        self._text = to_unicode(text).strip()
        self._tokenizer = tokenizer
        self._is_heading = bool(is_heading)

    @cached_property
    def words(self):
        """Word tokens of the sentence (tokenized once, then cached)."""
        return self._tokenizer.to_words(self._text)

    @property
    def is_heading(self):
        """True if this sentence is a document heading."""
        return self._is_heading

    def __eq__(self, sentence):
        # Bug fix: the original ``assert isinstance(...)`` raised
        # AssertionError when compared against a non-Sentence and is
        # silently stripped under ``python -O``.  Returning NotImplemented
        # lets Python fall back to the other operand's comparison.
        if not isinstance(sentence, Sentence):
            return NotImplemented
        return self._is_heading is sentence._is_heading and self._text == sentence._text

    def __ne__(self, sentence):
        result = self.__eq__(sentence)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash((self._is_heading, self._text))

    def __unicode__(self):
        # @unicode_compatible maps this to __str__ on Python 3.
        return self._text

    def __repr__(self):
        return to_string("<%s: %s>") % (
            "Heading" if self._is_heading else "Sentence",
            self.__str__()
        )
| 28.755556 | 89 | 0.651468 |
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from ...utils import cached_property
from ..._compat import to_unicode, to_string, unicode_compatible
@unicode_compatible
class Sentence(object):
__slots__ = ("_text", "_cached_property_words", "_tokenizer", "_is_heading",)
def __init__(self, text, tokenizer, is_heading=False):
self._text = to_unicode(text).strip()
self._tokenizer = tokenizer
self._is_heading = bool(is_heading)
@cached_property
def words(self):
return self._tokenizer.to_words(self._text)
@property
def is_heading(self):
return self._is_heading
def __eq__(self, sentence):
assert isinstance(sentence, Sentence)
return self._is_heading is sentence._is_heading and self._text == sentence._text
def __ne__(self, sentence):
return not self.__eq__(sentence)
def __hash__(self):
return hash((self._is_heading, self._text))
def __unicode__(self):
return self._text
def __repr__(self):
return to_string("<%s: %s>") % (
"Heading" if self._is_heading else "Sentence",
self.__str__()
)
| true | true |
f7216012bdabcc6a4f76ac1521c5236c58f42c7a | 393 | py | Python | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | 10 | 2021-12-09T04:39:03.000Z | 2022-02-07T05:42:29.000Z | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | 2 | 2022-02-07T18:12:54.000Z | 2022-02-10T10:27:37.000Z | bookitoBackend/User/urls.py | mazdakdev/Bookito | 38e18fee22aafea95429da01e9769acf2748f676 | [
"MIT"
] | null | null | null | from django.urls import path
from .api import *
from knox import views as knox_views  # NOTE(review): knox_views appears unused in this module -- confirm (e.g. planned knox logout views) before removing
urlpatterns = [
    #domain.dn/api/v1/register/ | POST
    path('register/' , SignUpAPI.as_view() , name='register'),
    #domain.dn/api/v1/login/ | POST  (comment fixed: this route is login, not register)
    path('login/' , SignInAPI.as_view() , name='login'),
    #domain.dn/api/v1/user | GET
    path('user/', MainUser.as_view() , name='user'),
] | 21.833333 | 62 | 0.64631 | from django.urls import path
from .api import *
from knox import views as knox_views
urlpatterns = [
path('register/' , SignUpAPI.as_view() , name='register'),
path('login/' , SignInAPI.as_view() , name='login'),
path('user/', MainUser.as_view() , name='user'),
] | true | true |
f7216046bb8fc44df661da3c65b4c665932b2bf6 | 1,845 | py | Python | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | gdsfactory/simulation/simphony/components/ring_double_siepic.py | simbilod/gdsfactory | 4d76db32674c3edb4d16260e3177ee29ef9ce11d | [
"MIT"
] | null | null | null | from simphony.library import siepic
from simphony.netlist import Subcircuit
def ring_double_siepic(
    wg_width=0.5,
    gap=0.2,
    length_x=4,
    bend_radius=5,
    length_y=2,
    coupler=siepic.ebeam_dc_halfring_straight,
    straight=siepic.ebeam_wg_integral_1550,
    terminator=siepic.ebeam_terminator_te1550,
):
    r"""Return a double-bus ring circuit model made of two half-ring
    couplers (ct: top, cb: bottom) connected by two vertical straights
    (wl: left, wr: right).

    .. code::

         --==ct==--   -> drop (n1) / cdrop (n3)
        |          |
        wl         wr
        |          |
         --==cb==--   -> input (n2) / output (n4)

    ``coupler`` and ``straight`` may be simphony model classes or
    instances; classes are instantiated with their defaults.

    NOTE(review): wg_width, gap, length_x, bend_radius, length_y and
    terminator are accepted but never used below -- the geometry values
    never reach the simphony models.  Confirm whether they should be
    forwarded before relying on them.
    """
    straight = straight() if callable(straight) else straight
    coupler = coupler() if callable(coupler) else coupler
    # Create the circuit, add all individual instances
    # (NOTE(review): the subcircuit name "mzi" looks copy-pasted from an
    # MZI example even though this builds a ring).
    circuit = Subcircuit("mzi")
    circuit.add([(coupler, "ct"), (coupler, "cb"), (straight, "wl"), (straight, "wr")])
    # Circuits can be connected using the elements' string names:
    circuit.connect_many(
        [
            ("cb", "n1", "wl", "n1"),
            ("wl", "n2", "ct", "n2"),
            ("ct", "n4", "wr", "n1"),
            ("wr", "n2", "cb", "n3"),
        ]
    )
    # Expose the four external ports under descriptive pin names.
    circuit.elements["cb"].pins["n2"] = "input"
    circuit.elements["cb"].pins["n4"] = "output"
    circuit.elements["ct"].pins["n1"] = "drop"
    circuit.elements["ct"].pins["n3"] = "cdrop"
    return circuit
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Bug fix: the plot helper lives in the ``gdsfactory.simulation.simphony``
    # package (this file's own package path); the original
    # ``gdsfactory.simulationsimphony`` is missing a dot and raises
    # ImportError.
    from gdsfactory.simulation.simphony import plot_circuit

    c = ring_double_siepic()
    plot_circuit(c)
    plt.show()
| 25.273973 | 87 | 0.539837 | from simphony.library import siepic
from simphony.netlist import Subcircuit
def ring_double_siepic(
wg_width=0.5,
gap=0.2,
length_x=4,
bend_radius=5,
length_y=2,
coupler=siepic.ebeam_dc_halfring_straight,
straight=siepic.ebeam_wg_integral_1550,
terminator=siepic.ebeam_terminator_te1550,
):
straight = straight() if callable(straight) else straight
coupler = coupler() if callable(coupler) else coupler
circuit = Subcircuit("mzi")
circuit.add([(coupler, "ct"), (coupler, "cb"), (straight, "wl"), (straight, "wr")])
circuit.connect_many(
[
("cb", "n1", "wl", "n1"),
("wl", "n2", "ct", "n2"),
("ct", "n4", "wr", "n1"),
("wr", "n2", "cb", "n3"),
]
)
circuit.elements["cb"].pins["n2"] = "input"
circuit.elements["cb"].pins["n4"] = "output"
circuit.elements["ct"].pins["n1"] = "drop"
circuit.elements["ct"].pins["n3"] = "cdrop"
return circuit
if __name__ == "__main__":
import matplotlib.pyplot as plt
from gdsfactory.simulationsimphony import plot_circuit
c = ring_double_siepic()
plot_circuit(c)
plt.show()
| true | true |
f72160586c3494b35606c754418a7f75fc368e1d | 10,517 | py | Python | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | null | null | null | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | null | null | null | pytorch/data.py | layumi/dgcnn | a7b58796ffe549f2d8bdb06a84f62aba03e1d3a1 | [
"MIT"
] | 1 | 2021-01-15T10:04:33.000Z | 2021-01-15T10:04:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Yue Wang
@Contact: yuewangx@mit.edu
@File: data.py
@Time: 2018/10/13 6:21 PM
Modified by
@Author: An Tao
@Contact: ta19@mails.tsinghua.edu.cn
@Time: 2020/2/27 9:32 PM
"""
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
def download_modelnet40():
    """Download and unpack the ModelNet40 HDF5 archive into ./data.

    No-op if data/modelnet40_ply_hdf5_2048 already exists.  Relies on the
    ``wget``/``unzip``/``mv``/``rm`` shell tools being available.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
        www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
        zipfile = os.path.basename(www)
        # Fetch and unpack in the current working directory, then move
        # the extracted folder (zip name minus ".zip") under DATA_DIR.
        os.system('wget %s; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))
def download_shapenetpart():
    """Download and unpack the ShapeNetPart HDF5 archive into ./data.

    No-op if data/shapenet_part_seg_hdf5_data already exists.  Relies on
    the ``wget``/``unzip``/``mv``/``rm`` shell tools being available.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
        www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
        zipfile = os.path.basename(www)
        os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
        # Rename the extracted folder to the expected directory name.
        os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
        os.system('rm %s' % (zipfile))
def download_S3DIS():
    """Fetch S3DIS semantic-segmentation data into ./data.

    The pre-built HDF5 archive is downloaded automatically.  The aligned
    Stanford3dDataset zip must be downloaded manually (it is gated behind
    a license form) and placed under data/; otherwise the process exits.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(DATA_DIR):
        os.mkdir(DATA_DIR)
    if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')):
        www = 'https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip'
        zipfile = os.path.basename(www)
        os.system('wget --no-check-certificate %s; unzip %s' % (www, zipfile))
        os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
        os.system('rm %s' % (zipfile))
    if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version')):
        # The aligned dataset cannot be fetched automatically (license
        # form); either unzip a manually placed archive or bail out.
        if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')):
            print('Please download Stanford3dDataset_v1.2_Aligned_Version.zip \
                  from https://goo.gl/forms/4SoGp4KtH1jfRqEj2 and place it under data/')
            sys.exit(0)
        else:
            zippath = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')
            os.system('unzip %s' % (zippath))
            os.system('rm %s' % (zippath))
def load_data_cls(partition):
    """Load the ModelNet40 HDF5 split named by ``partition``.

    Downloads the dataset on first use.

    Args:
        partition: split name matched against the *.h5 file names,
            e.g. 'train' or 'test'.

    Returns:
        (all_data, all_label): float32 point data and int64 labels,
        concatenated over all matching files along axis 0.
    """
    download_modelnet40()
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    all_data = []
    all_label = []
    for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5' % partition)):
        # Open read-only ('r'); the original 'r+' needlessly required
        # write permission on the dataset files.  The context manager
        # also guarantees the handle is closed on error.
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    return all_data, all_label
def load_data_partseg(partition):
    """Load the ShapeNetPart HDF5 split named by ``partition``.

    Downloads the dataset on first use.

    Args:
        partition: 'train', 'val', 'test', or 'trainval' (train and val
            files combined).

    Returns:
        (all_data, all_label, all_seg): float32 point data, int64 shape
        labels and int64 per-point part ids, concatenated over all
        matching files along axis 0.
    """
    download_shapenetpart()
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    all_data = []
    all_label = []
    all_seg = []
    if partition == 'trainval':
        file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*train*.h5')) \
               + glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*val*.h5'))
    else:
        file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*%s*.h5' % partition))
    for h5_name in file:
        # Open read-only ('r'); the original 'r+' needlessly required
        # write access, and the context manager guarantees the handle is
        # closed even on error (kept consistent with load_data_cls).
        with h5py.File(h5_name, 'r') as f:
            data = f['data'][:].astype('float32')
            label = f['label'][:].astype('int64')
            seg = f['pid'][:].astype('int64')
        all_data.append(data)
        all_label.append(label)
        all_seg.append(seg)
    all_data = np.concatenate(all_data, axis=0)
    all_label = np.concatenate(all_label, axis=0)
    all_seg = np.concatenate(all_seg, axis=0)
    return all_data, all_label, all_seg
def prepare_test_data_semseg():
    """Generate the S3DIS test HDF5 files via the prepare_data scripts.

    Each step is skipped when its output directory already exists; the
    scripts are run as subcommands of the current Python interpreter's
    shell environment.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    if not os.path.exists(os.path.join(DATA_DIR, 'stanford_indoor3d')):
        os.system('python prepare_data/collect_indoor3d_data.py')
    if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')):
        os.system('python prepare_data/gen_indoor3d_h5.py')
def load_data_semseg(partition, test_area):
    """Load S3DIS room-block point data and per-point labels.

    Args:
        partition: 'train' or 'test'; selects which HDF5 directory's
            file lists are read.
        test_area: area number as a string ('1'..'6'); rooms from this
            area form the test split, all other rooms the train split.

    Returns:
        (all_data, all_seg) arrays filtered to the requested split.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DATA_DIR = os.path.join(BASE_DIR, 'data')
    download_S3DIS()
    prepare_test_data_semseg()
    if partition == 'train':
        data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')
    else:
        data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')
    with open(os.path.join(data_dir, "all_files.txt")) as f:
        all_files = [line.rstrip() for line in f]
    with open(os.path.join(data_dir, "room_filelist.txt")) as f:
        room_filelist = [line.rstrip() for line in f]
    data_batchlist, label_batchlist = [], []
    for f in all_files:
        # NOTE(review): entries are joined with DATA_DIR, not data_dir,
        # so all_files.txt presumably stores paths relative to ./data --
        # confirm both the train and test lists use that convention.
        # Also opened 'r+' (read-write) and never closed; 'r' plus a
        # context manager would be safer.
        file = h5py.File(os.path.join(DATA_DIR, f), 'r+')
        data = file["data"][:]
        label = file["label"][:]
        data_batchlist.append(data)
        label_batchlist.append(label)
    data_batches = np.concatenate(data_batchlist, 0)
    seg_batches = np.concatenate(label_batchlist, 0)
    test_area_name = "Area_" + test_area
    train_idxs, test_idxs = [], []
    for i, room_name in enumerate(room_filelist):
        # room_filelist maps each block row to its source room; rows are
        # split by whether the room belongs to the held-out test area.
        if test_area_name in room_name:
            test_idxs.append(i)
        else:
            train_idxs.append(i)
    if partition == 'train':
        all_data = data_batches[train_idxs, ...]
        all_seg = seg_batches[train_idxs, ...]
    else:
        all_data = data_batches[test_idxs, ...]
        all_seg = seg_batches[test_idxs, ...]
    return all_data, all_seg
def translate_pointcloud(pointcloud):
    """Randomly scale then shift a point cloud (training augmentation).

    Per-axis scale factors are drawn from U[2/3, 3/2) and per-axis
    offsets from U[-0.2, 0.2); the result is cast to float32.
    """
    scale = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
    shift = np.random.uniform(low=-0.2, high=0.2, size=[3])
    return (pointcloud * scale + shift).astype('float32')
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
    """Add clipped Gaussian noise to ``pointcloud`` in place and return it.

    Noise is sigma * N(0, 1) per coordinate, clipped to [-clip, clip].
    """
    num_points, num_dims = pointcloud.shape
    noise = np.clip(sigma * np.random.randn(num_points, num_dims), -clip, clip)
    pointcloud += noise
    return pointcloud
def rotate_pointcloud(pointcloud):
    """Rotate ``pointcloud`` in place by a random angle in the x-z plane.

    The angle is drawn from U[0, 2*pi); only columns 0 (x) and 2 (z) are
    rotated, the y column is left untouched.
    """
    theta = np.pi * 2 * np.random.uniform()
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rot = np.array([[cos_t, -sin_t],
                    [sin_t, cos_t]])
    xz = pointcloud[:, [0, 2]]
    pointcloud[:, [0, 2]] = xz.dot(rot)
    return pointcloud
class ModelNet40(Dataset):
    """ModelNet40 shape-classification dataset.

    ``__getitem__`` returns (pointcloud, label) where pointcloud is the
    first ``num_points`` points of the stored cloud; training items are
    randomly scaled/shifted and point-shuffled as augmentation.
    """
    def __init__(self, num_points, partition='train'):
        # num_points: how many points of each stored cloud to keep.
        self.data, self.label = load_data_cls(partition)
        self.num_points = num_points
        self.partition = partition
    def __getitem__(self, item):
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        if self.partition == 'train':
            # Augmentation is applied to the training split only.
            pointcloud = translate_pointcloud(pointcloud)
            np.random.shuffle(pointcloud)  # permute point order in place
        return pointcloud, label
    def __len__(self):
        return self.data.shape[0]
class ShapeNetPart(Dataset):
    """ShapeNetPart part-segmentation dataset.

    ``__getitem__`` returns (pointcloud, label, seg): points, the shape
    category label and per-point part labels.  When ``class_choice``
    names a category, the dataset is restricted to that category and its
    part-id range; otherwise all 16 categories / 50 part ids are used.
    """
    def __init__(self, num_points, partition='train', class_choice=None):
        self.data, self.label, self.seg = load_data_partseg(partition)
        # Category name -> category id.
        self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
                       'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
                       'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
        # Number of part classes per category (indexed by category id).
        self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
        # First global part id of each category.
        self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
        self.num_points = num_points
        self.partition = partition
        self.class_choice = class_choice
        if self.class_choice != None:
            # Keep only samples of the chosen category and narrow the
            # part-id bookkeeping to that category's range.
            id_choice = self.cat2id[self.class_choice]
            indices = (self.label == id_choice).squeeze()
            self.data = self.data[indices]
            self.label = self.label[indices]
            self.seg = self.seg[indices]
            self.seg_num_all = self.seg_num[id_choice]
            self.seg_start_index = self.index_start[id_choice]
        else:
            self.seg_num_all = 50
            self.seg_start_index = 0
    def __getitem__(self, item):
        pointcloud = self.data[item][:self.num_points]
        label = self.label[item]
        seg = self.seg[item][:self.num_points]
        if self.partition == 'trainval':
            # pointcloud = translate_pointcloud(pointcloud)
            # Shuffle points and their labels with the same permutation.
            indices = list(range(pointcloud.shape[0]))
            np.random.shuffle(indices)
            pointcloud = pointcloud[indices]
            seg = seg[indices]
        return pointcloud, label, seg
    def __len__(self):
        return self.data.shape[0]
class S3DIS(Dataset):
    """S3DIS semantic-segmentation dataset of fixed-size room blocks.

    ``__getitem__`` returns (pointcloud, seg) where seg is a LongTensor
    of per-point class ids; training items are point-shuffled (points
    and labels with the same permutation).
    """
    def __init__(self, num_points=4096, partition='train', test_area='1'):
        self.data, self.seg = load_data_semseg(partition, test_area)
        self.num_points = num_points
        self.partition = partition
    def __getitem__(self, item):
        pointcloud = self.data[item][:self.num_points]
        seg = self.seg[item][:self.num_points]
        if self.partition == 'train':
            indices = list(range(pointcloud.shape[0]))
            np.random.shuffle(indices)
            pointcloud = pointcloud[indices]
            seg = seg[indices]
        seg = torch.LongTensor(seg)
        return pointcloud, seg
    def __len__(self):
        return self.data.shape[0]
if __name__ == '__main__':
    # Smoke test: instantiate each dataset (triggers the downloads on
    # first run) and print the shapes of one sample from each.
    train = ModelNet40(1024)
    test = ModelNet40(1024, 'test')
    data, label = train[0]
    print(data.shape)
    print(label.shape)
    trainval = ShapeNetPart(2048, 'trainval')
    test = ShapeNetPart(2048, 'test')
    data, label, seg = trainval[0]
    print(data.shape)
    print(label.shape)
    print(seg.shape)
    train = S3DIS(4096)
    test = S3DIS(4096, 'test')
    data, seg = train[0]
    print(data.shape)
    print(seg.shape)
| 37.03169 | 105 | 0.629837 |
import os
import sys
import glob
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
def download_modelnet40():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
os.system('wget %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def download_shapenetpart():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/shapenet_part_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget %s --no-check-certificate; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], os.path.join(DATA_DIR, 'shapenet_part_seg_hdf5_data')))
os.system('rm %s' % (zipfile))
def download_S3DIS():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')):
www = 'https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip'
zipfile = os.path.basename(www)
os.system('wget --no-check-certificate %s; unzip %s' % (www, zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version')):
if not os.path.exists(os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')):
print('Please download Stanford3dDataset_v1.2_Aligned_Version.zip \
from https://goo.gl/forms/4SoGp4KtH1jfRqEj2 and place it under data/')
sys.exit(0)
else:
zippath = os.path.join(DATA_DIR, 'Stanford3dDataset_v1.2_Aligned_Version.zip')
os.system('unzip %s' % (zippath))
os.system('rm %s' % (zippath))
def load_data_cls(partition):
download_modelnet40()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40*hdf5_2048', '*%s*.h5'%partition)):
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def load_data_partseg(partition):
download_shapenetpart()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
all_seg = []
if partition == 'trainval':
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*train*.h5')) \
+ glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*val*.h5'))
else:
file = glob.glob(os.path.join(DATA_DIR, 'shapenet*hdf5*', '*%s*.h5'%partition))
for h5_name in file:
f = h5py.File(h5_name, 'r+')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
seg = f['pid'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_seg.append(seg)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
all_seg = np.concatenate(all_seg, axis=0)
return all_data, all_label, all_seg
def prepare_test_data_semseg():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(os.path.join(DATA_DIR, 'stanford_indoor3d')):
os.system('python prepare_data/collect_indoor3d_data.py')
if not os.path.exists(os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')):
os.system('python prepare_data/gen_indoor3d_h5.py')
def load_data_semseg(partition, test_area):
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
download_S3DIS()
prepare_test_data_semseg()
if partition == 'train':
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data')
else:
data_dir = os.path.join(DATA_DIR, 'indoor3d_sem_seg_hdf5_data_test')
with open(os.path.join(data_dir, "all_files.txt")) as f:
all_files = [line.rstrip() for line in f]
with open(os.path.join(data_dir, "room_filelist.txt")) as f:
room_filelist = [line.rstrip() for line in f]
data_batchlist, label_batchlist = [], []
for f in all_files:
file = h5py.File(os.path.join(DATA_DIR, f), 'r+')
data = file["data"][:]
label = file["label"][:]
data_batchlist.append(data)
label_batchlist.append(label)
data_batches = np.concatenate(data_batchlist, 0)
seg_batches = np.concatenate(label_batchlist, 0)
test_area_name = "Area_" + test_area
train_idxs, test_idxs = [], []
for i, room_name in enumerate(room_filelist):
if test_area_name in room_name:
test_idxs.append(i)
else:
train_idxs.append(i)
if partition == 'train':
all_data = data_batches[train_idxs, ...]
all_seg = seg_batches[train_idxs, ...]
else:
all_data = data_batches[test_idxs, ...]
all_seg = seg_batches[test_idxs, ...]
return all_data, all_seg
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
def rotate_pointcloud(pointcloud):
theta = np.pi*2 * np.random.uniform()
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]])
pointcloud[:,[0,2]] = pointcloud[:,[0,2]].dot(rotation_matrix)
return pointcloud
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data_cls(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
class ShapeNetPart(Dataset):
def __init__(self, num_points, partition='train', class_choice=None):
self.data, self.label, self.seg = load_data_partseg(partition)
self.cat2id = {'airplane': 0, 'bag': 1, 'cap': 2, 'car': 3, 'chair': 4,
'earphone': 5, 'guitar': 6, 'knife': 7, 'lamp': 8, 'laptop': 9,
'motor': 10, 'mug': 11, 'pistol': 12, 'rocket': 13, 'skateboard': 14, 'table': 15}
self.seg_num = [4, 2, 2, 4, 4, 3, 3, 2, 4, 2, 6, 2, 3, 3, 3, 3]
self.index_start = [0, 4, 6, 8, 12, 16, 19, 22, 24, 28, 30, 36, 38, 41, 44, 47]
self.num_points = num_points
self.partition = partition
self.class_choice = class_choice
if self.class_choice != None:
id_choice = self.cat2id[self.class_choice]
indices = (self.label == id_choice).squeeze()
self.data = self.data[indices]
self.label = self.label[indices]
self.seg = self.seg[indices]
self.seg_num_all = self.seg_num[id_choice]
self.seg_start_index = self.index_start[id_choice]
else:
self.seg_num_all = 50
self.seg_start_index = 0
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
seg = self.seg[item][:self.num_points]
if self.partition == 'trainval':
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
return pointcloud, label, seg
def __len__(self):
return self.data.shape[0]
class S3DIS(Dataset):
def __init__(self, num_points=4096, partition='train', test_area='1'):
self.data, self.seg = load_data_semseg(partition, test_area)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
seg = self.seg[item][:self.num_points]
if self.partition == 'train':
indices = list(range(pointcloud.shape[0]))
np.random.shuffle(indices)
pointcloud = pointcloud[indices]
seg = seg[indices]
seg = torch.LongTensor(seg)
return pointcloud, seg
def __len__(self):
return self.data.shape[0]
if __name__ == '__main__':
train = ModelNet40(1024)
test = ModelNet40(1024, 'test')
data, label = train[0]
print(data.shape)
print(label.shape)
trainval = ShapeNetPart(2048, 'trainval')
test = ShapeNetPart(2048, 'test')
data, label, seg = trainval[0]
print(data.shape)
print(label.shape)
print(seg.shape)
train = S3DIS(4096)
test = S3DIS(4096, 'test')
data, seg = train[0]
print(data.shape)
print(seg.shape)
| true | true |
f721616e2e38048326fa72960040fcebc5347540 | 1,016 | py | Python | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | wip-abramson/PySyft | c321b26ce1aa3c969793874e663a8a46b1228dd1 | [
"Apache-1.1"
] | 1 | 2021-08-31T11:37:19.000Z | 2021-08-31T11:37:19.000Z | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | karapto/PySyft | 2940bfebb3e0f37a1b7451cf9581c41917534ed6 | [
"Apache-1.1"
] | null | null | null | packages/syft/tests/syft/lib/sklearn/model_serialize_test.py | karapto/PySyft | 2940bfebb3e0f37a1b7451cf9581c41917534ed6 | [
"Apache-1.1"
] | null | null | null | # third party
import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
# syft absolute
import syft as sy
from syft.experimental_flags import flags
sy.load("sklearn")
sy.load("numpy")


@pytest.mark.vendor(lib="sklearn")
@pytest.mark.parametrize("arrow_backend", [True, False])
def test_logistic_model_serde(
    arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
    """Round-trip a fitted LogisticRegression through syft send/get and
    verify the deserialized estimator's attributes match the original.
    """
    flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(random_state=0).fit(X, y)
    clf_remote = clf.send(root_client)
    clf_2 = clf_remote.get()
    dict_1 = vars(clf)
    dict_2 = vars(clf_2)
    for key in dict_1.keys():
        if isinstance(dict_1[key], float):
            assert abs(dict_1[key] - dict_2[key]) < 0.0001
        elif isinstance(dict_1[key], np.ndarray):
            # Bug fix: the original compared ``a.all() == b.all()``, which
            # reduces each array to a single truth value and only compares
            # those two booleans.  Compare shapes and values element-wise.
            assert dict_1[key].shape == dict_2[key].shape
            assert np.allclose(dict_1[key], dict_2[key])
        else:
            assert dict_1[key] == dict_2[key]
| 26.736842 | 61 | 0.649606 |
import numpy as np
import pytest
from sklearn.linear_model import LogisticRegression
import syft as sy
from syft.experimental_flags import flags
sy.load("sklearn")
sy.load("numpy")
@pytest.mark.vendor(lib="sklearn")
@pytest.mark.parametrize("arrow_backend", [True, False])
def test_logistic_model_serde(
    arrow_backend: bool, root_client: sy.VirtualMachineClient
) -> None:
    """Round-trip a fitted LogisticRegression through syft send/get and check
    that every fitted attribute survives, for both tensor serde backends."""
    flags.APACHE_ARROW_TENSOR_SERDE = arrow_backend
    X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegression(random_state=0).fit(X, y)
    clf_remote = clf.send(root_client)
    clf_2 = clf_remote.get()
    dict_1 = vars(clf)
    dict_2 = vars(clf_2)
    # Both objects must expose exactly the same attributes.
    assert dict_1.keys() == dict_2.keys()
    for key in dict_1:
        value_1, value_2 = dict_1[key], dict_2[key]
        if isinstance(value_1, float):
            # Scalars may lose a little precision in transit.
            assert abs(value_1 - value_2) < 0.0001
        elif isinstance(value_1, np.ndarray):
            # BUG FIX: the original asserted value_1.all() == value_2.all(),
            # which compares two aggregate booleans and passes for arrays
            # with completely different contents. Compare element-wise.
            assert np.allclose(value_1, value_2)
        else:
            assert value_1 == value_2
| true | true |
f72161c03ab784a2ce4d00015d797e74b8a25925 | 2,042 | py | Python | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | SecuriTree/views.py | davymaish/django-SecuriTree | 01cf925e591877ae2669ca8430845abe278832bf | [
"BSD-2-Clause"
] | null | null | null | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.template import loader
from django.contrib import messages
from django.views import generic
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Door, Area, AccessRule, User
class IndexView(TemplateView):
    """Render the static landing page (no login required)."""
    template_name = 'SecuriTree/index.html'
class HomeView(LoginRequiredMixin,TemplateView):
    """Render the home page; LoginRequiredMixin redirects anonymous users."""
    template_name = 'SecuriTree/home.html'
class HierarchyView(LoginRequiredMixin,generic.ListView):
    """List the root areas of the area tree (login required).

    Only areas with no parent are queried; descendants are presumably
    traversed in the template via each root's relations -- confirm there.
    """
    model = Area
    template_name = 'SecuriTree/hierarchy.html'
    context_object_name = 'area_list'
    def get_queryset(self):
        # Roots only: areas whose parent_area FK is NULL, ordered by pk.
        return Area.objects.filter(parent_area__isnull=True).order_by('id')
class DoorManageView(LoginRequiredMixin,TemplateView):
    """Render the door-management landing page (login required)."""
    template_name = 'SecuriTree/manage_doors.html'
class DoorsView(LoginRequiredMixin,generic.ListView):
    """List every door (login required)."""
    model = Door
    template_name = 'SecuriTree/all_doors.html'
    context_object_name = 'door_list'
    def get_queryset(self):
        # Explicit all(); equivalent to ListView's default queryset for Door.
        return Door.objects.all()
@login_required
def door_form(request):
    """Render the confirmation form for locking or unlocking a door.

    The 'action' query parameter selects the mode; any other value --
    including a missing parameter -- falls back to 'lock'.
    """
    # .get() avoids a MultiValueDictKeyError (HTTP 500) when 'action' is
    # absent from the query string; the original indexed request.GET directly.
    action = 'unlock' if request.GET.get('action') == 'unlock' else 'lock'
    return render(request, 'SecuriTree/door_form.html', {'action':action})
@login_required
def door_status(request):
    """Persist a door's new lock status and redirect back to the referrer.

    POST params:
        doorid -- primary key of the Door to update (404 if unknown)
        status -- new status; 'closed' is reported as locked, anything
                  else as unlocked
    """
    door_id = request.POST['doorid']
    status = request.POST['status']
    door = get_object_or_404(Door, pk=door_id)
    door.status = status
    door.save()
    # f-strings fix a TypeError in the original ('Door ' + door.id), which
    # only worked if the Door pk happened to be a string.
    if status == 'closed':
        msg = f'Door {door.id} successfully locked.'
    else:
        msg = f'Door {door.id} successfully unlocked.'
    messages.success(request, msg)
    # NOTE(review): HTTP_REFERER may be missing, making the redirect target
    # None -- consider a named-URL fallback.
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| 27.972603 | 75 | 0.731636 | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.template import loader
from django.contrib import messages
from django.views import generic
from django.views.generic.base import TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Door, Area, AccessRule, User
class IndexView(TemplateView):
    """Render the static landing page (no login required)."""
    template_name = 'SecuriTree/index.html'
class HomeView(LoginRequiredMixin,TemplateView):
    """Render the home page; LoginRequiredMixin redirects anonymous users."""
    template_name = 'SecuriTree/home.html'
class HierarchyView(LoginRequiredMixin,generic.ListView):
    """List the root areas of the area tree (login required).

    Only areas with no parent are queried; descendants are presumably
    traversed in the template via each root's relations -- confirm there.
    """
    model = Area
    template_name = 'SecuriTree/hierarchy.html'
    context_object_name = 'area_list'
    def get_queryset(self):
        # Roots only: areas whose parent_area FK is NULL, ordered by pk.
        return Area.objects.filter(parent_area__isnull=True).order_by('id')
class DoorManageView(LoginRequiredMixin,TemplateView):
    """Render the door-management landing page (login required)."""
    template_name = 'SecuriTree/manage_doors.html'
class DoorsView(LoginRequiredMixin,generic.ListView):
    """List every door (login required)."""
    model = Door
    template_name = 'SecuriTree/all_doors.html'
    context_object_name = 'door_list'
    def get_queryset(self):
        # Explicit all(); equivalent to ListView's default queryset for Door.
        return Door.objects.all()
@login_required
def door_form(request):
    """Render the confirmation form for locking or unlocking a door.

    The 'action' query parameter selects the mode; any other value --
    including a missing parameter -- falls back to 'lock'.
    """
    # .get() avoids a MultiValueDictKeyError (HTTP 500) when 'action' is
    # absent from the query string; the original indexed request.GET directly.
    action = 'unlock' if request.GET.get('action') == 'unlock' else 'lock'
    return render(request, 'SecuriTree/door_form.html', {'action':action})
@login_required
def door_status(request):
    """Persist a door's new lock status and redirect back to the referrer.

    POST params:
        doorid -- primary key of the Door to update (404 if unknown)
        status -- new status; 'closed' is reported as locked, anything
                  else as unlocked
    """
    door_id = request.POST['doorid']
    status = request.POST['status']
    door = get_object_or_404(Door, pk=door_id)
    door.status = status
    door.save()
    # f-strings fix a TypeError in the original ('Door ' + door.id), which
    # only worked if the Door pk happened to be a string.
    if status == 'closed':
        msg = f'Door {door.id} successfully locked.'
    else:
        msg = f'Door {door.id} successfully unlocked.'
    messages.success(request, msg)
    # NOTE(review): HTTP_REFERER may be missing, making the redirect target
    # None -- consider a named-URL fallback.
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| true | true |
f721628ea6b0b18873ff1f9593f52a8c6a6f14af | 6,273 | py | Python | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | thenewboston_node/business_logic/tests/test_file_blockchain/test_primary_validator.py | andbortnik/thenewboston-node | bd63c7def5f224286dba70f9560252a7da8ea712 | [
"MIT"
] | null | null | null | from thenewboston_node.business_logic.blockchain.file_blockchain import FileBlockchain
from thenewboston_node.business_logic.models import (
AccountState, Block, Node, NodeDeclarationSignedChangeRequest, PrimaryValidatorSchedule,
PrimaryValidatorScheduleSignedChangeRequest
)
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.business_logic.tests.baker_factories import baker
from thenewboston_node.core.utils.cryptography import generate_key_pair
def test_no_pv_schedule(blockchain_directory, blockchain_genesis_state):
    """Without any PV schedule in the genesis state there is no primary
    validator at any block number."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()

    assert blockchain.get_primary_validator() is None
    for block_number in (0, 10):
        assert blockchain.get_primary_validator(block_number) is None
def test_can_get_pv_from_blockchain_genesis_state(
    blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
    """A schedule embedded in the genesis state makes its node the PV for
    exactly the scheduled block range (0-99)."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)

    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=schedule
    )
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()

    assert blockchain.get_primary_validator() == node
    for block_number in (0, 10, 99):
        assert blockchain.get_primary_validator(block_number) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_pv_from_from_blocks(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
    """A node declared and scheduled via blocks (not genesis state) becomes
    the primary validator for the scheduled range 0-99."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    signing_key = user_account_key_pair.private
    # First block: declare the node itself.
    request = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['https://127.0.0.1:8555/'], fee_amount=3, signing_key=signing_key
    )
    node = request.message.node
    assert node.identifier
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    # Second block: schedule that node as PV for blocks 0-99.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, signing_key)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_node_from_genesis_state_and_pv_from_blocks(
    blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
    """A node declared in the genesis state can be (re)scheduled as PV by a
    later schedule block; the node lookup and the schedule mix sources."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    # Node and an initial 0-99 schedule come from the genesis state.
    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=pv_schedule
    )
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    # The schedule is then re-asserted via a block signed by the same account.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_overridden_pv(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
    """The most recent PrimaryValidatorSchedule block wins: a second node can
    claim the 0-99 schedule, and the first node can then take it back."""

    def assert_pv_is(expected_node):
        # The scheduled node is PV for the head (no arg) and blocks 0-99;
        # block 100 is past every schedule used in this test.
        assert blockchain.get_primary_validator() == expected_node
        assert blockchain.get_primary_validator(0) == expected_node
        assert blockchain.get_primary_validator(10) == expected_node
        assert blockchain.get_primary_validator(99) == expected_node
        assert blockchain.get_primary_validator(100) is None

    blockchain = FileBlockchain(base_directory=blockchain_directory)

    # First node is scheduled for 0-99 in the genesis state.
    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=pv_schedule
    )

    # Second node is declared in the genesis state without any schedule.
    another_key_pair = generate_key_pair()
    another_node = baker.make(Node, identifier=another_key_pair.public)
    blockchain_genesis_state.account_states[another_key_pair.public] = AccountState(node=another_node)

    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()

    # Genesis schedule: first node is PV.
    assert_pv_is(node)

    # The other node claims the same 0-99 range in a block -> it overrides.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, another_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert_pv_is(another_node)

    # The original node claims the range back.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert_pv_is(node)
| 46.466667 | 108 | 0.809501 | from thenewboston_node.business_logic.blockchain.file_blockchain import FileBlockchain
from thenewboston_node.business_logic.models import (
AccountState, Block, Node, NodeDeclarationSignedChangeRequest, PrimaryValidatorSchedule,
PrimaryValidatorScheduleSignedChangeRequest
)
from thenewboston_node.business_logic.node import get_node_signing_key
from thenewboston_node.business_logic.tests.baker_factories import baker
from thenewboston_node.core.utils.cryptography import generate_key_pair
def test_no_pv_schedule(blockchain_directory, blockchain_genesis_state):
    """Without any PV schedule in the genesis state there is no PV at any
    block number."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    assert blockchain.get_primary_validator() is None
    assert blockchain.get_primary_validator(0) is None
    assert blockchain.get_primary_validator(10) is None
def test_can_get_pv_from_blockchain_genesis_state(
    blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
    """A schedule embedded in the genesis state makes its node the PV for
    exactly the scheduled block range (0-99)."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=pv_schedule
    )
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_pv_from_from_blocks(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
    """A node declared and scheduled via blocks (not genesis state) becomes
    the primary validator for the scheduled range 0-99."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    signing_key = user_account_key_pair.private
    # First block: declare the node itself.
    request = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['https://127.0.0.1:8555/'], fee_amount=3, signing_key=signing_key
    )
    node = request.message.node
    assert node.identifier
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    # Second block: schedule that node as PV for blocks 0-99.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, signing_key)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_node_from_genesis_state_and_pv_from_blocks(
    blockchain_directory, blockchain_genesis_state, user_account_key_pair
):
    """A node declared in the genesis state can be (re)scheduled as PV by a
    later schedule block; the node lookup and the schedule mix sources."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    # Node and an initial 0-99 schedule come from the genesis state.
    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=pv_schedule
    )
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    # The schedule is then re-asserted via a block signed by the same account.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    # Block 100 lies past the schedule's end.
    assert blockchain.get_primary_validator(100) is None
def test_can_get_overridden_pv(blockchain_directory, blockchain_genesis_state, user_account_key_pair):
    """The most recent PrimaryValidatorSchedule block wins: a second node can
    claim the 0-99 schedule, and the first node can then take it back."""
    blockchain = FileBlockchain(base_directory=blockchain_directory)
    # First node is scheduled for 0-99 in the genesis state.
    account_number = user_account_key_pair.public
    node = baker.make(Node, identifier=account_number)
    pv_schedule = baker.make(PrimaryValidatorSchedule, begin_block_number=0, end_block_number=99)
    blockchain_genesis_state.account_states[account_number] = AccountState(
        node=node, primary_validator_schedule=pv_schedule
    )
    # Second node is declared in the genesis state without any schedule.
    another_key_pair = generate_key_pair()
    another_node = baker.make(Node, identifier=another_key_pair.public)
    blockchain_genesis_state.account_states[another_key_pair.public] = AccountState(node=another_node)
    blockchain.add_blockchain_state(blockchain_genesis_state)
    blockchain.validate()
    # Genesis schedule: first node is PV for 0-99 (100 is past the end).
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    assert blockchain.get_primary_validator(100) is None
    # The other node claims the same 0-99 range in a block -> it overrides.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, another_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == another_node
    assert blockchain.get_primary_validator(0) == another_node
    assert blockchain.get_primary_validator(10) == another_node
    assert blockchain.get_primary_validator(99) == another_node
    assert blockchain.get_primary_validator(100) is None
    # The original node claims the range back.
    request = PrimaryValidatorScheduleSignedChangeRequest.create(0, 99, user_account_key_pair.private)
    block = Block.create_from_signed_change_request(blockchain, request, get_node_signing_key())
    blockchain.add_block(block)
    assert blockchain.get_primary_validator() == node
    assert blockchain.get_primary_validator(0) == node
    assert blockchain.get_primary_validator(10) == node
    assert blockchain.get_primary_validator(99) == node
    assert blockchain.get_primary_validator(100) is None
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.