hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7158d1fdb4e339a2eeef76b607b2b96c5f92797 | 11,206 | py | Python | tensorflow/python/distribute/multi_process_runner_test.py | Diva-Pant/tensorflow | f926d8c10efb07176ae559d0e098cdfdb4d03219 | [
"Apache-2.0"
] | 78 | 2020-08-04T12:36:25.000Z | 2022-03-25T04:23:40.000Z | tensorflow/python/distribute/multi_process_runner_test.py | Diva-Pant/tensorflow | f926d8c10efb07176ae559d0e098cdfdb4d03219 | [
"Apache-2.0"
] | 1 | 2020-08-12T09:47:19.000Z | 2020-08-12T09:47:19.000Z | tensorflow/python/distribute/multi_process_runner_test.py | Diva-Pant/tensorflow | f926d8c10efb07176ae559d0e098cdfdb4d03219 | [
"Apache-2.0"
] | 25 | 2020-08-31T12:21:19.000Z | 2022-03-20T05:16:32.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_25_sec():
logging.error('foo')
time.sleep(100)
logging.error('bar')
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_25_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=25)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, so it should not have iteration 9
# printed.
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
# Worker 0 is terminated in the middle, but a new worker 0 is added, so it
# should still have iteration 9 printed. Moreover, iteration 0 of worker 0
# should happen twice.
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
for i in range(50):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
time.sleep(1)
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
list_stdout=True,
timeout=5)
list_to_assert = cm.exception.mpr_result.stdout
for job in ['worker', 'ps']:
for iteration in range(0, 5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
if __name__ == '__main__':
multi_process_runner.test_main()
| 34.374233 | 80 | 0.663127 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import threading
import time
from absl import logging
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
def proc_func_that_adds_task_type_in_return_data():
return multi_worker_test_base.get_task_type()
def proc_func_that_errors():
raise ValueError('This is an error.')
def proc_func_that_does_nothing():
pass
def proc_func_that_adds_simple_return_data():
return 'dummy_data'
def proc_func_that_return_args_and_kwargs(*args, **kwargs):
return list(args) + list(kwargs.items())
def proc_func_with_barrier():
return multi_process_runner.barrier()
class MultiProcessRunnerTest(test.TestCase):
def _worker_idx(self):
config_task = json.loads(os.environ['TF_CONFIG'])['task']
return config_task['index']
def test_multi_process_runner(self):
mpr_result = multi_process_runner.run(
proc_func_that_adds_task_type_in_return_data,
multi_worker_test_base.create_cluster_spec(
num_workers=2, num_ps=3, has_eval=1))
job_count_dict = {'worker': 2, 'ps': 3, 'evaluator': 1}
for data in mpr_result.return_value:
job_count_dict[data] -= 1
self.assertEqual(job_count_dict['worker'], 0)
self.assertEqual(job_count_dict['ps'], 0)
self.assertEqual(job_count_dict['evaluator'], 0)
def test_multi_process_runner_error_propagates_from_subprocesses(self):
runner = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
max_run_time=20)
runner.start()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
runner.join()
def test_multi_process_runner_queue_emptied_between_runs(self):
cluster_spec = multi_worker_test_base.create_cluster_spec(num_workers=2)
return_value = multi_process_runner.run(
proc_func_that_adds_simple_return_data, cluster_spec).return_value
self.assertTrue(return_value)
self.assertEqual(return_value[0], 'dummy_data')
self.assertEqual(return_value[1], 'dummy_data')
return_value = multi_process_runner.run(proc_func_that_does_nothing,
cluster_spec).return_value
self.assertFalse(return_value)
def test_multi_process_runner_args_passed_correctly(self):
return_value = multi_process_runner.run(
proc_func_that_return_args_and_kwargs,
multi_worker_test_base.create_cluster_spec(num_workers=1),
args=('a', 'b'),
kwargs={
'c_k': 'c_v'
}).return_value
self.assertEqual(return_value[0][0], 'a')
self.assertEqual(return_value[0][1], 'b')
self.assertEqual(return_value[0][2], ('c_k', 'c_v'))
def test_stdout_captured(self):
def simple_print_func():
print('This is something printed.', flush=True)
return 'This is returned data.'
mpr_result = multi_process_runner.run(
simple_print_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
std_stream_results = mpr_result.stdout
return_value = mpr_result.return_value
self.assertIn('[worker-0]: This is something printed.\n',
std_stream_results)
self.assertIn('[worker-1]: This is something printed.\n',
std_stream_results)
self.assertIn('This is returned data.', return_value)
def test_process_that_exits(self):
def func_to_exit_in_25_sec():
logging.error('foo')
time.sleep(100)
logging.error('bar')
mpr = multi_process_runner.MultiProcessRunner(
func_to_exit_in_25_sec,
multi_worker_test_base.create_cluster_spec(num_workers=1),
list_stdout=True,
max_run_time=25)
mpr.start()
stdout = mpr.join().stdout
self.assertLen([msg for msg in stdout if 'foo' in msg], 1)
self.assertLen([msg for msg in stdout if 'bar' in msg], 0)
def test_termination(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(5)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(5)
mpr.terminate('worker', 0)
std_stream_results = mpr.join().stdout
self.assertIn('[worker-0]: index 0, iteration 0\n', std_stream_results)
self.assertNotIn('[worker-0]: index 0, iteration 9\n',
std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_termination_and_start_single_process(self):
def proc_func():
for i in range(0, 10):
print(
'index {}, iteration {}'.format(self._worker_idx(), i), flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(3)
mpr.terminate('worker', 0)
mpr.start_single_process('worker', 0)
std_stream_results = mpr.join().stdout
self.assertLen(
[s for s in std_stream_results if 'index 0, iteration 0' in s], 2)
self.assertIn('[worker-0]: index 0, iteration 9\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 0\n', std_stream_results)
self.assertIn('[worker-1]: index 1, iteration 9\n', std_stream_results)
def test_streaming(self):
def proc_func():
for i in range(5):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
print(
'(print) {}-{}, i: {}'.format(
multi_worker_test_base.get_task_type(), self._worker_idx(), i),
flush=True)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=2, num_ps=2, has_eval=True),
list_stdout=True)
mpr._dependence_on_chief = False
mpr.start()
mpr.start_single_process('worker', 2)
mpr.start_single_process('ps', 2)
mpr_result = mpr.join()
list_to_assert = mpr_result.stdout
for job in ['chief', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
for job in ['worker', 'ps']:
for iteration in range(5):
for task in range(3):
self.assertTrue(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertTrue(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
task = 3
self.assertFalse(
any('(logging) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
self.assertFalse(
any('(print) {}-{}, i: {}'.format(job, task, iteration) in line
for line in list_to_assert))
def test_start_in_process_as(self):
def proc_func():
for i in range(5):
logging.info('%s-%d, i: %d', multi_worker_test_base.get_task_type(),
self._worker_idx(), i)
time.sleep(1)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
list_stdout=True)
def eval_func():
time.sleep(1)
mpr.start_single_process(task_type='evaluator', task_id=0)
eval_thread = threading.Thread(target=eval_func)
eval_thread.start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
eval_thread.join()
list_to_assert = mpr.join().stdout
for job in ['worker', 'evaluator']:
for iteration in range(5):
self.assertTrue(
any('{}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
def test_terminate_all_does_not_ignore_error(self):
mpr = multi_process_runner.MultiProcessRunner(
proc_func_that_errors,
multi_worker_test_base.create_cluster_spec(num_workers=2),
list_stdout=True)
mpr.start()
time.sleep(60)
mpr.terminate_all()
with self.assertRaisesRegexp(ValueError, 'This is an error.'):
mpr.join()
def test_barrier(self):
multi_process_runner.run(
proc_func_with_barrier,
cluster_spec=multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1),
)
def test_barrier_called_in_main_process(self):
with self.assertRaises(ValueError):
multi_process_runner.barrier()
def test_stdout_available_when_timeout(self):
def proc_func():
for i in range(50):
logging.info('(logging) %s-%d, i: %d',
multi_worker_test_base.get_task_type(), self._worker_idx(),
i)
time.sleep(1)
with self.assertRaises(multi_process_runner.SubprocessTimeoutError) as cm:
multi_process_runner.run(
proc_func,
multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1),
list_stdout=True,
timeout=5)
list_to_assert = cm.exception.mpr_result.stdout
for job in ['worker', 'ps']:
for iteration in range(0, 5):
self.assertTrue(
any('(logging) {}-0, i: {}'.format(job, iteration) in line
for line in list_to_assert))
if __name__ == '__main__':
multi_process_runner.test_main()
| true | true |
f7158d5e6cf2a8dfdb996beebce53453c96ec708 | 281 | py | Python | lambda/index.py | sano307/lambda-container-demo | 6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3 | [
"MIT"
] | null | null | null | lambda/index.py | sano307/lambda-container-demo | 6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3 | [
"MIT"
] | null | null | null | lambda/index.py | sano307/lambda-container-demo | 6c27c56819c9a3defb63bf26b4fd53bf6cdb71d3 | [
"MIT"
] | 1 | 2021-07-18T03:52:40.000Z | 2021-07-18T03:52:40.000Z | import json
import pandas as pd
def handler(event, context):
df = pd.DataFrame({"id": [1, 2], "value": ["foo", "boo"]})
print(df)
return {
"statusCode": 200,
"body": json.dumps({
"message": "This is a container lambda."
})
}
| 17.5625 | 62 | 0.512456 | import json
import pandas as pd
def handler(event, context):
df = pd.DataFrame({"id": [1, 2], "value": ["foo", "boo"]})
print(df)
return {
"statusCode": 200,
"body": json.dumps({
"message": "This is a container lambda."
})
}
| true | true |
f7158d9cf34dc0b5ca5dc19e15a61f7fd3e08c77 | 12,588 | py | Python | test.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | test.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | test.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter.ttk import *
import sqlite3
from tkinter import *
'''
import speech_recognition as sr # for speech recognition to play songs
import pyttsx3 as tts # python module for speech
engine = tts.init()
volume = engine.getProperty('volume')
engine.setProperty('volume',0.75)
voices = engine.getProperty('voices')
rate = engine.getProperty('rate')
engine.setProperty('voice', voices[0].id)
engine.setProperty('rate', 150)
'''
root = tk.Tk()
root.title("DataBase Manager by Mohit Gupta")
root.geometry("800x640")
#-------------------------create text box--------------------------------------------
songs = Entry(root, width=50)
songs.grid(row=8,column=1,pady=5)
age0_2 = Entry(root, width=50)
age0_2.grid(row=9, column=1,pady=5)
age4_6 = Entry(root, width=50)
age4_6.grid(row=10, column=1,pady=5)
age8_12 = Entry(root, width=50)
age8_12.grid(row=11, column=1,pady=5)
age15_20 = Entry(root, width=50)
age15_20.grid(row=12, column=1,pady=5)
age25_32 = Entry(root, width=50)
age25_32.grid(row=13, column=1,pady=5)
age38_43 = Entry(root, width=50)
age38_43.grid(row=14, column=1,pady=5)
age48_53 = Entry(root, width=50)
age48_53.grid(row=15, column=1,pady=5)
age60_100 = Entry(root, width=50)
age60_100.grid(row=16, column=1,pady=5)
singer_name = Entry(root, width=50)
singer_name.grid(row=17, column=1,pady=5)
h = Entry(root, width=50)
h.grid(row=18, column=1,pady=5)
s = Entry(root, width=50)
s.grid(row=19, column=1,pady=5)
a = Entry(root, width=50)
a.grid(row=20, column=1,pady=5)
cr = Entry(root, width=50)
cr.grid(row=21, column=1,pady=5)
su = Entry(root, width=50)
su.grid(row=22, column=1,pady=5)
delete = Entry(root, width=20)
delete.grid(row=11, column=3, pady=5)
#--------------------------------create text box label--------------------------------------------
songs_label = Label(root, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(root, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(root, text="Age4_6",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(root, text="Age8_12",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(root, text="Age15_20",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(root, text="Age25_32",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(root, text="Age38_43",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(root, text="Age48_53",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(root, text="Age60_100",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(root, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(root, text="Happy",padx=5)
h_label.grid(row=18, column=0)
s_label = Label(root, text="Sad",padx=5)
s_label.grid(row=19, column=0)
a_label = Label(root, text="Angry",padx=5)
a_label.grid(row=20, column=0)
cr_label = Label(root, text="cry",padx=5)
cr_label.grid(row=21, column=0)
su_label = Label(root, text="Surprise",padx=5)
su_label.grid(row=22, column=0)
delete_label = Label(root, text="Select ID")
delete_label.grid(row=11, column=2, pady=10)
#----------------------------info---------------------------------------
def update():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get() #b3 is delete button
c.execute("""UPDATE music SET
songs = :songs,
age0_2 = :age0_2,
age4_6 = :age4_6,
age8_12 = :age8_12,
age15_20 = :age15_20,
age25_32 = :age25_32,
age38_43 = :age38_43,
age48_53 = :age48_53,
age60_100 = :age60_100,
singer_name = :singer_name,
happy = :h,
sad = :s,
angry = :a,
cry = :cr,
surprise = :su,
WHERE oid = :oid""",
{
'songs': songs_editor.get(),
'age0_2': age0_2_editor.get(),
'age4_6': age4_6_editor.get(),
'age8_12': age8_12_editor.get(),
'age15_20': age15_20_editor.get(),
'age25_32': age25_32_editor.get(),
'age38_43': age38_43_editor.get(),
'age48_53': age48_53_editor.get(),
'age60_100': age60_100_editor.get(),
'singer_name': singer_name_editor.get(),
'h': h_editor.get(),
's': s_editor.get(),
'a': a_editor.get(),
'cr': cr_editor.get(),
'su': su_editor.get(),
'oid': item_id
})
conn.commit()
conn.close()
def edit():
editor = Tk()
editor.title("Information")
editor.geometry("600x640")
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("SELECT * FROM music WHERE oid = "+ item_id )
items = c.fetchall()
songs_editor = Entry(editor, width=50)
songs_editor.grid(row=8,column=1,pady=5)
age0_2_editor = Entry(editor, width=50)
age0_2_editor.grid(row=9, column=1,pady=5)
age4_6_editor = Entry(editor, width=50)
age4_6_editor.grid(row=10, column=1,pady=5)
age8_12_editor = Entry(editor, width=50)
age8_12_editor.grid(row=11, column=1,pady=5)
age15_20_editor = Entry(editor, width=50)
age15_20_editor.grid(row=12, column=1,pady=5)
age25_32_editor = Entry(editor, width=50)
age25_32_editor.grid(row=13, column=1,pady=5)
age38_43_editor = Entry(editor, width=50)
age38_43_editor.grid(row=14, column=1,pady=5)
age48_53_editor = Entry(editor, width=50)
age48_53_editor.grid(row=15, column=1,pady=5)
age60_100_editor = Entry(editor, width=50)
age60_100_editor.grid(row=16, column=1,pady=5)
singer_name_editor = Entry(editor, width=50)
singer_name_editor.grid(row=17, column=1,pady=5)
h_editor = Entry(editor, width=50)
h_editor.grid(row=17, column=1,pady=5)
s_editor = Entry(editor, width=50)
s_editor.grid(row=18, column=1,pady=5)
a_editor = Entry(editor, width=50)
a_editor.grid(row=19, column=1,pady=5)
cr_editor = Entry(editor, width=50)
cr_editor.grid(row=20, column=1,pady=5)
su_editor= Entry(editor, width=50)
su_editor.grid(row=21, column=1,pady=5)
#--------------------------------create text box label--------------------------------------------
songs_label = Label(editor, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(editor, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(editor, text="Age0_2",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(editor, text="Age0_2",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(editor, text="Age0_2",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(editor, text="Age0_2",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(editor, text="Age0_2",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(editor, text="Age0_2",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(editor, text="Age0_2",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(editor, text="Age0_2",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(editor, text="Happy",padx=5)
h_label.grid(row=17, column=0)
s_label = Label(editor, text="Sad",padx=5)
s_label.grid(row=18, column=0)
a_label = Label(editor, text="Angry",padx=5)
a_label.grid(row=19, column=0)
cr_label = Label(editor, text="cry",padx=5)
cr_label.grid(row=20, column=0)
su_label = Label(editor, text="Surprise",padx=5)
su_label.grid(row=21, column=0)
for item in items:
songs_editor.insert(0, item[0])
age0_2_editor.insert(0, item[1])
age4_6_editor.insert(0, item[2])
age8_12_editor.insert(0, item[3])
age15_20_editor.insert(0, item[4])
age25_32_editor.insert(0, item[5])
age38_43_editor.insert(0, item[6])
age48_53_editor.insert(0, item[7])
age60_100_editor.insert(0, item[8])
singer_name_editor.insert(0, item[9])
h_editor.insert(0, item[10])
s_editor.insert(0, item[11])
a_editor.insert(0, item[12])
cr_editor.insert(0, item[13])
su_editor.insert(0, item[14])
b4_edit = Button(editor, text = "Info",padx=50,fg="white",pady=5,bg="blue")
b4_edit.grid(row=34, column=1)
#--------------------------------ADD TO DATABASE FUNCTION----------------------------
#add a new record to the table
def add_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("INSERT INTO music VALUES (:songs, :age0_2, :age4_6, :age8_12, :age15_20, :age25_32, :age38_43, :age48_53, :age60_100, :singer_name, :h, :s, :a, :cr, :su)",
{
'songs': songs.get(),
'age0_2': age0_2.get(),
'age4_6': age4_6.get(),
'age8_12': age8_12.get(),
'age15_20': age15_20.get(),
'age25_32': age25_32.get(),
'age38_43': age38_43.get(),
'age48_53': age48_53.get(),
'age60_100': age60_100.get(),
'singer_name': singer_name.get(),
'h': h.get(),
's': s.get(),
'a': a.get(),
'cr': cr.get(),
'su': su.get()
})
songs.delete(0, END)
age0_2.delete(0, END)
age4_6.delete(0, END)
age8_12.delete(0, END)
age15_20.delete(0, END)
age25_32.delete(0, END)
age38_43.delete(0, END)
age48_53.delete(0, END)
age60_100.delete(0, END)
singer_name.delete(0, END)
h.delete(0, END)
s.delete(0, END)
a.delete(0, END)
cr.delete(0, END)
su.delete(0, END)
conn.commit()
conn.close()
'''
def show_allsongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music WHERE age25_32 LIKE '1' AND happy LIKE '1'")
items = c.fetchall()
#print(str(items))
SELECT rowid, * FROM music
for item in items:
# print(item)
print(str(item[0]) + "\t\t" + str(item[1]) + "\t\t" + str(item[2]) + "\t\t" + str(item[3]) + "\t\t" + str(item[4])+ "\t\t" + str(item[5] + str(item[6]) + "\t\t" + str(item[7]) + "\t\t" + str(item[8]) + "\t\t" + str(item[9]) + "\t\t" + str(item[10])+ "\t\t" + str(item[11])+ str(item[12])+ "\t\t" + str(item[13]))
# conn.commit()
# conn.close()
'''
def show_allSongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music")
items = c.fetchall()
# print(items)
print_items = ''
for item in items:
print_items = str(item[0]) + " " + str(item[1]) + "\t\t\t|" + str(item[2]) + " " + str(item[3]) + " " + str(item[4])+ " " + str(item[5]) + " " + str(item[6]) + " " + str(item[7]) + " " + str(item[8]) + " " + str(item[9]) + " | " + str(item[11]) + " " + str(item[12]) + " " + str(item[13]) + " " + str(item[14]) + " " + str(item[15]) + " | " + str(item[10])
#print_items += "\n"
print(print_items)
print("---------------------------------------------------------------------------------------------------\n")
# b8_label = Label(root, text=print_items)
# b8_label.grid(row=22, column=0,columnspan=2)
conn.commit()
conn.close()
#--------------------------------------------------------------------------------------
######CREATE A TABLE FUNCTION
def create_table():
    """Create the 'music' table; raises sqlite3.OperationalError if it already exists."""
    conn = sqlite3.connect('music4.db')
    c = conn.cursor()
    # Columns: song title, eight age-group suitability flags, singer name,
    # five mood flags -- all stored as text.
    c.execute("""CREATE TABLE music(
            songs text,
            age0_2 text,
            age4_6 text,
            age8_12 text,
            age15_20 text,
            age25_32 text,
            age38_43 text,
            age48_53 text,
            age60_100 text,
            singer_name text,
            happy text,
            sad text,
            cry text,
            angry text,
            surprise text
            )""")
    conn.commit()
    conn.close()
#--------------------------------deleting record---------------------------------------------
def delete_one():
    """Delete the record whose rowid was typed into the 'delete' entry box, then clear the box."""
    conn = sqlite3.connect('music4.db')
    c = conn.cursor()
    # Parameterized query instead of string concatenation: the entry text is
    # untrusted user input, and '?' binding prevents SQL injection. SQLite's
    # integer affinity on rowid still matches a numeric string such as '3'.
    c.execute("DELETE from music WHERE rowid = ?", (delete.get(),))
    delete.delete(0, END)
    conn.commit()
    conn.close()
# --- widget construction: spacer label, action buttons and banner buttons ---
label1 = Label(root, text = " ",pady=10)
b1 = Button(root, text = "Create table", command = create_table,padx=35,fg="white",pady=5,bg="green")
b2 = Button(root, text = "AddToDatabase", command = add_one,padx=25,fg="white",pady=5,bg="orange")
b3 = Button(root, text = "Delete", command = delete_one,padx=52.3,fg="white",pady=5,bg="Red")
b4 = Button(root, text = "Info", command = edit,padx=50,fg="white",pady=5,bg="blue")
b5 = Button(root, text = "DataBase Management System by Mohit Gupta",padx=125,pady=10,fg="White",bg="black")
b6 = Button(root, text = "Tools",state=DISABLED,padx=55,pady=10)
b7 = Button(root, text = "# USE FOR ADD TOOL ",state=DISABLED,padx=30,pady=10)
b8 = Button(root, text = "Displayall", command = show_allSongs,padx=25,fg="white",pady=5,bg="orange")
# --- grid layout: column 1 hosts data-entry actions, column 3 hosts table tools ---
label1.grid(row=2, column=3)
b1.grid(row=6, column=3)
b2.grid(row=23, column=1)
b3.grid(row=12, column=3)
b4.grid(row=14, column=3)
b5.grid(row=1, column=1)
b6.grid(row=1, column=3)
b7.grid(row=2, column=1)
b8.grid(row=25, column=1)
# calling mainloop method which is used
# when your application is ready to run
# and it tells the code to keep displaying
root.mainloop()
from tkinter.ttk import *
import sqlite3
from tkinter import *
root = tk.Tk()
root.title("DataBase Manager by Mohit Gupta")
root.geometry("800x640")
songs = Entry(root, width=50)
songs.grid(row=8,column=1,pady=5)
age0_2 = Entry(root, width=50)
age0_2.grid(row=9, column=1,pady=5)
age4_6 = Entry(root, width=50)
age4_6.grid(row=10, column=1,pady=5)
age8_12 = Entry(root, width=50)
age8_12.grid(row=11, column=1,pady=5)
age15_20 = Entry(root, width=50)
age15_20.grid(row=12, column=1,pady=5)
age25_32 = Entry(root, width=50)
age25_32.grid(row=13, column=1,pady=5)
age38_43 = Entry(root, width=50)
age38_43.grid(row=14, column=1,pady=5)
age48_53 = Entry(root, width=50)
age48_53.grid(row=15, column=1,pady=5)
age60_100 = Entry(root, width=50)
age60_100.grid(row=16, column=1,pady=5)
singer_name = Entry(root, width=50)
singer_name.grid(row=17, column=1,pady=5)
h = Entry(root, width=50)
h.grid(row=18, column=1,pady=5)
s = Entry(root, width=50)
s.grid(row=19, column=1,pady=5)
a = Entry(root, width=50)
a.grid(row=20, column=1,pady=5)
cr = Entry(root, width=50)
cr.grid(row=21, column=1,pady=5)
su = Entry(root, width=50)
su.grid(row=22, column=1,pady=5)
delete = Entry(root, width=20)
delete.grid(row=11, column=3, pady=5)
songs_label = Label(root, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(root, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(root, text="Age4_6",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(root, text="Age8_12",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(root, text="Age15_20",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(root, text="Age25_32",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(root, text="Age38_43",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(root, text="Age48_53",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(root, text="Age60_100",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(root, text="singer",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(root, text="Happy",padx=5)
h_label.grid(row=18, column=0)
s_label = Label(root, text="Sad",padx=5)
s_label.grid(row=19, column=0)
a_label = Label(root, text="Angry",padx=5)
a_label.grid(row=20, column=0)
cr_label = Label(root, text="cry",padx=5)
cr_label.grid(row=21, column=0)
su_label = Label(root, text="Surprise",padx=5)
su_label.grid(row=22, column=0)
delete_label = Label(root, text="Select ID")
delete_label.grid(row=11, column=2, pady=10)
def update():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("""UPDATE music SET
songs = :songs,
age0_2 = :age0_2,
age4_6 = :age4_6,
age8_12 = :age8_12,
age15_20 = :age15_20,
age25_32 = :age25_32,
age38_43 = :age38_43,
age48_53 = :age48_53,
age60_100 = :age60_100,
singer_name = :singer_name,
happy = :h,
sad = :s,
angry = :a,
cry = :cr,
surprise = :su,
WHERE oid = :oid""",
{
'songs': songs_editor.get(),
'age0_2': age0_2_editor.get(),
'age4_6': age4_6_editor.get(),
'age8_12': age8_12_editor.get(),
'age15_20': age15_20_editor.get(),
'age25_32': age25_32_editor.get(),
'age38_43': age38_43_editor.get(),
'age48_53': age48_53_editor.get(),
'age60_100': age60_100_editor.get(),
'singer_name': singer_name_editor.get(),
'h': h_editor.get(),
's': s_editor.get(),
'a': a_editor.get(),
'cr': cr_editor.get(),
'su': su_editor.get(),
'oid': item_id
})
conn.commit()
conn.close()
def edit():
editor = Tk()
editor.title("Information")
editor.geometry("600x640")
conn = sqlite3.connect('music4.db')
c = conn.cursor()
item_id = delete.get()
c.execute("SELECT * FROM music WHERE oid = "+ item_id )
items = c.fetchall()
songs_editor = Entry(editor, width=50)
songs_editor.grid(row=8,column=1,pady=5)
age0_2_editor = Entry(editor, width=50)
age0_2_editor.grid(row=9, column=1,pady=5)
age4_6_editor = Entry(editor, width=50)
age4_6_editor.grid(row=10, column=1,pady=5)
age8_12_editor = Entry(editor, width=50)
age8_12_editor.grid(row=11, column=1,pady=5)
age15_20_editor = Entry(editor, width=50)
age15_20_editor.grid(row=12, column=1,pady=5)
age25_32_editor = Entry(editor, width=50)
age25_32_editor.grid(row=13, column=1,pady=5)
age38_43_editor = Entry(editor, width=50)
age38_43_editor.grid(row=14, column=1,pady=5)
age48_53_editor = Entry(editor, width=50)
age48_53_editor.grid(row=15, column=1,pady=5)
age60_100_editor = Entry(editor, width=50)
age60_100_editor.grid(row=16, column=1,pady=5)
singer_name_editor = Entry(editor, width=50)
singer_name_editor.grid(row=17, column=1,pady=5)
h_editor = Entry(editor, width=50)
h_editor.grid(row=17, column=1,pady=5)
s_editor = Entry(editor, width=50)
s_editor.grid(row=18, column=1,pady=5)
a_editor = Entry(editor, width=50)
a_editor.grid(row=19, column=1,pady=5)
cr_editor = Entry(editor, width=50)
cr_editor.grid(row=20, column=1,pady=5)
su_editor= Entry(editor, width=50)
su_editor.grid(row=21, column=1,pady=5)
songs_label = Label(editor, text="Songs",padx=5)
songs_label.grid(row=8, column=0)
age0_2_label = Label(editor, text="Age0_2",padx=5)
age0_2_label.grid(row=9, column=0)
age4_6_label = Label(editor, text="Age0_2",padx=5)
age4_6_label.grid(row=10, column=0)
age8_12_label = Label(editor, text="Age0_2",padx=5)
age8_12_label.grid(row=11, column=0)
age15_20_label = Label(editor, text="Age0_2",padx=5)
age15_20_label.grid(row=12, column=0)
age25_32_label = Label(editor, text="Age0_2",padx=5)
age25_32_label.grid(row=13, column=0)
age38_43_label = Label(editor, text="Age0_2",padx=5)
age38_43_label.grid(row=14, column=0)
age48_53_label = Label(editor, text="Age0_2",padx=5)
age48_53_label.grid(row=15, column=0)
age60_100_label = Label(editor, text="Age0_2",padx=5)
age60_100_label.grid(row=16, column=0)
singer_name_label = Label(editor, text="Age0_2",padx=5)
singer_name_label.grid(row=17, column=0)
h_label = Label(editor, text="Happy",padx=5)
h_label.grid(row=17, column=0)
s_label = Label(editor, text="Sad",padx=5)
s_label.grid(row=18, column=0)
a_label = Label(editor, text="Angry",padx=5)
a_label.grid(row=19, column=0)
cr_label = Label(editor, text="cry",padx=5)
cr_label.grid(row=20, column=0)
su_label = Label(editor, text="Surprise",padx=5)
su_label.grid(row=21, column=0)
for item in items:
songs_editor.insert(0, item[0])
age0_2_editor.insert(0, item[1])
age4_6_editor.insert(0, item[2])
age8_12_editor.insert(0, item[3])
age15_20_editor.insert(0, item[4])
age25_32_editor.insert(0, item[5])
age38_43_editor.insert(0, item[6])
age48_53_editor.insert(0, item[7])
age60_100_editor.insert(0, item[8])
singer_name_editor.insert(0, item[9])
h_editor.insert(0, item[10])
s_editor.insert(0, item[11])
a_editor.insert(0, item[12])
cr_editor.insert(0, item[13])
su_editor.insert(0, item[14])
b4_edit = Button(editor, text = "Info",padx=50,fg="white",pady=5,bg="blue")
b4_edit.grid(row=34, column=1)
def add_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("INSERT INTO music VALUES (:songs, :age0_2, :age4_6, :age8_12, :age15_20, :age25_32, :age38_43, :age48_53, :age60_100, :singer_name, :h, :s, :a, :cr, :su)",
{
'songs': songs.get(),
'age0_2': age0_2.get(),
'age4_6': age4_6.get(),
'age8_12': age8_12.get(),
'age15_20': age15_20.get(),
'age25_32': age25_32.get(),
'age38_43': age38_43.get(),
'age48_53': age48_53.get(),
'age60_100': age60_100.get(),
'singer_name': singer_name.get(),
'h': h.get(),
's': s.get(),
'a': a.get(),
'cr': cr.get(),
'su': su.get()
})
songs.delete(0, END)
age0_2.delete(0, END)
age4_6.delete(0, END)
age8_12.delete(0, END)
age15_20.delete(0, END)
age25_32.delete(0, END)
age38_43.delete(0, END)
age48_53.delete(0, END)
age60_100.delete(0, END)
singer_name.delete(0, END)
h.delete(0, END)
s.delete(0, END)
a.delete(0, END)
cr.delete(0, END)
su.delete(0, END)
conn.commit()
conn.close()
def show_allSongs():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("SELECT rowid, * FROM music")
items = c.fetchall()
print_items = ''
for item in items:
print_items = str(item[0]) + " " + str(item[1]) + "\t\t\t|" + str(item[2]) + " " + str(item[3]) + " " + str(item[4])+ " " + str(item[5]) + " " + str(item[6]) + " " + str(item[7]) + " " + str(item[8]) + " " + str(item[9]) + " | " + str(item[11]) + " " + str(item[12]) + " " + str(item[13]) + " " + str(item[14]) + " " + str(item[15]) + " | " + str(item[10])
print(print_items)
print("---------------------------------------------------------------------------------------------------\n")
conn.commit()
conn.close()
age0_2 text,
age4_6 text,
age8_12 text,
age15_20 text,
age25_32 text,
age38_43 text,
age48_53 text,
age60_100 text,
singer_name text,
happy text,
sad text,
cry text,
angry text,
surprise text
)""")
conn.commit()
conn.close()
def delete_one():
conn = sqlite3.connect('music4.db')
c = conn.cursor()
c.execute("DELETE from music WHERE rowid = " + delete.get())
delete.delete(0, END)
conn.commit()
conn.close()
label1 = Label(root, text = " ",pady=10)
b1 = Button(root, text = "Create table", command = create_table,padx=35,fg="white",pady=5,bg="green")
b2 = Button(root, text = "AddToDatabase", command = add_one,padx=25,fg="white",pady=5,bg="orange")
b3 = Button(root, text = "Delete", command = delete_one,padx=52.3,fg="white",pady=5,bg="Red")
b4 = Button(root, text = "Info", command = edit,padx=50,fg="white",pady=5,bg="blue")
b5 = Button(root, text = "DataBase Management System by Mohit Gupta",padx=125,pady=10,fg="White",bg="black")
b6 = Button(root, text = "Tools",state=DISABLED,padx=55,pady=10)
b7 = Button(root, text = "# USE FOR ADD TOOL ",state=DISABLED,padx=30,pady=10)
b8 = Button(root, text = "Displayall", command = show_allSongs,padx=25,fg="white",pady=5,bg="orange")
label1.grid(row=2, column=3)
b1.grid(row=6, column=3)
b2.grid(row=23, column=1)
b3.grid(row=12, column=3)
b4.grid(row=14, column=3)
b5.grid(row=1, column=1)
b6.grid(row=1, column=3)
b7.grid(row=2, column=1)
b8.grid(row=25, column=1)
root.mainloop() | true | true |
f7158e044b9155a5343668120d5af436908eaa72 | 8,428 | py | Python | synchronization/SyncNetInstance.py | PlatterDataset/feature | 2ebdc1b28498b709a0c91e60c19bfc731006bc50 | [
"MIT"
] | null | null | null | synchronization/SyncNetInstance.py | PlatterDataset/feature | 2ebdc1b28498b709a0c91e60c19bfc731006bc50 | [
"MIT"
] | null | null | null | synchronization/SyncNetInstance.py | PlatterDataset/feature | 2ebdc1b28498b709a0c91e60c19bfc731006bc50 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Video 25 FPS, Audio 16000HZ
import torch
import numpy
import time, pdb, argparse, subprocess, os, math, glob
import cv2
import python_speech_features
from scipy import signal
from scipy.io import wavfile
from SyncNetModel import *
from shutil import rmtree
# ==================== Get OFFSET ====================
def get_median(data1):
    """Return the median of *data1* without modifying it.

    For an even number of elements the mean of the two central values is
    returned; for an odd number, the middle value itself. Works for any
    elements supporting comparison, '+' and '/' (floats, 0-dim tensors, ...).
    Raises IndexError on an empty sequence, like the original implementation.
    """
    data = sorted(data1)
    size = len(data)
    if size % 2 == 0:
        # Even length: average of the two middle elements.
        return (data[size // 2] + data[size // 2 - 1]) / 2
    # Odd length: the exact middle element.
    return data[(size - 1) // 2]
def calc_pdist(feat1, feat2, vshift=40):
    """For each row of feat1, distances to a +/-vshift window of rows of feat2.

    Returns a list with len(feat1) tensors, each of length 2*vshift+1.
    """
    win_size = 2 * vshift + 1
    # Zero-pad the second feature sequence on both ends so every window is full-sized.
    padded = torch.nn.functional.pad(feat2, (0, 0, vshift, vshift))
    return [
        torch.nn.functional.pairwise_distance(
            feat1[[idx], :].repeat(win_size, 1),
            padded[idx:idx + win_size, :],
        )
        for idx in range(len(feat1))
    ]
# ==================== MAIN DEF ====================
class SyncNetInstance(torch.nn.Module):
    """Wrapper around the SyncNet model S: extracts lip/audio embeddings and scores AV sync."""
    def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):
        super(SyncNetInstance, self).__init__();
        # The model is placed on the GPU unconditionally -- CUDA is required.
        self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();
    def evaluate(self, opt, videofile, num):
        """Score audio-video synchronisation of *videofile*.

        Returns (per-frame offsets array, overall confidence, raw distance matrix).
        Side effects: extracts frames and audio via ffmpeg into opt.tmp_dir/opt.reference,
        saves MFCCs under ./mfcc_saver/, and writes a per-chunk offset report to
        './<reference>_<num>_resultoff.txt'.
        """
        self.__S__.eval();
        # ========== ==========
        # Convert files
        # ========== ==========
        if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):
            rmtree(os.path.join(opt.tmp_dir,opt.reference))
        os.makedirs(os.path.join(opt.tmp_dir,opt.reference))
        # Per the file header, video is assumed 25 fps and audio 16 kHz mono.
        command = ("ffmpeg -y -i %s -threads 1 -f image2 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg')))
        output = subprocess.call(command, shell=True, stdout=None)
        command = ("ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav')))
        output = subprocess.call(command, shell=True, stdout=None)
        # ========== ==========
        # Load video
        # ========== ==========
        images = []
        flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))
        flist.sort()
        for fname in flist:
            images.append(cv2.imread(fname))
        # Stack frames then reorder to (batch, channels, time, height, width).
        im = numpy.stack(images,axis=3)
        im = numpy.expand_dims(im,axis=0)
        im = numpy.transpose(im,(0,3,4,1,2))
        imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
        # ========== ==========
        # Load audio
        # ========== ==========
        sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))
        mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))
        mfcc = numpy.stack([numpy.array(i) for i in mfcc])
        # Persist the MFCCs for later inspection (assumes ./mfcc_saver/ exists -- TODO confirm).
        torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')
        ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')
        ww.write(str(mfcc))
        cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)
        cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
        # ========== ==========
        # Check audio and video input length
        # ========== ==========
        if (float(len(audio))/16000) != (float(len(images))/25) :
            print("WARNING: Audio (%.4fs) and video (%.4fs) lengths are different."%(float(len(audio))/16000,float(len(images))/25))
        # 640 audio samples = one 25 fps video frame at 16 kHz.
        min_length = min(len(images),math.floor(len(audio)/640))
        # ========== ==========
        # Generate video and audio feats
        # ========== ==========
        lastframe = min_length-5
        im_feat = []
        cc_feat = []
        wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')
        tS = time.time()
        for i in range(0,lastframe,opt.batch_size):
            # Lip embeddings over sliding 5-frame windows.
            im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
            im_in = torch.cat(im_batch,0)
            im_out = self.__S__.forward_lip(im_in.cuda());
            im_feat.append(im_out.data.cpu())
            # Audio embeddings over 20 MFCC frames (4 MFCC frames per video frame).
            cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
            cc_in = torch.cat(cc_batch,0)
            cc_out = self.__S__.forward_aud(cc_in.cuda())
            cc_feat.append(cc_out.data.cpu())
        im_feat = torch.cat(im_feat,0)
        cc_feat = torch.cat(cc_feat,0)
        # ========== ==========
        # Compute offset
        # ========== ==========
        print('Compute time %.3f sec.' % (time.time()-tS))
        dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)
        mdist = torch.mean(torch.stack(dists,1),1)
        off = []
        avg_dist = []
        for t in range(0,len(im_feat)):
            # tt/offy track the minimum distance (and its shift index) for this frame alone;
            # avg_dist accumulates distances over a 100-frame chunk, of = chunk argmin.
            tt = 10000
            offy = 0
            of = 0
            of_m = 0
            dis_mid = 0
            dis_min = 1000000000
            for k in range(0,len(dists[t])):
                if t == 0:
                    avg_dist.append(dists[t][k])
                else:
                    avg_dist[k] += dists[t][k]
                if (t+1)% 100 == 0 or t == len(im_feat)-1:
                    if avg_dist[k] < dis_min:
                        dis_min = avg_dist[k]
                        of = k
                if dists[t][k]<tt:
                    tt = dists[t][k]
                    offy = k
            if (t+1)%100 == 0 or t == len(im_feat) -1:
                # End of a 100-frame chunk: report its best offset and a confidence
                # score, then reset the accumulators for the next chunk.
                dis_mid = get_median(avg_dist)
                for k in range(len(avg_dist)):
                    avg_dist[k] = 0
                wr.write(str(t%100)+' ')
                wr.write(str((opt.vshift-of) * 0.04)+'s ')
                if (t+1)%100 != 0:
                    wr.write("conf = "+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\n')# confidence changed to use the median
                else:
                    wr.write("conf = "+str((dis_mid.item()-dis_min.item())/100)+'\n')
            off.append(opt.vshift-offy)
        off = numpy.array(off)
        minval, minidx = torch.min(mdist,0)
        offset = opt.vshift-minidx
        conf = torch.median(mdist) - minval
        fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
        # fdist = numpy.pad(fdist, (3,3), 'constant', constant_values=15)
        fconf = torch.median(mdist).numpy() - fdist
        fconfm = signal.medfilt(fconf,kernel_size=9)
        numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
        print('Framewise conf: ')
        print(fconfm)
        print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' % (offset,minval,conf))
        dists_npy = numpy.array([ dist.numpy() for dist in dists ])
        return off, conf.numpy(), dists_npy
    def extract_feature(self, opt, videofile):
        """Return lip feature embeddings for every sliding 5-frame window of *videofile*."""
        self.__S__.eval();
        # ========== ==========
        # Load video
        # ========== ==========
        cap = cv2.VideoCapture(videofile)
        frame_num = 1;
        images = []
        while frame_num:
            frame_num += 1
            ret, image = cap.read()
            if ret == 0:
                break
            images.append(image)
        im = numpy.stack(images,axis=3)
        im = numpy.expand_dims(im,axis=0)
        im = numpy.transpose(im,(0,3,4,1,2))
        imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
        # ========== ==========
        # Generate video feats
        # ========== ==========
        lastframe = len(images)-4
        im_feat = []
        tS = time.time()
        for i in range(0,lastframe,opt.batch_size):
            im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
            im_in = torch.cat(im_batch,0)
            im_out = self.__S__.forward_lipfeat(im_in.cuda());
            im_feat.append(im_out.data.cpu())
        im_feat = torch.cat(im_feat,0)
        # ========== ==========
        # Compute offset
        # ========== ==========
        print('Compute time %.3f sec.' % (time.time()-tS))
        return im_feat
    def loadParameters(self, path):
        """Load model weights from *path* into the wrapped SyncNet (CPU-mapped, then copied in place)."""
        loaded_state = torch.load(path, map_location=lambda storage, loc: storage);
        self_state = self.__S__.state_dict();
        for name, param in loaded_state.items():
            self_state[name].copy_(param);
| 32.666667 | 154 | 0.51495 |
import torch
import numpy
import time, pdb, argparse, subprocess, os, math, glob
import cv2
import python_speech_features
from scipy import signal
from scipy.io import wavfile
from SyncNetModel import *
from shutil import rmtree
def get_median(data1):
data = sorted(data1)
size = len(data)
if size % 2 == 0:
median = (data[size//2]+data[size//2-1])/2
data[0] = median
if size % 2 == 1:
median = data[(size-1)//2]
data[0] = median
return data[0]
def calc_pdist(feat1, feat2, vshift=40):
win_size = vshift*2+1
feat2p = torch.nn.functional.pad(feat2,(0,0,vshift,vshift))
dists = []
for i in range(0,len(feat1)):
dists.append(torch.nn.functional.pairwise_distance(feat1[[i],:].repeat(win_size, 1), feat2p[i:i+win_size,:]))
return dists
class SyncNetInstance(torch.nn.Module):
def __init__(self, dropout = 0, num_layers_in_fc_layers = 1024):
super(SyncNetInstance, self).__init__();
self.__S__ = S(num_layers_in_fc_layers = num_layers_in_fc_layers).cuda();
def evaluate(self, opt, videofile, num):
self.__S__.eval();
if os.path.exists(os.path.join(opt.tmp_dir,opt.reference)):
rmtree(os.path.join(opt.tmp_dir,opt.reference))
os.makedirs(os.path.join(opt.tmp_dir,opt.reference))
command = ("ffmpeg -y -i %s -threads 1 -f image2 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'%06d.jpg')))
output = subprocess.call(command, shell=True, stdout=None)
command = ("ffmpeg -y -i %s -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 %s" % (videofile,os.path.join(opt.tmp_dir,opt.reference,'audio.wav')))
output = subprocess.call(command, shell=True, stdout=None)
images = []
flist = glob.glob(os.path.join(opt.tmp_dir,opt.reference,'*.jpg'))
flist.sort()
for fname in flist:
images.append(cv2.imread(fname))
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
sample_rate, audio = wavfile.read(os.path.join(opt.tmp_dir,opt.reference,'audio.wav'))
mfcc = zip(*python_speech_features.mfcc(audio,sample_rate))
mfcc = numpy.stack([numpy.array(i) for i in mfcc])
torch.save(mfcc,'./mfcc_saver/mfcc'+str(num)+'.pt')
ww = open('./mfcc_saver/mfcc'+str(num)+'.txt','w')
ww.write(str(mfcc))
cc = numpy.expand_dims(numpy.expand_dims(mfcc,axis=0),axis=0)
cct = torch.autograd.Variable(torch.from_numpy(cc.astype(float)).float())
if (float(len(audio))/16000) != (float(len(images))/25) :
print("WARNING: Audio (%.4fs) and video (%.4fs) lengths are different."%(float(len(audio))/16000,float(len(images))/25))
min_length = min(len(images),math.floor(len(audio)/640))
lastframe = min_length-5
im_feat = []
cc_feat = []
wr = open('./'+str(opt.reference)+'_'+str(num)+'_resultoff.txt','w')
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lip(im_in.cuda());
im_feat.append(im_out.data.cpu())
cc_batch = [ cct[:,:,:,vframe*4:vframe*4+20] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
cc_in = torch.cat(cc_batch,0)
cc_out = self.__S__.forward_aud(cc_in.cuda())
cc_feat.append(cc_out.data.cpu())
im_feat = torch.cat(im_feat,0)
cc_feat = torch.cat(cc_feat,0)
print('Compute time %.3f sec.' % (time.time()-tS))
dists = calc_pdist(im_feat,cc_feat,vshift=opt.vshift)
mdist = torch.mean(torch.stack(dists,1),1)
off = []
avg_dist = []
for t in range(0,len(im_feat)):
tt = 10000
offy = 0
of = 0
of_m = 0
dis_mid = 0
dis_min = 1000000000
for k in range(0,len(dists[t])):
if t == 0:
avg_dist.append(dists[t][k])
else:
avg_dist[k] += dists[t][k]
if (t+1)% 100 == 0 or t == len(im_feat)-1:
if avg_dist[k] < dis_min:
dis_min = avg_dist[k]
of = k
if dists[t][k]<tt:
tt = dists[t][k]
offy = k
if (t+1)%100 == 0 or t == len(im_feat) -1:
dis_mid = get_median(avg_dist)
for k in range(len(avg_dist)):
avg_dist[k] = 0
wr.write(str(t%100)+' ')
wr.write(str((opt.vshift-of) * 0.04)+'s ')
if (t+1)%100 != 0:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/((t+1)%100))+'\n')
else:
wr.write("conf = "+str((dis_mid.item()-dis_min.item())/100)+'\n')
off.append(opt.vshift-offy)
off = numpy.array(off)
minval, minidx = torch.min(mdist,0)
offset = opt.vshift-minidx
conf = torch.median(mdist) - minval
fdist = numpy.stack([dist[minidx].numpy() for dist in dists])
fconf = torch.median(mdist).numpy() - fdist
fconfm = signal.medfilt(fconf,kernel_size=9)
numpy.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Framewise conf: ')
print(fconfm)
print('AV offset: \t%d \nMin dist: \t%.3f\nConfidence: \t%.3f' % (offset,minval,conf))
dists_npy = numpy.array([ dist.numpy() for dist in dists ])
return off, conf.numpy(), dists_npy
def extract_feature(self, opt, videofile):
self.__S__.eval();
cap = cv2.VideoCapture(videofile)
frame_num = 1;
images = []
while frame_num:
frame_num += 1
ret, image = cap.read()
if ret == 0:
break
images.append(image)
im = numpy.stack(images,axis=3)
im = numpy.expand_dims(im,axis=0)
im = numpy.transpose(im,(0,3,4,1,2))
imtv = torch.autograd.Variable(torch.from_numpy(im.astype(float)).float())
lastframe = len(images)-4
im_feat = []
tS = time.time()
for i in range(0,lastframe,opt.batch_size):
im_batch = [ imtv[:,:,vframe:vframe+5,:,:] for vframe in range(i,min(lastframe,i+opt.batch_size)) ]
im_in = torch.cat(im_batch,0)
im_out = self.__S__.forward_lipfeat(im_in.cuda());
im_feat.append(im_out.data.cpu())
im_feat = torch.cat(im_feat,0)
print('Compute time %.3f sec.' % (time.time()-tS))
return im_feat
def loadParameters(self, path):
loaded_state = torch.load(path, map_location=lambda storage, loc: storage);
self_state = self.__S__.state_dict();
for name, param in loaded_state.items():
self_state[name].copy_(param);
| true | true |
f7158e76c232bf5249188b7a0fe3dc8f0b03f00c | 405 | py | Python | turtlebot3_automatic_parking_vision/setup.py | herb-kuta-lge/turtlebot3_applications | b41f06fda13bcab43800e311c8df63aa0f075445 | [
"Apache-2.0"
] | 70 | 2017-06-14T16:48:51.000Z | 2022-03-15T02:44:14.000Z | turtlebot3_automatic_parking_vision/setup.py | herb-kuta-lge/turtlebot3_applications | b41f06fda13bcab43800e311c8df63aa0f075445 | [
"Apache-2.0"
] | 20 | 2018-06-04T12:06:30.000Z | 2021-09-10T14:01:25.000Z | turtlebot3_automatic_parking_vision/setup.py | herb-kuta-lge/turtlebot3_applications | b41f06fda13bcab43800e311c8df63aa0f075445 | [
"Apache-2.0"
] | 47 | 2017-10-31T23:51:19.000Z | 2022-03-23T12:38:48.000Z | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
    packages=['turtlebot3_automatic_parking_vision'],
    package_dir={'': 'src'}  # Python sources live under src/<package_name>
)
setup(**setup_args)
| 28.928571 | 70 | 0.780247 | s_setup(
packages=['turtlebot3_automatic_parking_vision'],
package_dir={'': 'src'}
)
setup(**setup_args)
| true | true |
f7158e77cbc37e40ac0788e476409ce0f922c325 | 5,748 | py | Python | src/deepspeech_training/util/config.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 3 | 2021-08-20T16:40:09.000Z | 2022-02-08T23:17:52.000Z | src/deepspeech_training/util/config.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 1 | 2022-03-22T04:16:15.000Z | 2022-03-22T04:26:03.000Z | src/deepspeech_training/util/config.py | googleinterns/deepspeech-reconstruction | 72f28d1e9064d221b3421c302a8725a8c71859ee | [
"Apache-2.0"
] | 1 | 2021-04-28T21:51:12.000Z | 2021-04-28T21:51:12.000Z | from __future__ import absolute_import, division, print_function
import os
import sys
import tensorflow.compat.v1 as tfv1
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
from src.flags import FLAGS
from .gpu import get_available_gpus
from .logging import log_error
from .text import Alphabet, UTF8Alphabet
from .helpers import parse_file_size
class ConfigSingleton:
    """Attribute-style proxy over the module-wide configuration object.

    Attribute reads are forwarded to the AttrDict installed by
    initialize_globals(); touching the proxy before that raises RuntimeError,
    as does asking for an option the configuration does not define.
    """
    _config = None

    def __getattr__(self, name):
        cfg = ConfigSingleton._config
        if not cfg:
            raise RuntimeError("Global configuration not yet initialized.")
        if not hasattr(cfg, name):
            raise RuntimeError("Configuration option {} not found in config.".format(name))
        return cfg[name]


Config = ConfigSingleton() # pylint: disable=invalid-name
def initialize_globals():
    """Build the global Config from FLAGS: session config, devices, alphabet, geometry.

    Must be called once after absl flags are parsed and before Config is read.
    Mutates several FLAGS entries to their defaults and calls sys.exit(1) on
    inconsistent flag combinations.
    """
    c = AttrDict()
    # Read-buffer
    FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)
    # Set default dropout rates
    if FLAGS.dropout_rate2 < 0:
        FLAGS.dropout_rate2 = FLAGS.dropout_rate
    if FLAGS.dropout_rate3 < 0:
        FLAGS.dropout_rate3 = FLAGS.dropout_rate
    if FLAGS.dropout_rate6 < 0:
        FLAGS.dropout_rate6 = FLAGS.dropout_rate
    # Set default checkpoint dir
    if not FLAGS.checkpoint_dir:
        FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))
    if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
        FLAGS.load_train = 'auto'
    if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
        FLAGS.load_evaluate = 'auto'
    # Set default summary dir
    if not FLAGS.summary_dir:
        FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
    # Standard session configuration that'll be used for all new sessions.
    c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
                                        inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
                                        intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
                                        gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))
    # CPU device
    c.cpu_device = '/cpu:0'
    # Available GPU devices
    c.available_devices = get_available_gpus(c.session_config)
    # If there is no GPU available, we fall back to CPU based operation
    if not c.available_devices:
        c.available_devices = [c.cpu_device]
    if FLAGS.utf8:
        c.alphabet = UTF8Alphabet()
    else:
        c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
    # Geometric Constants
    # ===================
    # For an explanation of the meaning of the geometric constants, please refer to
    # doc/Geometry.md
    # Number of MFCC features
    c.n_input = 26 # TODO: Determine this programmatically from the sample rate
    # The number of frames in the context
    c.n_context = 9 # TODO: Determine the optimal value using a validation data set
    # Number of units in hidden layers
    c.n_hidden = FLAGS.n_hidden
    c.n_hidden_1 = c.n_hidden
    c.n_hidden_2 = c.n_hidden
    c.n_hidden_5 = c.n_hidden
    # LSTM cell state dimension
    c.n_cell_dim = c.n_hidden
    # The number of units in the third layer, which feeds in to the LSTM
    c.n_hidden_3 = c.n_cell_dim
    # Units in the sixth layer = number of characters in the target language plus one
    c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label
    # Size of audio window in samples
    if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
        log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '
                  'by --audio_sample_rate value ({}) must be an integer value. Adjust '
                  'your --feature_win_len value or resample your audio accordingly.'
                  ''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))
        sys.exit(1)
    c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)
    # Stride for feature computations in samples
    if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
        log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '
                  'by --audio_sample_rate value ({}) must be an integer value. Adjust '
                  'your --feature_win_step value or resample your audio accordingly.'
                  ''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))
        sys.exit(1)
    c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)
    if FLAGS.one_shot_infer:
        if not os.path.exists(FLAGS.one_shot_infer):
            log_error('Path specified in --one_shot_infer is not a valid file.')
            sys.exit(1)
    if FLAGS.train_cudnn and FLAGS.load_cudnn:
        log_error('Trying to use --train_cudnn, but --load_cudnn '
                  'was also specified. The --load_cudnn flag is only '
                  'needed when converting a CuDNN RNN checkpoint to '
                  'a CPU-capable graph. If your system is capable of '
                  'using CuDNN RNN, you can just specify the CuDNN RNN '
                  'checkpoint normally with --save_checkpoint_dir.')
        sys.exit(1)
    # If separate save and load flags were not specified, default to load and save
    # from the same dir.
    if not FLAGS.save_checkpoint_dir:
        FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir
    if not FLAGS.load_checkpoint_dir:
        FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir
    ConfigSingleton._config = c # pylint: disable=protected-access
| 38.066225 | 108 | 0.677279 | from __future__ import absolute_import, division, print_function
import os
import sys
import tensorflow.compat.v1 as tfv1
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
from src.flags import FLAGS
from .gpu import get_available_gpus
from .logging import log_error
from .text import Alphabet, UTF8Alphabet
from .helpers import parse_file_size
class ConfigSingleton:
_config = None
def __getattr__(self, name):
if not ConfigSingleton._config:
raise RuntimeError("Global configuration not yet initialized.")
if not hasattr(ConfigSingleton._config, name):
raise RuntimeError("Configuration option {} not found in config.".format(name))
return ConfigSingleton._config[name]
Config = ConfigSingleton()
def initialize_globals():
c = AttrDict()
FLAGS.read_buffer = parse_file_size(FLAGS.read_buffer)
if FLAGS.dropout_rate2 < 0:
FLAGS.dropout_rate2 = FLAGS.dropout_rate
if FLAGS.dropout_rate3 < 0:
FLAGS.dropout_rate3 = FLAGS.dropout_rate
if FLAGS.dropout_rate6 < 0:
FLAGS.dropout_rate6 = FLAGS.dropout_rate
if not FLAGS.checkpoint_dir:
FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))
if FLAGS.load_train not in ['last', 'best', 'init', 'auto']:
FLAGS.load_train = 'auto'
if FLAGS.load_evaluate not in ['last', 'best', 'auto']:
FLAGS.load_evaluate = 'auto'
if not FLAGS.summary_dir:
FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,
gpu_options=tfv1.GPUOptions(allow_growth=FLAGS.use_allow_growth))
# CPU device
c.cpu_device = '/cpu:0'
# Available GPU devices
c.available_devices = get_available_gpus(c.session_config)
# If there is no GPU available, we fall back to CPU based operation
if not c.available_devices:
c.available_devices = [c.cpu_device]
if FLAGS.utf8:
c.alphabet = UTF8Alphabet()
else:
c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
# Geometric Constants
# ===================
# For an explanation of the meaning of the geometric constants, please refer to
# doc/Geometry.md
# Number of MFCC features
c.n_input = 26 # TODO: Determine this programmatically from the sample rate
# The number of frames in the context
c.n_context = 9 # TODO: Determine the optimal value using a validation data set
# Number of units in hidden layers
c.n_hidden = FLAGS.n_hidden
c.n_hidden_1 = c.n_hidden
c.n_hidden_2 = c.n_hidden
c.n_hidden_5 = c.n_hidden
# LSTM cell state dimension
c.n_cell_dim = c.n_hidden
# The number of units in the third layer, which feeds in to the LSTM
c.n_hidden_3 = c.n_cell_dim
# Units in the sixth layer = number of characters in the target language plus one
c.n_hidden_6 = c.alphabet.size() + 1 # +1 for CTC blank label
# Size of audio window in samples
if (FLAGS.feature_win_len * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_len value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_len value or resample your audio accordingly.'
''.format(FLAGS.feature_win_len, FLAGS.feature_win_len / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)
# Stride for feature computations in samples
if (FLAGS.feature_win_step * FLAGS.audio_sample_rate) % 1000 != 0:
log_error('--feature_win_step value ({}) in milliseconds ({}) multiplied '
'by --audio_sample_rate value ({}) must be an integer value. Adjust '
'your --feature_win_step value or resample your audio accordingly.'
''.format(FLAGS.feature_win_step, FLAGS.feature_win_step / 1000, FLAGS.audio_sample_rate))
sys.exit(1)
c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)
if FLAGS.one_shot_infer:
if not os.path.exists(FLAGS.one_shot_infer):
log_error('Path specified in --one_shot_infer is not a valid file.')
sys.exit(1)
if FLAGS.train_cudnn and FLAGS.load_cudnn:
log_error('Trying to use --train_cudnn, but --load_cudnn '
'was also specified. The --load_cudnn flag is only '
'needed when converting a CuDNN RNN checkpoint to '
'a CPU-capable graph. If your system is capable of '
'using CuDNN RNN, you can just specify the CuDNN RNN '
'checkpoint normally with --save_checkpoint_dir.')
sys.exit(1)
# If separate save and load flags were not specified, default to load and save
# from the same dir.
if not FLAGS.save_checkpoint_dir:
FLAGS.save_checkpoint_dir = FLAGS.checkpoint_dir
if not FLAGS.load_checkpoint_dir:
FLAGS.load_checkpoint_dir = FLAGS.checkpoint_dir
ConfigSingleton._config = c # pylint: disable=protected-access
| true | true |
f715901de4244e706505bcbc2ad3c07df8e07766 | 5,685 | py | Python | lib/itertools.py | ralic/grumpy3 | a471f7ba64167d5812c0f6701380f9f71fa937c3 | [
"Apache-2.0"
] | null | null | null | lib/itertools.py | ralic/grumpy3 | a471f7ba64167d5812c0f6701380f9f71fa937c3 | [
"Apache-2.0"
] | null | null | null | lib/itertools.py | ralic/grumpy3 | a471f7ba64167d5812c0f6701380f9f71fa937c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for iterating over containers."""
import _collections
import sys
class chain(object):
def from_iterable(cls, iterables):
for it in iterables:
for element in it:
yield element
from_iterable = classmethod(from_iterable)
def __init__(self, *iterables):
if not iterables:
self.iterables = iter([[]])
else:
self.iterables = iter(iterables)
self.curriter = iter(next(self.iterables))
def __iter__(self):
return self
def __next__(self):
flag = True
while flag:
try:
ret = next(self.curriter)
flag = False
except StopIteration:
self.curriter = iter(next(self.iterables))
return ret
def compress(data, selectors):
return (d for d,s in zip(data, selectors) if s)
def count(start=0, step=1):
n = start
while True:
yield n
n += step
def cycle(iterable):
saved = []
for element in iterable:
yield element
saved.append(element)
while saved:
for element in saved:
yield element
def dropwhile(predicate, iterable):
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
yield x
break
for x in iterable:
yield x
class groupby(object):
# [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
# [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def __next__(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it) # Exit on StopIteration
self.currkey = self.keyfunc(self.currvalue)
def ifilter(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if not predicate(x):
yield x
def imap(function, *iterables):
iterables = list(map(iter, iterables))
while True:
args = [next(it) for it in iterables]
if function is None:
yield tuple(args)
else:
yield function(*args)
def islice(iterable, *args):
s = slice(*args)
it = iter(range(s.start or 0, s.stop or sys.maxsize, s.step or 1))
nexti = next(it)
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
def izip(*iterables):
iterators = list(map(iter, iterables))
while iterators:
yield tuple(map(next, iterators))
class ZipExhausted(Exception):
pass
def izip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
counter = [len(args) - 1]
def sentinel():
if not counter[0]:
raise ZipExhausted
counter[0] -= 1
yield fillvalue
fillers = repeat(fillvalue)
iterators = [chain(it, sentinel(), fillers) for it in args]
try:
while iterators:
yield tuple(map(next, iterators))
except ZipExhausted:
pass
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(list(range(n)), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(list(range(n)), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in product(list(range(n)), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def repeat(object, times=None):
if times is None:
while True:
yield object
else:
for i in range(times):
yield object
def starmap(function, iterable):
for args in iterable:
yield function(*args)
def takewhile(predicate, iterable):
for x in iterable:
if predicate(x):
yield x
else:
break
def tee(iterable, n=2):
it = iter(iterable)
deques = [_collections.deque() for i in range(n)]
def gen(mydeque):
while True:
if not mydeque:
newval = next(it)
for d in deques:
d.append(newval)
yield mydeque.popleft()
return tuple(gen(d) for d in deques)
| 23.491736 | 74 | 0.65277 |
import _collections
import sys
class chain(object):
def from_iterable(cls, iterables):
for it in iterables:
for element in it:
yield element
from_iterable = classmethod(from_iterable)
def __init__(self, *iterables):
if not iterables:
self.iterables = iter([[]])
else:
self.iterables = iter(iterables)
self.curriter = iter(next(self.iterables))
def __iter__(self):
return self
def __next__(self):
flag = True
while flag:
try:
ret = next(self.curriter)
flag = False
except StopIteration:
self.curriter = iter(next(self.iterables))
return ret
def compress(data, selectors):
return (d for d,s in zip(data, selectors) if s)
def count(start=0, step=1):
n = start
while True:
yield n
n += step
def cycle(iterable):
saved = []
for element in iterable:
yield element
saved.append(element)
while saved:
for element in saved:
yield element
def dropwhile(predicate, iterable):
iterable = iter(iterable)
for x in iterable:
if not predicate(x):
yield x
break
for x in iterable:
yield x
class groupby(object):
def __init__(self, iterable, key=None):
if key is None:
key = lambda x: x
self.keyfunc = key
self.it = iter(iterable)
self.tgtkey = self.currkey = self.currvalue = object()
def __iter__(self):
return self
def __next__(self):
while self.currkey == self.tgtkey:
self.currvalue = next(self.it)
self.currkey = self.keyfunc(self.currvalue)
self.tgtkey = self.currkey
return (self.currkey, self._grouper(self.tgtkey))
def _grouper(self, tgtkey):
while self.currkey == tgtkey:
yield self.currvalue
self.currvalue = next(self.it)
self.currkey = self.keyfunc(self.currvalue)
def ifilter(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
def ifilterfalse(predicate, iterable):
if predicate is None:
predicate = bool
for x in iterable:
if not predicate(x):
yield x
def imap(function, *iterables):
iterables = list(map(iter, iterables))
while True:
args = [next(it) for it in iterables]
if function is None:
yield tuple(args)
else:
yield function(*args)
def islice(iterable, *args):
s = slice(*args)
it = iter(range(s.start or 0, s.stop or sys.maxsize, s.step or 1))
nexti = next(it)
for i, element in enumerate(iterable):
if i == nexti:
yield element
nexti = next(it)
def izip(*iterables):
iterators = list(map(iter, iterables))
while iterators:
yield tuple(map(next, iterators))
class ZipExhausted(Exception):
pass
def izip_longest(*args, **kwds):
fillvalue = kwds.get('fillvalue')
counter = [len(args) - 1]
def sentinel():
if not counter[0]:
raise ZipExhausted
counter[0] -= 1
yield fillvalue
fillers = repeat(fillvalue)
iterators = [chain(it, sentinel(), fillers) for it in args]
try:
while iterators:
yield tuple(map(next, iterators))
except ZipExhausted:
pass
def product(*args, **kwds):
pools = list(map(tuple, args)) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
def permutations(iterable, r=None):
pool = tuple(iterable)
n = len(pool)
r = n if r is None else r
for indices in product(list(range(n)), repeat=r):
if len(set(indices)) == r:
yield tuple(pool[i] for i in indices)
def combinations(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in permutations(list(range(n)), r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def combinations_with_replacement(iterable, r):
pool = tuple(iterable)
n = len(pool)
for indices in product(list(range(n)), repeat=r):
if sorted(indices) == list(indices):
yield tuple(pool[i] for i in indices)
def repeat(object, times=None):
if times is None:
while True:
yield object
else:
for i in range(times):
yield object
def starmap(function, iterable):
for args in iterable:
yield function(*args)
def takewhile(predicate, iterable):
for x in iterable:
if predicate(x):
yield x
else:
break
def tee(iterable, n=2):
it = iter(iterable)
deques = [_collections.deque() for i in range(n)]
def gen(mydeque):
while True:
if not mydeque:
newval = next(it)
for d in deques:
d.append(newval)
yield mydeque.popleft()
return tuple(gen(d) for d in deques)
| true | true |
f7159091f18210b97ef9f6170f617a8643d4d010 | 1,414 | py | Python | hbi/server/tornado_server.py | Glutexo/host-inventory | 558b77eff633e5ec7cdb45393e767e4a05bca470 | [
"Apache-2.0"
] | 1 | 2018-09-17T13:57:55.000Z | 2018-09-17T13:57:55.000Z | hbi/server/tornado_server.py | Glutexo/host-inventory | 558b77eff633e5ec7cdb45393e767e4a05bca470 | [
"Apache-2.0"
] | 3 | 2018-10-02T10:05:12.000Z | 2018-10-10T09:33:47.000Z | hbi/server/tornado_server.py | Glutexo/host-inventory | 558b77eff633e5ec7cdb45393e767e4a05bca470 | [
"Apache-2.0"
] | 3 | 2018-08-15T16:50:51.000Z | 2018-09-26T08:52:44.000Z | import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
def serve_tornado():
app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop
if __name__ == "__main__":
app, loop = serve_tornado()
| 25.709091 | 87 | 0.66761 | import json, os
from threading import Thread
from tornado.ioloop import IOLoop
import tornado.web
from hbi.model import Host, Filter
from hbi.server import Service
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.write("boop")
class EntitiesPoster(tornado.web.RequestHandler):
def post(self):
hosts_json = json.loads(self.request.body)
hosts = (Host.from_json(h) for h in hosts_json)
ret = self.application.service.create_or_update(hosts)
self.write(json.dumps([h.to_json() for h in ret]))
class EntitiesSearcher(tornado.web.RequestHandler):
def post(self):
filters_json = json.loads(self.request.body) if self.request.body else None
filters = [Filter.from_json(h) for h in filters_json] if filters_json else None
ret = self.application.service.get(filters)
self.write(json.dumps([h.to_json() for h in ret]))
def serve_tornado():
app = tornado.web.Application([
(r"/", RootHandler),
(r"/entities/search", EntitiesSearcher),
(r"/entities", EntitiesPoster),
])
app.listen(int(os.environ.get("PORT", "50051")))
app.service = Service()
loop = IOLoop.current()
class TornadoRunThread(Thread):
def run(self):
loop.start()
TornadoRunThread().start()
return app, loop
if __name__ == "__main__":
app, loop = serve_tornado()
| true | true |
f71590e5707ba2a3e6cb07b4a5957c674ad9a1d3 | 4,112 | py | Python | members/management/commands/sent-invite.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 5 | 2019-03-12T21:38:32.000Z | 2021-11-06T15:26:56.000Z | members/management/commands/sent-invite.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 33 | 2019-01-21T15:54:50.000Z | 2021-05-18T17:54:52.000Z | members/management/commands/sent-invite.py | leonrenkema/makerspaceleiden-crm | 36ea20f5b9e263e8f30b1831ae4a2b1d5b926d3c | [
"Apache-2.0"
] | 5 | 2019-01-21T15:47:26.000Z | 2021-09-22T07:14:34.000Z | from django.core.management.base import BaseCommand, CommandError
from simple_history.models import HistoricalRecords
from members.models import User
from members.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.conf import settings
from django.core.mail import EmailMessage
import sys, os
from datetime import datetime
"""
Sent invites; to just one user, or all users
in the system,
"""
def reset_password(
email,
reset=False,
from_email=settings.DEFAULT_FROM_EMAIL,
template="members/email_invite.txt",
subject_template="members/email_invite_subject.txt",
):
try:
user = User.objects.get(email=email)
except Exception as e:
print("No user with email address <{}> found.".format(email), file=sys.stderr)
return False
if reset:
# Set it to an unguessable one - as unusable blocks email sending.
# user.set_unusable_password()
user.set_password(User.objects.make_random_password())
user.changeReason = "Locked it from the sent-invite command."
user.save()
form = PasswordResetForm({"email": email})
if not form.is_valid():
raise Exception("Eh - internal issues")
try:
form.save(
from_email=from_email,
email_template_name=template,
subject_template_name=subject_template,
)
print("{} - Email sent.".format(email))
except Exception as e:
print("Sending to <{}> failed: {}".format(email, e), file=sys.stderr)
return False
return True
class Command(BaseCommand):
help = "Sent invite to email adddress(es) provided - or read them from stdin."
def add_arguments(self, parser):
parser.add_argument("email", nargs="*", type=str)
parser.add_argument(
"--all",
action="store_true",
dest="all",
help="Sent a poll to -everyone-. Ignores anything specified on stdin/arguments",
)
parser.add_argument(
"--reset",
action="store_true",
dest="reset",
help="Also reset/block the current account. So any (old) password will not work any longer.",
)
parser.add_argument(
"--save",
dest="save",
type=str,
help="Save the message as rfc822 blobs rather than sending. Useful as we sort out dkim on the server. Pass the output directory as an argument",
)
parser.add_argument(
"--nevers",
dest="nevers",
action="store_true",
help="Skip people that have logged in at least once. Only valid in conjunction wit the --all options.",
)
def handle(self, *args, **options):
rc = 0
if options["save"]:
settings.EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
settings.EMAIL_FILE_PATH = options["save"]
if options["all"]:
if options["email"]:
print(
"The option --all cannot be used with additional emails specified as arguments.",
file=sys.stderr,
)
rc = 1
else:
for user in User.objects.all():
if options["nevers"] and user.last_login:
print(
"Skipping - login {} seen {}".format(
user.name, user.last_login.strftime("%Y-%m-%d %H:%M:%S")
)
)
continue
rc |= not reset_password(user.email, options["reset"])
elif options["email"]:
for email in options["email"]:
rc |= not reset_password(email, options["reset"])
else:
for email in sys.stdin:
rc |= not reset_password(email, options["reset"])
# if options['save']:
# for f in os.listdir(options['save']):
# print(f)
sys.exit(rc)
| 33.16129 | 156 | 0.56785 | from django.core.management.base import BaseCommand, CommandError
from simple_history.models import HistoricalRecords
from members.models import User
from members.models import User
from django.contrib.auth.forms import PasswordResetForm
from django.conf import settings
from django.core.mail import EmailMessage
import sys, os
from datetime import datetime
def reset_password(
email,
reset=False,
from_email=settings.DEFAULT_FROM_EMAIL,
template="members/email_invite.txt",
subject_template="members/email_invite_subject.txt",
):
try:
user = User.objects.get(email=email)
except Exception as e:
print("No user with email address <{}> found.".format(email), file=sys.stderr)
return False
if reset:
user.set_password(User.objects.make_random_password())
user.changeReason = "Locked it from the sent-invite command."
user.save()
form = PasswordResetForm({"email": email})
if not form.is_valid():
raise Exception("Eh - internal issues")
try:
form.save(
from_email=from_email,
email_template_name=template,
subject_template_name=subject_template,
)
print("{} - Email sent.".format(email))
except Exception as e:
print("Sending to <{}> failed: {}".format(email, e), file=sys.stderr)
return False
return True
class Command(BaseCommand):
help = "Sent invite to email adddress(es) provided - or read them from stdin."
def add_arguments(self, parser):
parser.add_argument("email", nargs="*", type=str)
parser.add_argument(
"--all",
action="store_true",
dest="all",
help="Sent a poll to -everyone-. Ignores anything specified on stdin/arguments",
)
parser.add_argument(
"--reset",
action="store_true",
dest="reset",
help="Also reset/block the current account. So any (old) password will not work any longer.",
)
parser.add_argument(
"--save",
dest="save",
type=str,
help="Save the message as rfc822 blobs rather than sending. Useful as we sort out dkim on the server. Pass the output directory as an argument",
)
parser.add_argument(
"--nevers",
dest="nevers",
action="store_true",
help="Skip people that have logged in at least once. Only valid in conjunction wit the --all options.",
)
def handle(self, *args, **options):
rc = 0
if options["save"]:
settings.EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
settings.EMAIL_FILE_PATH = options["save"]
if options["all"]:
if options["email"]:
print(
"The option --all cannot be used with additional emails specified as arguments.",
file=sys.stderr,
)
rc = 1
else:
for user in User.objects.all():
if options["nevers"] and user.last_login:
print(
"Skipping - login {} seen {}".format(
user.name, user.last_login.strftime("%Y-%m-%d %H:%M:%S")
)
)
continue
rc |= not reset_password(user.email, options["reset"])
elif options["email"]:
for email in options["email"]:
rc |= not reset_password(email, options["reset"])
else:
for email in sys.stdin:
rc |= not reset_password(email, options["reset"])
sys.exit(rc)
| true | true |
f7159115d342958270b72c812e03dd46e1a80fe8 | 23,723 | py | Python | src/experiment_collection_core/service_pb2.py | AsciiShell/experiment_collection | 86397cae1427c49f30e8af2d6dfb7a15c3f3494d | [
"MIT"
] | 2 | 2020-09-30T21:42:35.000Z | 2020-11-21T17:58:40.000Z | src/experiment_collection_core/service_pb2.py | AsciiShell/experiment_collection | 86397cae1427c49f30e8af2d6dfb7a15c3f3494d | [
"MIT"
] | null | null | null | src/experiment_collection_core/service_pb2.py | AsciiShell/experiment_collection | 86397cae1427c49f30e8af2d6dfb7a15c3f3494d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: experiment_collection_core/service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='experiment_collection_core/service.proto',
package='',
syntax='proto3',
serialized_options=b'\n\032experiment_collection_core',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(experiment_collection_core/service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"e\n\nExperiment\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06params\x18\x03 \x01(\t\x12\x0f\n\x07metrics\x18\x04 \x01(\t\"H\n\x10SimpleExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\"3\n\x0fSimpleNamespace\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x1c\n\x0bSimpleToken\x12\r\n\x05token\x18\x01 \x01(\t\",\n\x0bSimpleReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"R\n\rAddExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x1f\n\nexperiment\x18\x03 \x01(\x0b\x32\x0b.Experiment\"S\n\x10\x45xperimentsReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12 \n\x0b\x65xperiments\x18\x03 \x03(\x0b\x32\x0b.Experiment\"K\n\x12GrantAccessRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x13\n\x0bother_token\x18\x03 \x01(\t\"b\n\x18ReserveExperimentRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\x12\x10\n\x08\x64uration\x18\x04 
\x01(\r2\xc3\x03\n\x11\x45xperimentService\x12\x32\n\x10\x43reateExperiment\x12\x0e.AddExperiment\x1a\x0c.SimpleReply\"\x00\x12>\n\x11ReserveExperiment\x12\x19.ReserveExperimentRequest\x1a\x0c.SimpleReply\"\x00\x12\x35\n\x10\x44\x65leteExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x34\n\x0f\x43heckExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x37\n\x0eGetExperiments\x12\x10.SimpleNamespace\x1a\x11.ExperimentsReply\"\x00\x12\x33\n\x0f\x43reateNamespace\x12\x10.SimpleNamespace\x1a\x0c.SimpleReply\"\x00\x12+\n\x0bRevokeToken\x12\x0c.SimpleToken\x1a\x0c.SimpleReply\"\x00\x12\x32\n\x0bGrantAccess\x12\x13.GrantAccessRequest\x1a\x0c.SimpleReply\"\x00\x42\x1c\n\x1a\x65xperiment_collection_coreb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Experiment.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='Experiment.time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='Experiment.params', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='Experiment.metrics', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=178,
)
_SIMPLEEXPERIMENT = _descriptor.Descriptor(
name='SimpleExperiment',
full_name='SimpleExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='SimpleExperiment.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=252,
)
_SIMPLENAMESPACE = _descriptor.Descriptor(
name='SimpleNamespace',
full_name='SimpleNamespace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleNamespace.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleNamespace.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=305,
)
_SIMPLETOKEN = _descriptor.Descriptor(
name='SimpleToken',
full_name='SimpleToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleToken.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=335,
)
_SIMPLEREPLY = _descriptor.Descriptor(
name='SimpleReply',
full_name='SimpleReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='SimpleReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='SimpleReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=381,
)
_ADDEXPERIMENT = _descriptor.Descriptor(
name='AddExperiment',
full_name='AddExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='AddExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='AddExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='AddExperiment.experiment', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=465,
)
_EXPERIMENTSREPLY = _descriptor.Descriptor(
name='ExperimentsReply',
full_name='ExperimentsReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ExperimentsReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='ExperimentsReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiments', full_name='ExperimentsReply.experiments', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=467,
serialized_end=550,
)
_GRANTACCESSREQUEST = _descriptor.Descriptor(
name='GrantAccessRequest',
full_name='GrantAccessRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='GrantAccessRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='GrantAccessRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='other_token', full_name='GrantAccessRequest.other_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=552,
serialized_end=627,
)
_RESERVEEXPERIMENTREQUEST = _descriptor.Descriptor(
name='ReserveExperimentRequest',
full_name='ReserveExperimentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='ReserveExperimentRequest.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='ReserveExperimentRequest.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='ReserveExperimentRequest.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='ReserveExperimentRequest.duration', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=727,
)
_EXPERIMENT.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ADDEXPERIMENT.fields_by_name['experiment'].message_type = _EXPERIMENT
_EXPERIMENTSREPLY.fields_by_name['experiments'].message_type = _EXPERIMENT
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleExperiment'] = _SIMPLEEXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleNamespace'] = _SIMPLENAMESPACE
DESCRIPTOR.message_types_by_name['SimpleToken'] = _SIMPLETOKEN
DESCRIPTOR.message_types_by_name['SimpleReply'] = _SIMPLEREPLY
DESCRIPTOR.message_types_by_name['AddExperiment'] = _ADDEXPERIMENT
DESCRIPTOR.message_types_by_name['ExperimentsReply'] = _EXPERIMENTSREPLY
DESCRIPTOR.message_types_by_name['GrantAccessRequest'] = _GRANTACCESSREQUEST
DESCRIPTOR.message_types_by_name['ReserveExperimentRequest'] = _RESERVEEXPERIMENTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
})
_sym_db.RegisterMessage(Experiment)
SimpleExperiment = _reflection.GeneratedProtocolMessageType('SimpleExperiment', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleExperiment)
})
_sym_db.RegisterMessage(SimpleExperiment)
SimpleNamespace = _reflection.GeneratedProtocolMessageType('SimpleNamespace', (_message.Message,), {
'DESCRIPTOR' : _SIMPLENAMESPACE,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleNamespace)
})
_sym_db.RegisterMessage(SimpleNamespace)
SimpleToken = _reflection.GeneratedProtocolMessageType('SimpleToken', (_message.Message,), {
'DESCRIPTOR' : _SIMPLETOKEN,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleToken)
})
_sym_db.RegisterMessage(SimpleToken)
SimpleReply = _reflection.GeneratedProtocolMessageType('SimpleReply', (_message.Message,), {
'DESCRIPTOR' : _SIMPLEREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:SimpleReply)
})
_sym_db.RegisterMessage(SimpleReply)
AddExperiment = _reflection.GeneratedProtocolMessageType('AddExperiment', (_message.Message,), {
'DESCRIPTOR' : _ADDEXPERIMENT,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:AddExperiment)
})
_sym_db.RegisterMessage(AddExperiment)
ExperimentsReply = _reflection.GeneratedProtocolMessageType('ExperimentsReply', (_message.Message,), {
'DESCRIPTOR' : _EXPERIMENTSREPLY,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ExperimentsReply)
})
_sym_db.RegisterMessage(ExperimentsReply)
GrantAccessRequest = _reflection.GeneratedProtocolMessageType('GrantAccessRequest', (_message.Message,), {
'DESCRIPTOR' : _GRANTACCESSREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:GrantAccessRequest)
})
_sym_db.RegisterMessage(GrantAccessRequest)
ReserveExperimentRequest = _reflection.GeneratedProtocolMessageType('ReserveExperimentRequest', (_message.Message,), {
'DESCRIPTOR' : _RESERVEEXPERIMENTREQUEST,
'__module__' : 'experiment_collection_core.service_pb2'
# @@protoc_insertion_point(class_scope:ReserveExperimentRequest)
})
_sym_db.RegisterMessage(ReserveExperimentRequest)
DESCRIPTOR._options = None
_EXPERIMENTSERVICE = _descriptor.ServiceDescriptor(
name='ExperimentService',
full_name='ExperimentService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=730,
serialized_end=1181,
methods=[
_descriptor.MethodDescriptor(
name='CreateExperiment',
full_name='ExperimentService.CreateExperiment',
index=0,
containing_service=None,
input_type=_ADDEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ReserveExperiment',
full_name='ExperimentService.ReserveExperiment',
index=1,
containing_service=None,
input_type=_RESERVEEXPERIMENTREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteExperiment',
full_name='ExperimentService.DeleteExperiment',
index=2,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CheckExperiment',
full_name='ExperimentService.CheckExperiment',
index=3,
containing_service=None,
input_type=_SIMPLEEXPERIMENT,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetExperiments',
full_name='ExperimentService.GetExperiments',
index=4,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_EXPERIMENTSREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateNamespace',
full_name='ExperimentService.CreateNamespace',
index=5,
containing_service=None,
input_type=_SIMPLENAMESPACE,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='RevokeToken',
full_name='ExperimentService.RevokeToken',
index=6,
containing_service=None,
input_type=_SIMPLETOKEN,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GrantAccess',
full_name='ExperimentService.GrantAccess',
index=7,
containing_service=None,
input_type=_GRANTACCESSREQUEST,
output_type=_SIMPLEREPLY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_EXPERIMENTSERVICE)
DESCRIPTOR.services_by_name['ExperimentService'] = _EXPERIMENTSERVICE
# @@protoc_insertion_point(module_scope)
| 39.21157 | 2,040 | 0.75842 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='experiment_collection_core/service.proto',
package='',
syntax='proto3',
serialized_options=b'\n\032experiment_collection_core',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n(experiment_collection_core/service.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"e\n\nExperiment\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x04time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0e\n\x06params\x18\x03 \x01(\t\x12\x0f\n\x07metrics\x18\x04 \x01(\t\"H\n\x10SimpleExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\"3\n\x0fSimpleNamespace\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\"\x1c\n\x0bSimpleToken\x12\r\n\x05token\x18\x01 \x01(\t\",\n\x0bSimpleReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"R\n\rAddExperiment\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x1f\n\nexperiment\x18\x03 \x01(\x0b\x32\x0b.Experiment\"S\n\x10\x45xperimentsReply\x12\x0e\n\x06status\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12 \n\x0b\x65xperiments\x18\x03 \x03(\x0b\x32\x0b.Experiment\"K\n\x12GrantAccessRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x13\n\x0bother_token\x18\x03 \x01(\t\"b\n\x18ReserveExperimentRequest\x12\r\n\x05token\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x12\n\nexperiment\x18\x03 \x01(\t\x12\x10\n\x08\x64uration\x18\x04 
\x01(\r2\xc3\x03\n\x11\x45xperimentService\x12\x32\n\x10\x43reateExperiment\x12\x0e.AddExperiment\x1a\x0c.SimpleReply\"\x00\x12>\n\x11ReserveExperiment\x12\x19.ReserveExperimentRequest\x1a\x0c.SimpleReply\"\x00\x12\x35\n\x10\x44\x65leteExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x34\n\x0f\x43heckExperiment\x12\x11.SimpleExperiment\x1a\x0c.SimpleReply\"\x00\x12\x37\n\x0eGetExperiments\x12\x10.SimpleNamespace\x1a\x11.ExperimentsReply\"\x00\x12\x33\n\x0f\x43reateNamespace\x12\x10.SimpleNamespace\x1a\x0c.SimpleReply\"\x00\x12+\n\x0bRevokeToken\x12\x0c.SimpleToken\x1a\x0c.SimpleReply\"\x00\x12\x32\n\x0bGrantAccess\x12\x13.GrantAccessRequest\x1a\x0c.SimpleReply\"\x00\x42\x1c\n\x1a\x65xperiment_collection_coreb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Experiment.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time', full_name='Experiment.time', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='params', full_name='Experiment.params', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metrics', full_name='Experiment.metrics', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=178,
)
_SIMPLEEXPERIMENT = _descriptor.Descriptor(
name='SimpleExperiment',
full_name='SimpleExperiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleExperiment.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleExperiment.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='experiment', full_name='SimpleExperiment.experiment', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=180,
serialized_end=252,
)
_SIMPLENAMESPACE = _descriptor.Descriptor(
name='SimpleNamespace',
full_name='SimpleNamespace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleNamespace.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='namespace', full_name='SimpleNamespace.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=254,
serialized_end=305,
)
_SIMPLETOKEN = _descriptor.Descriptor(
name='SimpleToken',
full_name='SimpleToken',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='token', full_name='SimpleToken.token', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=335,
)
_SIMPLEREPLY = _descriptor.Descriptor(
name='SimpleReply',
full_name='SimpleReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='SimpleReply.status', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='error', full_name='SimpleReply.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=381,
)
# -----------------------------------------------------------------------------
# NOTE (generated code): everything below was emitted by the protocol buffer
# compiler (protoc) for experiment_collection_core/service.proto.
# Do NOT edit by hand -- regenerate from the .proto definition instead.
# -----------------------------------------------------------------------------
# Descriptor for the AddExperiment request message
# (fields: token, namespace, experiment).
_ADDEXPERIMENT = _descriptor.Descriptor(
  name='AddExperiment',
  full_name='AddExperiment',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='AddExperiment.token', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='namespace', full_name='AddExperiment.namespace', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='experiment', full_name='AddExperiment.experiment', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=383,
  serialized_end=465,
)
# Descriptor for the ExperimentsReply response message
# (fields: status, error, repeated experiments).
_EXPERIMENTSREPLY = _descriptor.Descriptor(
  name='ExperimentsReply',
  full_name='ExperimentsReply',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='status', full_name='ExperimentsReply.status', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='error', full_name='ExperimentsReply.error', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='experiments', full_name='ExperimentsReply.experiments', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=467,
  serialized_end=550,
)
# Descriptor for the GrantAccessRequest message
# (fields: token, namespace, other_token).
_GRANTACCESSREQUEST = _descriptor.Descriptor(
  name='GrantAccessRequest',
  full_name='GrantAccessRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='GrantAccessRequest.token', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='namespace', full_name='GrantAccessRequest.namespace', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='other_token', full_name='GrantAccessRequest.other_token', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=552,
  serialized_end=627,
)
# Descriptor for the ReserveExperimentRequest message
# (fields: token, namespace, experiment, duration).
_RESERVEEXPERIMENTREQUEST = _descriptor.Descriptor(
  name='ReserveExperimentRequest',
  full_name='ReserveExperimentRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='token', full_name='ReserveExperimentRequest.token', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='namespace', full_name='ReserveExperimentRequest.namespace', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='experiment', full_name='ReserveExperimentRequest.experiment', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='duration', full_name='ReserveExperimentRequest.duration', index=3,
      number=4, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=629,
  serialized_end=727,
)
# Cross-link message-typed fields to their descriptors and register every
# message type on the file descriptor.
_EXPERIMENT.fields_by_name['time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_ADDEXPERIMENT.fields_by_name['experiment'].message_type = _EXPERIMENT
_EXPERIMENTSREPLY.fields_by_name['experiments'].message_type = _EXPERIMENT
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleExperiment'] = _SIMPLEEXPERIMENT
DESCRIPTOR.message_types_by_name['SimpleNamespace'] = _SIMPLENAMESPACE
DESCRIPTOR.message_types_by_name['SimpleToken'] = _SIMPLETOKEN
DESCRIPTOR.message_types_by_name['SimpleReply'] = _SIMPLEREPLY
DESCRIPTOR.message_types_by_name['AddExperiment'] = _ADDEXPERIMENT
DESCRIPTOR.message_types_by_name['ExperimentsReply'] = _EXPERIMENTSREPLY
DESCRIPTOR.message_types_by_name['GrantAccessRequest'] = _GRANTACCESSREQUEST
DESCRIPTOR.message_types_by_name['ReserveExperimentRequest'] = _RESERVEEXPERIMENTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from the descriptors above and register
# them with the symbol database.
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), {
  'DESCRIPTOR' : _EXPERIMENT,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:Experiment)
  })
_sym_db.RegisterMessage(Experiment)
SimpleExperiment = _reflection.GeneratedProtocolMessageType('SimpleExperiment', (_message.Message,), {
  'DESCRIPTOR' : _SIMPLEEXPERIMENT,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:SimpleExperiment)
  })
_sym_db.RegisterMessage(SimpleExperiment)
SimpleNamespace = _reflection.GeneratedProtocolMessageType('SimpleNamespace', (_message.Message,), {
  'DESCRIPTOR' : _SIMPLENAMESPACE,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:SimpleNamespace)
  })
_sym_db.RegisterMessage(SimpleNamespace)
SimpleToken = _reflection.GeneratedProtocolMessageType('SimpleToken', (_message.Message,), {
  'DESCRIPTOR' : _SIMPLETOKEN,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:SimpleToken)
  })
_sym_db.RegisterMessage(SimpleToken)
SimpleReply = _reflection.GeneratedProtocolMessageType('SimpleReply', (_message.Message,), {
  'DESCRIPTOR' : _SIMPLEREPLY,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:SimpleReply)
  })
_sym_db.RegisterMessage(SimpleReply)
AddExperiment = _reflection.GeneratedProtocolMessageType('AddExperiment', (_message.Message,), {
  'DESCRIPTOR' : _ADDEXPERIMENT,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:AddExperiment)
  })
_sym_db.RegisterMessage(AddExperiment)
ExperimentsReply = _reflection.GeneratedProtocolMessageType('ExperimentsReply', (_message.Message,), {
  'DESCRIPTOR' : _EXPERIMENTSREPLY,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:ExperimentsReply)
  })
_sym_db.RegisterMessage(ExperimentsReply)
GrantAccessRequest = _reflection.GeneratedProtocolMessageType('GrantAccessRequest', (_message.Message,), {
  'DESCRIPTOR' : _GRANTACCESSREQUEST,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:GrantAccessRequest)
  })
_sym_db.RegisterMessage(GrantAccessRequest)
ReserveExperimentRequest = _reflection.GeneratedProtocolMessageType('ReserveExperimentRequest', (_message.Message,), {
  'DESCRIPTOR' : _RESERVEEXPERIMENTREQUEST,
  '__module__' : 'experiment_collection_core.service_pb2'
  # @@protoc_insertion_point(class_scope:ReserveExperimentRequest)
  })
_sym_db.RegisterMessage(ReserveExperimentRequest)
DESCRIPTOR._options = None
# Descriptor for the ExperimentService RPC service (CreateExperiment,
# ReserveExperiment, DeleteExperiment, CheckExperiment, GetExperiments,
# CreateNamespace, RevokeToken, GrantAccess).
_EXPERIMENTSERVICE = _descriptor.ServiceDescriptor(
  name='ExperimentService',
  full_name='ExperimentService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=730,
  serialized_end=1181,
  methods=[
  _descriptor.MethodDescriptor(
    name='CreateExperiment',
    full_name='ExperimentService.CreateExperiment',
    index=0,
    containing_service=None,
    input_type=_ADDEXPERIMENT,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='ReserveExperiment',
    full_name='ExperimentService.ReserveExperiment',
    index=1,
    containing_service=None,
    input_type=_RESERVEEXPERIMENTREQUEST,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteExperiment',
    full_name='ExperimentService.DeleteExperiment',
    index=2,
    containing_service=None,
    input_type=_SIMPLEEXPERIMENT,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='CheckExperiment',
    full_name='ExperimentService.CheckExperiment',
    index=3,
    containing_service=None,
    input_type=_SIMPLEEXPERIMENT,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetExperiments',
    full_name='ExperimentService.GetExperiments',
    index=4,
    containing_service=None,
    input_type=_SIMPLENAMESPACE,
    output_type=_EXPERIMENTSREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='CreateNamespace',
    full_name='ExperimentService.CreateNamespace',
    index=5,
    containing_service=None,
    input_type=_SIMPLENAMESPACE,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='RevokeToken',
    full_name='ExperimentService.RevokeToken',
    index=6,
    containing_service=None,
    input_type=_SIMPLETOKEN,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GrantAccess',
    full_name='ExperimentService.GrantAccess',
    index=7,
    containing_service=None,
    input_type=_GRANTACCESSREQUEST,
    output_type=_SIMPLEREPLY,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_EXPERIMENTSERVICE)
DESCRIPTOR.services_by_name['ExperimentService'] = _EXPERIMENTSERVICE
# @@protoc_insertion_point(module_scope)
| true | true |
f715925d591bd9957fdc6799ded885a4c997bb33 | 6,877 | py | Python | p7/MIPSMicroSystem/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | 2 | 2022-03-06T06:05:24.000Z | 2022-03-10T09:08:08.000Z | p7/MIPSMicroSystem/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | null | null | null | p7/MIPSMicroSystem/my_files/test/auto_test.py | t0ush1/ComputerOrganization | 8093949bbd3e48678cea832133e9bf8990bbdf27 | [
"MIT"
] | null | null | null | #############################################################
# win10 64bit
# python 3.9.6
#
# author: toush1 (20373944 he tianran)
#############################################################
import os
import re
# software path
xilinxPath = "G:\\ISE\\ise\\14.7\\ISE_DS\\ISE\\"  # Xilinx ISE install root (trailing backslash required: joined with "bin\\nt64\\fuse")
marsPath = "G:\\mars\\Mars_test.jar"  # MARS jar, invoked via "java -jar" to assemble/dump testpoints
# prj path and test mode
myPrjPath = "D:\\study\\CO\\p7\\MIPSMicroSystem\\"  # project of the CPU under test
otherPrjPath = "D:\\study\\CO\\p7\\szxCPU\\"  # project of the reference CPU to diff against
start = 0  # index of the first testpoint to run
tot = 1  # number of consecutive testpoints to run
interrupt = 0x301c # if 0 not interrupt; if -1 interrupt all; if 0x3000 interrupt at 0x3000
# dump text and handler (and run in Mars)
def runMars(asm, codeFilePath, out):
    """Assemble *asm* with MARS and write the combined code image to *codeFilePath*.

    The .text dump is zero-padded up to instruction slot 1120 and the
    exception-handler dump (0x4180-0x5180) is appended after it, so the
    handler always starts at a fixed offset in the image.  *out* is currently
    unused.  Returns the number of lines in the raw .text dump.
    """
    dump_dir = os.path.dirname(codeFilePath) + "\\"
    text_dump = dump_dir + "code.tmp"
    handler_dump = dump_dir + "handler.tmp"
    # Dump the program text and the exception handler as hex words.
    os.system("java -jar " + marsPath + " db nc mc CompactDataAtZero a dump .text HexText " + text_dump + " " + asm)
    os.system("java -jar " + marsPath + " db nc mc CompactDataAtZero a dump 0x00004180-0x00005180 HexText " + handler_dump + " " + asm)
    with open(text_dump, "r") as textSrc, open(handler_dump, "r") as handlerSrc, open(codeFilePath, "w") as dst:
        codeText = textSrc.read()
        textLen = len(codeText.splitlines())
        dst.write(codeText)
        # Pad with NOP words (all-zero) up to slot 1120 before the handler.
        dst.write("00000000\n" * (1120 - textLen))
        dst.write(handlerSrc.read())
    os.remove(text_dump)
    os.remove(handler_dump)
    return textLen
# gnrt prj and tcl file
def initISE(prj):
    """Regenerate the ISE project list (mips.prj) and simulation batch (mips.tcl).

    Every Verilog source found under <prj>/my_files/cpu is listed in the .prj
    file; the .tcl file simply runs the simulation for 200us and exits.
    """
    sources_root = prj + "my_files\\cpu\\"
    with open(prj + "mips.prj", "w") as prjFile, open(prj + "mips.tcl", "w") as tclFile:
        for dirpath, _dirs, filenames in os.walk(sources_root):
            for verilog_name in (f for f in filenames if re.match(r"[\w]*\.v", f)):
                prjFile.write("Verilog work " + dirpath + "\\" + verilog_name + "\n")
        tclFile.write("run 200us" + "\n" + "exit")
# change interrupt position in testbench
def changeIntPos(tbPath, intPos):
    """Patch the testbench at *tbPath* to control interrupt injection.

    intPos == 0 disables injection; any other value enables it and rewrites
    the ``fixed_macroscopic_pc`` compare constant to that (hex) address.
    """
    with open(tbPath, "r") as testbench:
        content = testbench.read()
    if intPos:
        # Enable injection and aim it at the requested macroscopic PC.
        content = content.replace("need_interrupt = 0", "need_interrupt = 1")
        content = re.sub(r"fixed_macroscopic_pc == 32'h[0-9a-f]+",
                         "fixed_macroscopic_pc == 32'h" + str(hex(intPos)).removeprefix("0x"), content)
    else:
        content = content.replace("need_interrupt = 1", "need_interrupt = 0")
    with open(tbPath, "w") as testbench:
        testbench.write(content)
# compile and run in ISE
def runISE(prj, out):
    """Compile the CPU testbench with ISE's ``fuse`` and run the ISim binary.

    Compilation output goes to <prj>/log.txt; the simulation transcript is
    redirected into *out*.  Leaves the current working directory at *prj*.
    """
    paths = {name: prj + "mips." + ext for name, ext in
             (("prj", "prj"), ("tcl", "tcl"), ("exe", "exe"))}
    log_path = prj + "log.txt"
    os.chdir(prj)
    os.environ['XILINX'] = xilinxPath
    # Elaborate/compile the design, then execute the generated simulator.
    os.system(xilinxPath + "bin\\nt64\\fuse -nodebug -prj " + paths["prj"] + " -o " + paths["exe"] + " mips_tb > " + log_path)
    os.system(paths["exe"] + " -nolog -tclbatch " + paths["tcl"] + " > " + out)
# cmp myAns and stdAns
def cmp(interrupt, my, std, cmpRes):
    """Compare the "@..."-prefixed log lines of two simulation transcripts.

    Args:
        interrupt: interrupt injection address (0 means "no interrupt");
            only used for the report header.
        my: path of the device-under-test transcript.
        std: path of the reference-CPU transcript.
        cmpRes: path of the comparison report; results are appended.

    Returns:
        True when both logs match line for line, False otherwise.  The first
        difference (or a length mismatch) is written to the report and echoed
        to stdout.
    """
    # Raw string fixes the invalid "\@" escape warning; '@' needs no escaping.
    log_pattern = re.compile(r"@[^\n]*")
    with open(my, "r") as myFile, open(std, "r") as stdFile, open(cmpRes, "a") as out:
        myLogs = log_pattern.findall(myFile.read())
        stdLogs = log_pattern.findall(stdFile.read())
        # Report header: where (if anywhere) the interrupt was injected.
        if interrupt != 0:
            out.write("interrupt at " + str(hex(interrupt)) + " : \n")
            print("interrupt at " + str(hex(interrupt)) + " : ")
        else:
            out.write("no interrupt : \n")
            print("no interrupt : ")
        for i in range(len(stdLogs)):
            if i < len(myLogs) and myLogs[i] != stdLogs[i]:
                # First mismatching line: record both sides and stop.
                out.write("\tOn Line " + str(i+1) + "\n")
                out.write("\tGet\t\t: " + myLogs[i] + "\n")
                out.write("\tExpect\t: " + stdLogs[i] + "\n")
                print("\tOn Line " + str(i+1))
                print("\tGet\t: " + myLogs[i])
                print("\tExpect\t: " + stdLogs[i])
                return False
            elif i >= len(myLogs):
                out.write("\tmyLogs is too short\n")
                print("\tmyLogs is too short")
                return False
        if len(myLogs) > len(stdLogs):
            out.write("\tmyLogs is too long\n")
            print("\tmyLogs is too long")
            return False
        return True
# main
# ---- main -------------------------------------------------------------------
# Regenerate both ISE projects, then for each selected testpoint: assemble it,
# hand the same code image to both CPUs, simulate (optionally injecting an
# interrupt) and diff the two transcripts into cmp_res.txt.
initISE(myPrjPath)
initISE(otherPrjPath)
testdataPath = myPrjPath + "my_files\\test\\data\\"
cmpResPath = testdataPath + "cmp_res.txt"  # accumulated comparison report
myTbPath = myPrjPath + "my_files\\cpu\\mips_tb.v"
otherTbPath = otherPrjPath + "my_files\\cpu\\mips_tb.v"
# Start every run with a fresh report file.
if os.path.exists(cmpResPath):
    os.remove(cmpResPath)
for i in range(start, start + tot):
    testpointPath = testdataPath + "testpoint\\testpoint" + str(i) + ".asm"
    codePath = testdataPath + "code\\code" + str(i) + ".txt"
    stdAnsPath = testdataPath + "std_ans\\std_ans" + str(i) + ".txt"
    testAnsPath = testdataPath + "test_ans\\test_ans" + str(i) + ".txt"
    # Assemble the testpoint; the last 4 text lines are excluded from the
    # interrupt sweep range (presumably the program's exit tail -- confirm).
    textLen = runMars(testpointPath, codePath, stdAnsPath) - 4
    # Give both CPU projects the identical code image.
    with open(codePath, "r") as codeSrc, open(myPrjPath + "code.txt", "w") as codeDst1, open(otherPrjPath + "code.txt", "w") as codeDst2:
        code = codeSrc.read()
        codeDst1.write(code)
        codeDst2.write(code)
    with open(cmpResPath, "a") as out:
        out.write("\n----------------------------------------------------------------\n")
        out.write("\nin testpoint" + str(i) + " : \n\n")
    print("\n----------------------------------------------------------------")
    print("\nin testpoint" + str(i) + " : \n")
    isAC = True
    if interrupt == 0:
        # No interrupt: a single simulation per CPU.
        changeIntPos(myTbPath, 0)
        changeIntPos(otherTbPath, 0)
        runISE(myPrjPath, testAnsPath)
        runISE(otherPrjPath, stdAnsPath)
        isAC = cmp(0, testAnsPath, stdAnsPath, cmpResPath)
    elif interrupt == -1:
        # Sweep: inject one interrupt at every instruction address in .text.
        for j in range(1, textLen):
            intPos = j * 4 + 0x3000
            changeIntPos(myTbPath, intPos)
            changeIntPos(otherTbPath, intPos)
            runISE(myPrjPath, testAnsPath)
            runISE(otherPrjPath, stdAnsPath)
            if not cmp(intPos, testAnsPath, stdAnsPath, cmpResPath):
                isAC = False
                break
    else:
        # Inject a single interrupt at the configured address.
        changeIntPos(myTbPath, interrupt)
        changeIntPos(otherTbPath, interrupt)
        runISE(myPrjPath, testAnsPath)
        runISE(otherPrjPath, stdAnsPath)
        isAC = cmp(interrupt, testAnsPath, stdAnsPath, cmpResPath)
    if isAC:
        with open(cmpResPath, "a") as out:
            out.write("\n\tAll Accepted\n")
        print("\n\tAll Accepted")
print("\n----------------------------------------------------------------")
print("\tmyLogs is too long")
return False
return True
initISE(myPrjPath)
initISE(otherPrjPath)
testdataPath = myPrjPath + "my_files\\test\\data\\"
cmpResPath = testdataPath + "cmp_res.txt"
myTbPath = myPrjPath + "my_files\\cpu\\mips_tb.v"
otherTbPath = otherPrjPath + "my_files\\cpu\\mips_tb.v"
if os.path.exists(cmpResPath):
os.remove(cmpResPath)
for i in range(start, start + tot):
testpointPath = testdataPath + "testpoint\\testpoint" + str(i) + ".asm"
codePath = testdataPath + "code\\code" + str(i) + ".txt"
stdAnsPath = testdataPath + "std_ans\\std_ans" + str(i) + ".txt"
testAnsPath = testdataPath + "test_ans\\test_ans" + str(i) + ".txt"
textLen = runMars(testpointPath, codePath, stdAnsPath) - 4
with open(codePath, "r") as codeSrc, open(myPrjPath + "code.txt", "w") as codeDst1, open(otherPrjPath + "code.txt", "w") as codeDst2:
code = codeSrc.read()
codeDst1.write(code)
codeDst2.write(code)
with open(cmpResPath, "a") as out:
out.write("\n----------------------------------------------------------------\n")
out.write("\nin testpoint" + str(i) + " : \n\n")
print("\n----------------------------------------------------------------")
print("\nin testpoint" + str(i) + " : \n")
isAC = True
if interrupt == 0:
changeIntPos(myTbPath, 0)
changeIntPos(otherTbPath, 0)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(0, testAnsPath, stdAnsPath, cmpResPath)
elif interrupt == -1:
for j in range(1, textLen):
intPos = j * 4 + 0x3000
changeIntPos(myTbPath, intPos)
changeIntPos(otherTbPath, intPos)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
if not cmp(intPos, testAnsPath, stdAnsPath, cmpResPath):
isAC = False
break
else:
changeIntPos(myTbPath, interrupt)
changeIntPos(otherTbPath, interrupt)
runISE(myPrjPath, testAnsPath)
runISE(otherPrjPath, stdAnsPath)
isAC = cmp(interrupt, testAnsPath, stdAnsPath, cmpResPath)
if isAC:
with open(cmpResPath, "a") as out:
out.write("\n\tAll Accepted\n")
print("\n\tAll Accepted")
print("\n----------------------------------------------------------------") | true | true |
f715928065109e697649bf15722ccc0e6c0edfa4 | 7,114 | py | Python | test/functional/tests/fault_injection/test_cache_insert_error.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | null | null | null | test/functional/tests/fault_injection/test_cache_insert_error.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | 1 | 2022-03-21T22:05:26.000Z | 2022-03-21T22:05:26.000Z | test/functional/tests/fault_injection/test_cache_insert_error.py | andreatomassetti/open-cas-linux | 6a6a0267d76dca86de8695a959991ecefdc0ddf8 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright(c) 2019-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import pytest
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CacheLineSize,
SeqCutOffPolicy,
CleaningPolicy,
CacheStatus,
)
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.device_mapper import ErrorDevice, DmTable
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_insert_error(cache_mode, cache_line_size):
    """
    title: Cache insert test with error device
    description: |
        Validate CAS ability to handle write errors while it tries to insert
        cache lines. For lazy writes cache modes (WO, WB) issue only reads.
    pass_criteria:
      - No I/O errors returned to the user
      - Cache write error statistics are counted properly
      - No cache line gets inserted into cache
    """
    with TestRun.step("Prepare core and cache"):
        cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
        # Direct-I/O fio job over the whole exported core, one cache line per
        # request (so each request maps to exactly one insert attempt).
        fio_cmd = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .size(core.size)
            .block_size(cache_line_size)
            .target(core)
            .direct()
        )
        # Lazy-write modes (WB/WO) get reads only (see docstring); every
        # other mode runs a verified random read/write mix.
        if cache_mode in [CacheMode.WB, CacheMode.WO]:
            fio_cmd = fio_cmd.read_write(ReadWrite.randread)
        else:
            fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
    with TestRun.step("Run fio and verify no errors present"):
        # Insert failures must stay internal to CAS and not surface to fio.
        fio_errors = fio_cmd.run()[0].total_errors()
        if fio_errors != 0:
            TestRun.fail(f"Some I/O ended with errors {fio_errors}")
    with TestRun.step("Check error statistics on cache"):
        stats = cache.get_statistics()
        # No cache line may have been inserted into the (erroring) cache.
        occupancy = cache.get_occupancy().get_value()
        if occupancy != 0:
            TestRun.fail(f"Occupancy is not zero, but {occupancy}")
        # Convert the write counter into cache-line-sized request counts so it
        # is comparable with the per-request error counter.
        cache_writes = stats.block_stats.cache.writes / cache_line_size.value
        cache_errors = stats.error_stats.cache.total
        if cache_writes != cache_errors:
            TestRun.fail(
                f"Cache errors ({cache_errors}) should equal to number of"
                f" requests to cache ({cache_writes})"
            )
    if cache_mode not in [CacheMode.WB, CacheMode.WO]:
        with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
            # After stopping the cache, the raw core device must hold the
            # verification pattern written through CAS.
            cache.stop()
            fio_cmd.target(core_device).verify_only().run()
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WO])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
    """
    title: Cache insert test with error device for writes on lazy writes cache mode
    description: |
        Validate CAS ability to handle write errors while it tries to insert
        cache lines. This test is exclusively for lazy writes cache modes.
    pass_criteria:
      - I/O errors returned to user
      - Cache automatically stops after encountering errors
      - No cache line gets inserted into cache
    """
    with TestRun.step("Prepare core and cache"):
        cache, core, _ = prepare_configuration(cache_mode, cache_line_size)
    with TestRun.step("Run fio and verify errors are present"):
        # Random direct writes, one cache line per request; the injected cache
        # device errors are expected to surface to fio in lazy-write modes.
        fio_errors = (
            Fio()
            .create_command()
            .io_engine(IoEngine.libaio)
            .size(core.size)
            .block_size(cache_line_size)
            .read_write(ReadWrite.randwrite)
            .target(core)
            .continue_on_error(ErrorFilter.io)
            .direct()
            .run()[0]
            .total_errors()
        )
        if fio_errors == 0:
            # Plain string: the literal had a stray f-prefix (no placeholders).
            TestRun.fail("No I/O ended with error")
    with TestRun.step("Check error statistics and state on cache"):
        stats = cache.get_statistics()
        # Nothing may have been inserted into the erroring cache.
        occupancy = cache.get_occupancy().get_value()
        if occupancy != 0:
            TestRun.fail(f"Occupancy is not zero, but {occupancy}")
        # Convert the write counter into cache-line-sized request counts.
        cache_writes = stats.block_stats.cache.writes / cache_line_size.value
        cache_errors = stats.error_stats.cache.total
        if cache_writes != 1:
            # Plain string: the literal had a stray f-prefix (no placeholders).
            TestRun.fail("There only should be one cache write attempt before cache stop")
        if cache_writes != cache_errors:
            TestRun.fail(
                f"Cache errors ({cache_errors}) should equal to number of requests to"
                f" cache ({cache_writes})"
            )
        # The first failed lazy write must have taken the cache down.
        state = cache.get_status()
        if state != CacheStatus.not_running:
            TestRun.fail(f"Cache should be in 'Not running' state, and it's {state}")
def prepare_configuration(cache_mode, cache_line_size):
    """Build a cache-on-error-device setup and return (cache, core, core partition).

    The cache is started on a device-mapper error device whose error region
    covers the usable (non-metadata) cache area, so every cache-line insert
    write fails while metadata writes succeed.  Sequential cutoff and cleaning
    are disabled so I/O maps predictably onto insert attempts.
    """
    cache_device = TestRun.disks["cache"]
    core_device = TestRun.disks["core"]
    with TestRun.step("Creating cache partition"):
        cache_device.create_partitions([Size(50, Unit.MebiByte)])
    with TestRun.step("Creating cache error device"):
        error_device = ErrorDevice("error", cache_device.partitions[0])
    with TestRun.step("Starting cache to check metadata offset"):
        # Throwaway start: only to learn the usable cache size (after
        # metadata) for this cache line size.
        cache = casadm.start_cache(error_device, cache_line_size=cache_line_size, force=True)
        cache_size = cache.size
        cache.stop()
    with TestRun.step("Setting errors on non-metadata area"):
        # Error region covers the trailing `cache_size` portion of the
        # partition, i.e. everything past the metadata.
        error_device.change_table(
            DmTable.error_table(
                offset=(cache_device.partitions[0].size - cache_size).get_value(Unit.Blocks512),
                size=cache_size,
            ).fill_gaps(cache_device.partitions[0])
        )
    with TestRun.step("Create core partition with size of usable cache space"):
        core_device.create_partitions([cache_size])
    with TestRun.step("Starting and configuring cache"):
        cache = casadm.start_cache(
            error_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
        )
        # Best-effort configuration: log failures but keep going.
        result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
        if result.exit_code:
            TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
        result = cache.set_cleaning_policy(CleaningPolicy.nop)
        if result.exit_code:
            TestRun.LOGGER.exception("Couldn't set cleaning policy")
    with TestRun.step("Stopping udev"):
        # Keep udev from racing the test for the newly created devices.
        Udev.disable()
    with TestRun.step("Adding core device"):
        core = cache.add_core(core_dev=core_device.partitions[0])
    return cache, core, core_device.partitions[0]
| 38.247312 | 100 | 0.66826 |
import pytest
from api.cas import casadm
from api.cas.cache_config import (
CacheMode,
CacheLineSize,
SeqCutOffPolicy,
CleaningPolicy,
CacheStatus,
)
from core.test_run import TestRun
from storage_devices.disk import DiskTypeSet, DiskType, DiskTypeLowerThan
from test_tools.device_mapper import ErrorDevice, DmTable
from test_tools.fio.fio import Fio
from test_tools.fio.fio_param import ReadWrite, IoEngine, ErrorFilter, VerifyMethod
from test_utils.os_utils import Udev
from test_utils.size import Size, Unit
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", CacheMode)
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_insert_error(cache_mode, cache_line_size):
with TestRun.step("Prepare core and cache"):
cache, core, core_device = prepare_configuration(cache_mode, cache_line_size)
fio_cmd = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.target(core)
.direct()
)
if cache_mode in [CacheMode.WB, CacheMode.WO]:
fio_cmd = fio_cmd.read_write(ReadWrite.randread)
else:
fio_cmd = fio_cmd.read_write(ReadWrite.randrw).verify_pattern().verify(VerifyMethod.pattern)
with TestRun.step("Run fio and verify no errors present"):
fio_errors = fio_cmd.run()[0].total_errors()
if fio_errors != 0:
TestRun.fail(f"Some I/O ended with errors {fio_errors}")
with TestRun.step("Check error statistics on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of"
f" requests to cache ({cache_writes})"
)
if cache_mode not in [CacheMode.WB, CacheMode.WO]:
with TestRun.step("Verify core device contents for non-lazy-writes cache modes"):
cache.stop()
fio_cmd.target(core_device).verify_only().run()
@pytest.mark.parametrizex("cache_line_size", CacheLineSize)
@pytest.mark.parametrizex("cache_mode", [CacheMode.WB, CacheMode.WO])
@pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand]))
@pytest.mark.require_disk("core", DiskTypeLowerThan("cache"))
def test_cache_write_lazy_insert_error(cache_mode, cache_line_size):
with TestRun.step("Prepare core and cache"):
cache, core, _ = prepare_configuration(cache_mode, cache_line_size)
with TestRun.step("Run fio and verify errors are present"):
fio_errors = (
Fio()
.create_command()
.io_engine(IoEngine.libaio)
.size(core.size)
.block_size(cache_line_size)
.read_write(ReadWrite.randwrite)
.target(core)
.continue_on_error(ErrorFilter.io)
.direct()
.run()[0]
.total_errors()
)
if fio_errors == 0:
TestRun.fail(f"No I/O ended with error")
with TestRun.step("Check error statistics and state on cache"):
stats = cache.get_statistics()
occupancy = cache.get_occupancy().get_value()
if occupancy != 0:
TestRun.fail(f"Occupancy is not zero, but {occupancy}")
cache_writes = stats.block_stats.cache.writes / cache_line_size.value
cache_errors = stats.error_stats.cache.total
if cache_writes != 1:
TestRun.fail(f"There only should be one cache write attempt before cache stop")
if cache_writes != cache_errors:
TestRun.fail(
f"Cache errors ({cache_errors}) should equal to number of requests to"
f" cache ({cache_writes})"
)
state = cache.get_status()
if state != CacheStatus.not_running:
TestRun.fail(f"Cache should be in 'Not running' state, and it's {state}")
def prepare_configuration(cache_mode, cache_line_size):
cache_device = TestRun.disks["cache"]
core_device = TestRun.disks["core"]
with TestRun.step("Creating cache partition"):
cache_device.create_partitions([Size(50, Unit.MebiByte)])
with TestRun.step("Creating cache error device"):
error_device = ErrorDevice("error", cache_device.partitions[0])
with TestRun.step("Starting cache to check metadata offset"):
cache = casadm.start_cache(error_device, cache_line_size=cache_line_size, force=True)
cache_size = cache.size
cache.stop()
with TestRun.step("Setting errors on non-metadata area"):
error_device.change_table(
DmTable.error_table(
offset=(cache_device.partitions[0].size - cache_size).get_value(Unit.Blocks512),
size=cache_size,
).fill_gaps(cache_device.partitions[0])
)
with TestRun.step("Create core partition with size of usable cache space"):
core_device.create_partitions([cache_size])
with TestRun.step("Starting and configuring cache"):
cache = casadm.start_cache(
error_device, cache_mode=cache_mode, cache_line_size=cache_line_size, force=True
)
result = cache.set_seq_cutoff_policy(SeqCutOffPolicy.never)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set seq cutoff policy")
result = cache.set_cleaning_policy(CleaningPolicy.nop)
if result.exit_code:
TestRun.LOGGER.exception("Couldn't set cleaning policy")
with TestRun.step("Stopping udev"):
Udev.disable()
with TestRun.step("Adding core device"):
core = cache.add_core(core_dev=core_device.partitions[0])
return cache, core, core_device.partitions[0]
| true | true |
f71592ac0589f8a0a4e9faf12a0a0f6c0ac061b2 | 2,240 | py | Python | importo/fields/html.py | torchbox/django-importo | 57c96951af624d2f6c9128c5689d55f1cc1f7019 | [
"BSD-3-Clause"
] | 1 | 2021-12-09T15:10:50.000Z | 2021-12-09T15:10:50.000Z | importo/fields/html.py | torchbox/django-importo | 57c96951af624d2f6c9128c5689d55f1cc1f7019 | [
"BSD-3-Clause"
] | null | null | null | importo/fields/html.py | torchbox/django-importo | 57c96951af624d2f6c9128c5689d55f1cc1f7019 | [
"BSD-3-Clause"
] | null | null | null | from typing import Any, Mapping, Sequence
from urllib.parse import unquote_plus
import bleach
from importo.fields.base import Field
from importo.utils.html import tidy_html
class HTMLField(Field):
    """Import field that sanitises raw HTML before storing it.

    Values are URL-unquoted, stripped of disallowed tags/attributes via
    ``bleach.clean`` (strip=True), then passed through ``tidy_html`` with
    the configured cleanup flags.
    """
    # Tags bleach keeps; any tag not listed here is stripped entirely.
    allowed_tags = [
        "a",
        "abbr",
        "acronym",
        "b",
        "bdi",
        "blockquote",
        "cite",
        "code",
        "dd",
        "dl",
        "dt",
        "em",
        "h2",
        "h3",
        "h4",
        "h5",
        "i",
        "li",
        "ol",
        "p",
        "small",
        "span",
        "strong",
        "ul",
    ]
    # Per-tag attribute allow-list; attributes not listed are dropped.
    allowed_attrs = {
        "a": ["class", "href", "target", "title"],
        "abbr": ["title"],
        "acronym": ["title"],
        "cite": ["dir", "lang", "title"],
        "span": ["dir", "class", "lang", "title"],
        "h2": ["dir", "class", "lang", "title"],
        "h3": ["dir", "class", "lang", "title"],
        "h4": ["dir", "class", "lang", "title"],
        "h5": ["dir", "class", "lang", "title"],
    }
    def __init__(
        self,
        *args,
        allowed_tags: Sequence[str] = None,
        allowed_attrs: Mapping[str, str] = None,
        remove_empty_paragraphs: bool = True,
        remove_excess_whitespace: bool = True,
        remove_linebreaks: bool = False,
        **kwargs,
    ):
        """
        :param allowed_tags: optional override of the class-level tag allow-list.
        :param allowed_attrs: optional override of the per-tag attribute allow-list.
        :param remove_empty_paragraphs: forwarded to ``tidy_html``.
        :param remove_excess_whitespace: forwarded to ``tidy_html``.
        :param remove_linebreaks: forwarded to ``tidy_html``.
        """
        if allowed_tags is not None:
            self.allowed_tags = allowed_tags
        if allowed_attrs is not None:
            self.allowed_attrs = allowed_attrs
        self.remove_empty_paragraphs = remove_empty_paragraphs
        self.remove_excess_whitespace = remove_excess_whitespace
        self.remove_linebreaks = remove_linebreaks
        super().__init__(*args, **kwargs)
    def to_python(self, value: Any) -> str:
        """Return *value* as sanitised, tidied HTML text."""
        value = unquote_plus(str(value))
        # TODO: Add some way for the field to highlight/log when HTML is stripped
        value = bleach.clean(
            value, tags=self.allowed_tags, attributes=self.allowed_attrs, strip=True
        )
        return tidy_html(
            value,
            remove_empty_paragraphs=self.remove_empty_paragraphs,
            remove_excess_whitespace=self.remove_excess_whitespace,
            remove_linebreaks=self.remove_linebreaks,
        )
| 27.317073 | 84 | 0.537054 | from typing import Any, Mapping, Sequence
from urllib.parse import unquote_plus
import bleach
from importo.fields.base import Field
from importo.utils.html import tidy_html
class HTMLField(Field):
    """Import field that sanitises raw HTML before storing it.

    Values are URL-unquoted, cleaned with ``bleach`` (unknown tags and
    attributes stripped), then tidied via ``tidy_html``.
    """
    # Tags bleach keeps; everything else is stripped (strip=True below).
    allowed_tags = [
        "a", "abbr", "acronym", "b", "bdi", "blockquote", "cite", "code",
        "dd", "dl", "dt", "em", "h2", "h3", "h4", "h5", "i", "li", "ol",
        "p", "small", "span", "strong", "ul",
    ]
    # Per-tag attribute allow-list; attributes not listed are dropped.
    allowed_attrs = {
        "a": ["class", "href", "target", "title"],
        "abbr": ["title"],
        "acronym": ["title"],
        "cite": ["dir", "lang", "title"],
        "span": ["dir", "class", "lang", "title"],
        "h2": ["dir", "class", "lang", "title"],
        "h3": ["dir", "class", "lang", "title"],
        "h4": ["dir", "class", "lang", "title"],
        "h5": ["dir", "class", "lang", "title"],
    }
    def __init__(
        self,
        *args,
        allowed_tags: Sequence[str] = None,
        allowed_attrs: Mapping[str, str] = None,
        remove_empty_paragraphs: bool = True,
        remove_excess_whitespace: bool = True,
        remove_linebreaks: bool = False,
        **kwargs,
    ):
        """Store the sanitisation options, falling back to class defaults."""
        # Only shadow the class-level allow-lists when an override is given.
        for attr_name, override in (
            ("allowed_tags", allowed_tags),
            ("allowed_attrs", allowed_attrs),
        ):
            if override is not None:
                setattr(self, attr_name, override)
        self.remove_empty_paragraphs = remove_empty_paragraphs
        self.remove_excess_whitespace = remove_excess_whitespace
        self.remove_linebreaks = remove_linebreaks
        super().__init__(*args, **kwargs)
    def to_python(self, value: Any) -> str:
        """Return *value* as sanitised, tidied HTML text."""
        unquoted = unquote_plus(str(value))
        cleaned = bleach.clean(
            unquoted,
            tags=self.allowed_tags,
            attributes=self.allowed_attrs,
            strip=True,
        )
        return tidy_html(
            cleaned,
            remove_empty_paragraphs=self.remove_empty_paragraphs,
            remove_excess_whitespace=self.remove_excess_whitespace,
            remove_linebreaks=self.remove_linebreaks,
        )
| true | true |
f71595154e1ed423c34fbdbea424fd5fd9cd6d53 | 1,245 | py | Python | myroot/global_config.py | pinoylearnpython/dev | 3fd904c594a8c5cab7fd1fe2ad775fd519410a8a | [
"MIT"
] | 2 | 2019-10-29T07:41:38.000Z | 2020-01-31T16:46:15.000Z | myroot/global_config.py | pinoylearnpython/dev | 3fd904c594a8c5cab7fd1fe2ad775fd519410a8a | [
"MIT"
] | null | null | null | myroot/global_config.py | pinoylearnpython/dev | 3fd904c594a8c5cab7fd1fe2ad775fd519410a8a | [
"MIT"
] | 2 | 2019-04-23T04:40:07.000Z | 2020-02-17T09:11:48.000Z | from django.conf import settings
def global_settings(request):
    """Return a dict of site-wide constants plus current-user info.

    Intended to be registered as a Django context processor: the returned
    keys become available in every template.

    :param request: the current HttpRequest (``request.user`` is read).
    :return: dict of template variables.
    """
    # Current user logged in info; defaults used for anonymous visitors.
    cur_user_id = 0
    cur_user_name = ''
    cur_user_full_name = ''
    if request.user.is_authenticated:
        # Get user info
        cur_user_id = request.user.id
        cur_user_name = request.user.username
        cur_user_full_name = request.user.first_name + " " + request.user.last_name
    # NOTE(review): key casing is inconsistent ('CUR_USER_ID' vs
    # 'CUR_USER_name') but templates depend on these exact names.
    return{
        'BASE_URL': settings.BASE_URL,
        'SITE_SHORT_NAME': settings.SITE_SHORT_NAME,
        'SITE_FULL_NAME': settings.SITE_FULL_NAME,
        'SITE_YEAR_STARTED': settings.SITE_YEAR_STARTED,
        'SITE_URL_HOME': settings.SITE_URL_HOME,
        'SITE_SLOGAN': settings.SITE_SLOGAN,
        'SITE_CONTACT_US': settings.SITE_CONTACT_US,
        'MIN_CHARS_SEARCH': settings.MIN_CHARS_SEARCH,
        'APP_URL_TOP_LOGO': settings.APP_URL_TOP_LOGO,
        'GRECAP_SITE_KEY': settings.GRECAP_SITE_KEY,
        'DEFAULT_AVATAR': settings.DEFAULT_AVATAR,
        'CUR_USER_ID': cur_user_id,
        'CUR_USER_name': cur_user_name,
        'CUR_USER_full_name': cur_user_full_name.strip(),
    }
| 35.571429 | 84 | 0.665863 | from django.conf import settings
def global_settings(request):
    """Return a dict of site-wide constants plus current-user info
    (for use as a Django template context processor)."""
    user = request.user
    uid, uname, full_name = 0, '', ''
    if user.is_authenticated:
        uid = user.id
        uname = user.username
        full_name = user.first_name + " " + user.last_name
    context = {
        'BASE_URL': settings.BASE_URL,
        'SITE_SHORT_NAME': settings.SITE_SHORT_NAME,
        'SITE_FULL_NAME': settings.SITE_FULL_NAME,
        'SITE_YEAR_STARTED': settings.SITE_YEAR_STARTED,
        'SITE_URL_HOME': settings.SITE_URL_HOME,
        'SITE_SLOGAN': settings.SITE_SLOGAN,
        'SITE_CONTACT_US': settings.SITE_CONTACT_US,
        'MIN_CHARS_SEARCH': settings.MIN_CHARS_SEARCH,
        'APP_URL_TOP_LOGO': settings.APP_URL_TOP_LOGO,
        'GRECAP_SITE_KEY': settings.GRECAP_SITE_KEY,
        'DEFAULT_AVATAR': settings.DEFAULT_AVATAR,
        'CUR_USER_ID': uid,
        'CUR_USER_name': uname,
        'CUR_USER_full_name': full_name.strip(),
    }
    return context
| true | true |
f7159641e3e977f8f51e5cc647c57a31d0966efe | 1,025 | py | Python | server/src/models.py | Jobegiar99/Garden-Palooza | 694acaf42a56f3ecfb2fa3912e3777ad44e3126e | [
"MIT"
] | 1 | 2021-08-02T23:33:50.000Z | 2021-08-02T23:33:50.000Z | server/src/models.py | Jobegiar99/Garden-Palooza | 694acaf42a56f3ecfb2fa3912e3777ad44e3126e | [
"MIT"
] | 61 | 2021-08-03T00:13:24.000Z | 2021-08-20T17:38:36.000Z | server/src/models.py | Jobegiar99/Garden-Palooza | 694acaf42a56f3ecfb2fa3912e3777ad44e3126e | [
"MIT"
] | 1 | 2021-08-22T03:32:42.000Z | 2021-08-22T03:32:42.000Z | # flake8: noqa
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
# Module-level SQLAlchemy handle shared by the models below.
db = SQLAlchemy()
class UserModel(db.Model):
    """Account record: a username (primary key) plus its password."""
    __tablename__ = "user"
    # Primary key, up to 36 characters.
    username = db.Column(db.String(36), primary_key=True)
    # NOTE(review): plain String(30) — presumably meant to hold a hash;
    # confirm passwords are not stored in clear text.
    password = db.Column(db.String(30))
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __repr__(self):
        return f"<User {self.username}>"
class GardenModel(db.Model):
    """A named garden layout owned by a user.

    The two layers are integer tile grids stored as PostgreSQL arrays.
    """
    __tablename__ = "garden"
    gardenName = db.Column(db.String(30), primary_key=True)
    # Foreign key to UserModel.username (the owning user).
    ownerName = db.Column(db.String(36), db.ForeignKey("user.username"))
    # will improve this if we have enough time
    firstLayer = db.Column(postgresql.ARRAY(db.Integer()))
    secondLayer = db.Column(postgresql.ARRAY(db.Integer()))
    def __init__(self, gardenName, ownerName, firstLayer, secondLayer):
        # BUG FIX: the original signature omitted `self`, so calling
        # GardenModel(...) bound the first argument to `gardenName` and the
        # body raised NameError on `self`.
        self.gardenName = gardenName
        self.ownerName = ownerName
        self.firstLayer = firstLayer
        self.secondLayer = secondLayer
    def __repr__(self):
        # Mirror UserModel's repr style for easier debugging.
        return f"<Garden {self.gardenName}>"
| 27.702703 | 72 | 0.693659 |
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
# Module-level SQLAlchemy handle shared by the models below.
db = SQLAlchemy()
class UserModel(db.Model):
    """User account: username (primary key) and password."""
    __tablename__ = "user"
    # Primary key column.
    username = db.Column(db.String(36), primary_key=True)
    # NOTE(review): looks like a clear-text password column — verify a hash
    # is stored here, not the raw password.
    password = db.Column(db.String(30))
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def __repr__(self):
        return f"<User {self.username}>"
class GardenModel(db.Model):
    """A named garden layout owned by a user; layers are integer arrays."""
    __tablename__ = "garden"
    gardenName = db.Column(db.String(30), primary_key=True)
    # Foreign key to UserModel.username (the owning user).
    ownerName = db.Column(db.String(36), db.ForeignKey("user.username"))
    firstLayer = db.Column(postgresql.ARRAY(db.Integer()))
    secondLayer = db.Column(postgresql.ARRAY(db.Integer()))
    def __init__(self, gardenName, ownerName, firstLayer, secondLayer):
        # BUG FIX: original __init__ lacked `self` as first parameter, so
        # instantiation raised NameError inside the body.
        self.gardenName = gardenName
        self.ownerName = ownerName
        self.firstLayer = firstLayer
        self.secondLayer = secondLayer
| true | true |
f715967f3c28b129f56ec6481c8bda553b44d472 | 963 | py | Python | lpot/ux/components/model/tensorflow/frozen_pb.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | null | null | null | lpot/ux/components/model/tensorflow/frozen_pb.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | null | null | null | lpot/ux/components/model/tensorflow/frozen_pb.py | intelkevinputnam/lpot-docs | 1ff32b4d89074a6bd133ba531f7c0cea3b73152f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow frozen pb model."""
from ..model_type_getter import get_model_type
from .model import TensorflowModel as TFModel
class FrozenPbModel(TFModel):
    """Tensorflow model stored as a frozen protobuf graph."""
    @staticmethod
    def supports_path(path: str) -> bool:
        """Return True when the model at *path* is detected as frozen-pb."""
        return "frozen_pb" == get_model_type(path)
| 34.392857 | 74 | 0.73001 |
from ..model_type_getter import get_model_type
from .model import TensorflowModel as TFModel
class FrozenPbModel(TFModel):
    """Tensorflow model stored as a frozen protobuf graph."""
    @staticmethod
    def supports_path(path: str) -> bool:
        """Return True when *path* points at a frozen-pb model."""
        return get_model_type(path) == "frozen_pb"
| true | true |
f715970fd90159b33cf104a6f896c9d635be8d7d | 981 | py | Python | kafka_to_elastic/kafka_historique_montants_to_elastic.py | Neemys/BigCoin | 13d76eaccf66fd8a50820bb835fe8b69c39a28af | [
"Apache-2.0"
] | null | null | null | kafka_to_elastic/kafka_historique_montants_to_elastic.py | Neemys/BigCoin | 13d76eaccf66fd8a50820bb835fe8b69c39a28af | [
"Apache-2.0"
] | 10 | 2018-03-22T09:21:11.000Z | 2018-04-11T08:50:58.000Z | kafka_to_elastic/kafka_historique_montants_to_elastic.py | Neemys/BigCoin | 13d76eaccf66fd8a50820bb835fe8b69c39a28af | [
"Apache-2.0"
] | 2 | 2018-03-30T09:52:48.000Z | 2018-04-11T13:13:36.000Z | from bigcoin import bc_kafka,bc_elasticsearch
import json
import datetime
import signal
def generate_elastic_insert_from_messages(messages):
    """Yield Elasticsearch bulk actions built from raw JSON Kafka messages.

    Each message must decode to a dict with keys 'index', 'timestamp' and
    'value'.
    """
    for message in messages:
        json_message = json.loads(message)
        # value is in satoshi; divide by 1e8 to store BTC
        yield {
            '_index' : 'transaction_idx',
            '_type': 'transaction',
            '_id': json_message['index'],
            '_source': {
                'date': datetime.datetime.utcfromtimestamp(json_message['timestamp']),
                'value': float(json_message["value"])/ 100000000,
                'data_type': 'historique'
            }
        }
def main():
    """Drain the 'historique_montants' Kafka topic into Elasticsearch,
    then block until the process is restarted externally."""
    bc_consumer = bc_kafka.BCKafkaConsumer("historique_montants","python_historique_montants_consumer")
    bc_es = bc_elasticsearch.BCElasticsearch()
    while True:
        messages = bc_consumer.get_messages()
        if len(messages) == 0:
            break
        bc_es.send_messages(generate_elastic_insert_from_messages(messages))
        # Commit offsets only after a successful bulk insert.
        bc_consumer.set_messages_read()
    # Wait forever for a restart (the process will be killed then restarted)
    signal.pause()
# Run the exporter only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 26.513514 | 100 | 0.746177 | from bigcoin import bc_kafka,bc_elasticsearch
import json
import datetime
import signal
def generate_elastic_insert_from_messages(messages):
    """Yield Elasticsearch bulk actions built from raw JSON Kafka messages."""
    for raw in messages:
        doc = json.loads(raw)
        # Convert the satoshi amount to BTC and the epoch timestamp to UTC.
        source = {
            'date': datetime.datetime.utcfromtimestamp(doc['timestamp']),
            'value': float(doc["value"]) / 100000000,
            'data_type': 'historique',
        }
        yield {
            '_index': 'transaction_idx',
            '_type': 'transaction',
            '_id': doc['index'],
            '_source': source,
        }
def main():
    """Drain the Kafka topic into Elasticsearch, then idle until restarted."""
    consumer = bc_kafka.BCKafkaConsumer("historique_montants", "python_historique_montants_consumer")
    es = bc_elasticsearch.BCElasticsearch()
    while True:
        batch = consumer.get_messages()
        if len(batch) == 0:
            break
        es.send_messages(generate_elastic_insert_from_messages(batch))
        consumer.set_messages_read()
    # Block forever; the supervisor kills and restarts the process.
    signal.pause()
# Script entry point.
if __name__ == '__main__':
    main()
| true | true |
f715986ba969fafbf1bb6c8a7b6a6295ca3828db | 1,546 | py | Python | mltemplate/ci/stages.py | vmarkovtsev/ml-repo-template | bf3596e2a1c319166092c1fd263ec28ceacc1dd1 | [
"MIT"
] | null | null | null | mltemplate/ci/stages.py | vmarkovtsev/ml-repo-template | bf3596e2a1c319166092c1fd263ec28ceacc1dd1 | [
"MIT"
] | null | null | null | mltemplate/ci/stages.py | vmarkovtsev/ml-repo-template | bf3596e2a1c319166092c1fd263ec28ceacc1dd1 | [
"MIT"
] | null | null | null | from mltemplate.ci.core import Stage
from mltemplate.ci.jobs import BumpVersionJob, PypiDeployJob, RunTestsJob, StyleCheckJob
class BumpVersionStage(Stage):
    """CI stage wrapping the single version-bump job."""
    def __init__(self, name="bump-version", **kwargs):
        """
        :param name: stage name; also assigned as the job's stage.
        :param kwargs: forwarded to BumpVersionJob.
        """
        super(BumpVersionStage, self).__init__(
            name=name, jobs=[BumpVersionJob(stage=name, **kwargs)]
        )
        self.set_job_stages(name)
class StyleCheckStage(Stage):
    """CI stage wrapping the single style-check job."""
    def __init__(self, name="style", **kwargs):
        """
        :param name: stage name; also assigned as the job's stage.
        :param kwargs: forwarded to StyleCheckJob.
        """
        super(StyleCheckStage, self).__init__(
            name=name, jobs=[StyleCheckJob(stage=name, **kwargs)]
        )
        self.set_job_stages(name)
class PytestStage(Stage):
    """CI stage that runs the test job once per python version."""
    def __init__(self, name="test", python_versions=None, **kwargs):
        """
        :param name: stage name.
        :param python_versions: versions to test against; defaults to 3.6-3.8.
        :param kwargs: forwarded to each RunTestsJob.
        """
        self.python_versions = [3.6, 3.7, 3.8] if python_versions is None else python_versions
        jobs = self._init_jobs(stage=name, **kwargs)
        super(PytestStage, self).__init__(name=name, jobs=jobs)
    def _init_jobs(self, stage, **kwargs):
        """Build one test job per python version; only the last job uploads
        coverage (codecov) so it is reported exactly once."""
        def init_test(v, codecov):
            job = RunTestsJob(python_version=v, stage=stage, **kwargs)
            if codecov:
                job["after_success"] = ["codecov"]
            return job
        last_item = len(self.python_versions) - 1
        return [init_test(v, i == last_item) for i, v in enumerate(self.python_versions)]
class PypiDeployStage(Stage):
    """CI stage wrapping the single PyPI deployment job."""
    def __init__(self, name="deploy", **kwargs):
        """
        :param name: stage name; also assigned as the job's stage.
        :param kwargs: forwarded to PypiDeployJob.
        """
        super(PypiDeployStage, self).__init__(
            name=name, jobs=[PypiDeployJob(stage=name, **kwargs)]
        )
        self.set_job_stages(name)
| 35.136364 | 94 | 0.650065 | from mltemplate.ci.core import Stage
from mltemplate.ci.jobs import BumpVersionJob, PypiDeployJob, RunTestsJob, StyleCheckJob
class BumpVersionStage(Stage):
    """CI stage holding the single version-bump job."""
    def __init__(self, name="bump-version", **kwargs):
        bump_jobs = [BumpVersionJob(stage=name, **kwargs)]
        super().__init__(name=name, jobs=bump_jobs)
        self.set_job_stages(name)
class StyleCheckStage(Stage):
    """CI stage holding the single style-check job."""
    def __init__(self, name="style", **kwargs):
        lint_jobs = [StyleCheckJob(stage=name, **kwargs)]
        super().__init__(name=name, jobs=lint_jobs)
        self.set_job_stages(name)
class PytestStage(Stage):
    """CI stage that runs the test job once per python version."""
    def __init__(self, name="test", python_versions=None, **kwargs):
        if python_versions is None:
            python_versions = [3.6, 3.7, 3.8]
        self.python_versions = python_versions
        super().__init__(name=name, jobs=self._init_jobs(stage=name, **kwargs))
    def _init_jobs(self, stage, **kwargs):
        """One RunTestsJob per version; the final job also uploads coverage."""
        versions = self.python_versions
        jobs = []
        for index, version in enumerate(versions):
            job = RunTestsJob(python_version=version, stage=stage, **kwargs)
            if index == len(versions) - 1:
                job["after_success"] = ["codecov"]
            jobs.append(job)
        return jobs
class PypiDeployStage(Stage):
    """CI stage holding the single PyPI deployment job."""
    def __init__(self, name="deploy", **kwargs):
        deploy_jobs = [PypiDeployJob(stage=name, **kwargs)]
        super().__init__(name=name, jobs=deploy_jobs)
        self.set_job_stages(name)
| true | true |
f7159a946ae2267a79e3a78a56dd34aec97345e1 | 1,130 | py | Python | simulator/event.py | djpetti/molecube | b7267803f080ed62e158fc5c1cfcff6beb709de7 | [
"MIT"
] | 2 | 2018-09-11T21:09:22.000Z | 2018-10-05T08:35:58.000Z | simulator/event.py | djpetti/molecube | b7267803f080ed62e158fc5c1cfcff6beb709de7 | [
"MIT"
] | 24 | 2018-09-09T22:51:26.000Z | 2018-11-29T22:49:57.000Z | simulator/event.py | djpetti/molecube | b7267803f080ed62e158fc5c1cfcff6beb709de7 | [
"MIT"
] | 1 | 2018-10-16T20:01:20.000Z | 2018-10-16T20:01:20.000Z | class Event(object):
""" Represents a GUI event. """
def __init__(self, tk_event):
"""
Args:
tk_event: The underlying Tkinter event to wrap. """
self._tk_event = tk_event
@classmethod
def get_identifier(cls):
"""
Returns:
The Tkinter identifier for this event. """
raise NotImplementedError("Must be implemented by subclass.")
class MouseEvent(Event):
  """ Base class for events involving the mouse. """
  def get_pos(self):
    """
    Returns:
      The position of the mouse during the event, as an (x, y) tuple. """
    return (self._tk_event.x, self._tk_event.y)
class MouseDragEvent(MouseEvent):
  """ Emitted every time the mouse is dragged with the primary button held down.
  """
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: motion while button 1 is held.
    return "<B1-Motion>"
class MousePressEvent(MouseEvent):
  """ Emitted every time the primary mouse button is pressed. """
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: button 1 press.
    return "<Button-1>"
class MouseReleaseEvent(MouseEvent):
  """ Emitted every time the primary mouse button is released. """
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: button 1 release.
    return "<ButtonRelease-1>"
class Event(object):
  """Represents a GUI event, wrapping a raw Tkinter event."""
  def __init__(self, tk_event):
    """
    Args:
      tk_event: The underlying Tkinter event to wrap. """
    self._tk_event = tk_event
  @classmethod
  def get_identifier(cls):
    """
    Returns:
      The Tkinter identifier (bind sequence string) for this event.
    Raises:
      NotImplementedError: Always; concrete subclasses must override. """
    raise NotImplementedError("Must be implemented by subclass.")
class MouseEvent(Event):
  """Base class for events involving the mouse."""
  def get_pos(self):
    """
    Returns:
      The position of the mouse during the event, as an (x, y) tuple. """
    return (self._tk_event.x, self._tk_event.y)
class MouseDragEvent(MouseEvent):
  """Emitted every time the mouse is dragged with the primary button held down."""
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: motion while button 1 is held.
    return "<B1-Motion>"
class MousePressEvent(MouseEvent):
  """Emitted every time the primary mouse button is pressed."""
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: button 1 press.
    return "<Button-1>"
class MouseReleaseEvent(MouseEvent):
  """Emitted every time the primary mouse button is released."""
  @classmethod
  def get_identifier(cls):
    # Tkinter bind sequence: button 1 release.
    return "<ButtonRelease-1>"
| true | true |
f7159b75d0cdb78ddc25a9f3959376ef6d82d188 | 16,663 | py | Python | connexion/operations/abstract.py | eyalkaspi/connexion | 9e07c9d5ba554119c38e17d3afc120eec0c1e390 | [
"Apache-2.0"
] | null | null | null | connexion/operations/abstract.py | eyalkaspi/connexion | 9e07c9d5ba554119c38e17d3afc120eec0c1e390 | [
"Apache-2.0"
] | null | null | null | connexion/operations/abstract.py | eyalkaspi/connexion | 9e07c9d5ba554119c38e17d3afc120eec0c1e390 | [
"Apache-2.0"
] | null | null | null | import abc
import logging
from connexion.operations.secure import SecureOperation
from ..decorators.metrics import UWSGIMetricsCollector
from ..decorators.parameter import parameter_to_arg
from ..decorators.produces import BaseSerializer, Produces
from ..decorators.response import ResponseValidator
from ..decorators.validation import ParameterValidator, RequestBodyValidator
from ..utils import all_json, is_nullable, make_type
logger = logging.getLogger('connexion.operations.abstract')
# Fallback Content-Type used when the spec does not narrow `produces`.
DEFAULT_MIMETYPE = 'application/json'
# Default validator classes; can be overridden per operation via validator_map.
VALIDATOR_MAP = {
    'parameter': ParameterValidator,
    'body': RequestBodyValidator,
    'response': ResponseValidator,
}
class AbstractOperation(SecureOperation, metaclass=abc.ABCMeta):
    """
    An API routes requests to an Operation by a (path, method) pair.
    The operation uses a resolver to resolve its handler function.
    We use the provided spec to do a bunch of heavy lifting before
    (and after) we call security_schemes handler.
    The registered handler function ends up looking something like:
        @secure_endpoint
        @validate_inputs
        @deserialize_function_inputs
        @serialize_function_outputs
        @validate_outputs
        def user_provided_handler_function(important, stuff):
            if important:
                serious_business(stuff)
    """
    def __init__(self, api, method, path, operation, resolver,
                 app_security=None, security_schemes=None,
                 validate_responses=False, strict_validation=False,
                 randomize_endpoint=None, validator_map=None,
                 format_converters=None, pythonic_params=False,
                 uri_parser_class=None, pass_context_arg_name=None):
        """
        :param api: api that this operation is attached to
        :type api: apis.AbstractAPI
        :param method: HTTP method
        :type method: str
        :param path: URL path of the operation
        :type path: str
        :param operation: swagger operation object
        :type operation: dict
        :param resolver: Callable that maps operationID to a function
        :param app_security: list of security rules the application uses by default
        :type app_security: list
        :param security_schemes: `Security Definitions Object
            <https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#security-definitions-object>`_
        :type security_schemes: dict
        :param validate_responses: True enables validation. Validation errors generate HTTP 500 responses.
        :type validate_responses: bool
        :param strict_validation: True enables validation on invalid request parameters
        :type strict_validation: bool
        :param randomize_endpoint: number of random characters to append to operation name
        :type randomize_endpoint: integer
        :param validator_map: Custom validators for the types "parameter", "body" and "response".
        :type validator_map: dict
        :param format_converters: Custom value converters based on the schema format of properties.
        :type format_converters: dict
        :param pythonic_params: When True CamelCase parameters are converted to snake_case and an underscore is appended
            to any shadowed built-ins
        :type pythonic_params: bool
        :param uri_parser_class: class to use for uri parsing
        :type uri_parser_class: AbstractURIParser
        :param pass_context_arg_name: If not None will try to inject the request context to the function using this
            name.
        :type pass_context_arg_name: str|None
        """
        self._api = api
        self._method = method
        self._path = path
        self._operation = operation
        self._resolver = resolver
        self._security = app_security
        self._security_schemes = security_schemes
        self._validate_responses = validate_responses
        self._strict_validation = strict_validation
        self._pythonic_params = pythonic_params
        self._uri_parser_class = uri_parser_class
        self._pass_context_arg_name = pass_context_arg_name
        self._randomize_endpoint = randomize_endpoint
        self._operation_id = self._operation.get("operationId")
        # The resolver may normalise/override the spec's operationId.
        self._resolution = resolver.resolve(self)
        self._operation_id = self._resolution.operation_id
        self._responses = self._operation.get("responses", {})
        # Start from the default validators and layer any custom ones on top.
        self._validator_map = dict(VALIDATOR_MAP)
        self._validator_map.update(validator_map or {})
        self._format_converters = format_converters or {}
    @property
    def method(self):
        """
        The HTTP method for this operation (ex. GET, POST)
        """
        return self._method
    @property
    def path(self):
        """
        The path of the operation, relative to the API base path
        """
        return self._path
    @property
    def responses(self):
        """
        Returns the responses for this operation
        """
        return self._responses
    @property
    def validator_map(self):
        """
        Validators to use for parameter, body, and response validation
        """
        return self._validator_map
    @property
    def format_converters(self):
        """
        Converters to use to convert input type based on the schema format
        attribute.
        """
        return self._format_converters
    @property
    def operation_id(self):
        """
        The operation id used to indentify the operation internally to the app
        """
        return self._operation_id
    @property
    def randomize_endpoint(self):
        """
        number of random digits to generate and append to the operation_id.
        """
        return self._randomize_endpoint
    @property
    def router_controller(self):
        """
        The router controller to use (python module where handler functions live)
        """
        return self._router_controller
    @property
    def strict_validation(self):
        """
        If True, validate all requests against the spec
        """
        return self._strict_validation
    @property
    def pythonic_params(self):
        """
        If True, convert CamelCase into pythonic_variable_names
        """
        return self._pythonic_params
    @property
    def validate_responses(self):
        """
        If True, check the response against the response schema, and return an
        error if the response does not validate.
        """
        return self._validate_responses
    @staticmethod
    def _get_file_arguments(files, arguments, has_kwargs=False):
        """Select the uploaded files the handler accepts (all of them when the
        handler takes **kwargs)."""
        return {k: v for k, v in files.items() if k in arguments or has_kwargs}
    @abc.abstractmethod
    def _get_val_from_param(self, value, query_defn):
        """
        Convert input parameters into the correct type
        """
    def _query_args_helper(self, query_defns, query_arguments,
                           function_arguments, has_kwargs, sanitize):
        """Convert the query parameters the handler accepts using their spec
        definitions; parameters absent from the spec are logged and skipped."""
        res = {}
        for key, value in query_arguments.items():
            key = sanitize(key)
            if not has_kwargs and key not in function_arguments:
                logger.debug("Query Parameter '%s' not in function arguments", key)
            else:
                logger.debug("Query Parameter '%s' in function arguments", key)
                try:
                    query_defn = query_defns[key]
                except KeyError:  # pragma: no cover
                    logger.error("Function argument '{}' not defined in specification".format(key))
                else:
                    logger.debug('%s is a %s', key, query_defn)
                    res.update({key: self._get_val_from_param(value, query_defn)})
        return res
    @abc.abstractmethod
    def _get_query_arguments(self, query, arguments, has_kwargs, sanitize):
        """
        extract handler function arguments from the query parameters
        """
    @abc.abstractmethod
    def _get_body_argument(self, body, arguments, has_kwargs, sanitize):
        """
        extract handler function arguments from the request body
        """
    def _get_path_arguments(self, path_params, sanitize):
        """
        extract handler function arguments from path parameters
        """
        kwargs = {}
        path_defns = {p["name"]: p for p in self.parameters if p["in"] == "path"}
        for key, value in path_params.items():
            sanitized_key = sanitize(key)
            if key in path_defns:
                kwargs[sanitized_key] = self._get_val_from_param(value, path_defns[key])
            else:  # Assume path params mechanism used for injection
                kwargs[sanitized_key] = value
        return kwargs
    @abc.abstractproperty
    def parameters(self):
        """
        Returns the parameters for this operation
        """
    @abc.abstractproperty
    def produces(self):
        """
        Content-Types that the operation produces
        """
    @abc.abstractproperty
    def consumes(self):
        """
        Content-Types that the operation consumes
        """
    @abc.abstractproperty
    def body_schema(self):
        """
        The body schema definition for this operation.
        """
    @abc.abstractproperty
    def body_definition(self):
        """
        The body definition for this operation.
        :rtype: dict
        """
    def get_arguments(self, path_params, query_params, body, files, arguments,
                      has_kwargs, sanitize):
        """
        get arguments for handler function
        """
        ret = {}
        ret.update(self._get_path_arguments(path_params, sanitize))
        ret.update(self._get_query_arguments(query_params, arguments,
                                             has_kwargs, sanitize))
        # Only methods that carry a request body contribute a body argument.
        if self.method.upper() in ["PATCH", "POST", "PUT"]:
            ret.update(self._get_body_argument(body, arguments,
                                               has_kwargs, sanitize))
        ret.update(self._get_file_arguments(files, arguments, has_kwargs))
        return ret
    def response_definition(self, status_code=None,
                            content_type=None):
        """
        response definition for this endpoint
        """
        content_type = content_type or self.get_mimetype()
        # Fall back to the spec's "default" response when the exact status
        # code is not declared.
        response_definition = self.responses.get(
            str(status_code),
            self.responses.get("default", {})
        )
        return response_definition
    @abc.abstractmethod
    def response_schema(self, status_code=None, content_type=None):
        """
        response schema for this endpoint
        """
    @abc.abstractmethod
    def example_response(self, status_code=None, content_type=None):
        """
        Returns an example from the spec
        """
    @abc.abstractmethod
    def get_path_parameter_types(self):
        """
        Returns the types for parameters in the path
        """
    @abc.abstractmethod
    def with_definitions(self, schema):
        """
        Returns the given schema, but with the definitions from the spec
        attached. This allows any remaining references to be resolved by a
        validator (for example).
        """
    def get_mimetype(self):
        """
        If the endpoint has no 'produces' then the default is
        'application/json'.
        :rtype str
        """
        if all_json(self.produces):
            try:
                return self.produces[0]
            except IndexError:
                return DEFAULT_MIMETYPE
        elif len(self.produces) == 1:
            return self.produces[0]
        else:
            return DEFAULT_MIMETYPE
    @property
    def _uri_parsing_decorator(self):
        """
        Returns a decorator that parses request data and handles things like
        array types, and duplicate parameter definitions.
        """
        return self._uri_parser_class(self.parameters, self.body_definition)
    @property
    def function(self):
        """
        Operation function with decorators
        :rtype: types.FunctionType
        """
        # Decorators are applied inside-out: the last one wrapped here runs
        # first at request time.
        function = parameter_to_arg(
            self, self._resolution.function, self.pythonic_params,
            self._pass_context_arg_name
        )
        if self.validate_responses:
            logger.debug('... Response validation enabled.')
            response_decorator = self.__response_validation_decorator
            logger.debug('... Adding response decorator (%r)', response_decorator)
            function = response_decorator(function)
        produces_decorator = self.__content_type_decorator
        logger.debug('... Adding produces decorator (%r)', produces_decorator)
        function = produces_decorator(function)
        for validation_decorator in self.__validation_decorators:
            function = validation_decorator(function)
        uri_parsing_decorator = self._uri_parsing_decorator
        function = uri_parsing_decorator(function)
        # NOTE: the security decorator should be applied last to check auth before anything else :-)
        security_decorator = self.security_decorator
        logger.debug('... Adding security decorator (%r)', security_decorator)
        function = security_decorator(function)
        function = self._request_response_decorator(function)
        if UWSGIMetricsCollector.is_available():  # pragma: no cover
            decorator = UWSGIMetricsCollector(self.path, self.method)
            function = decorator(function)
        return function
    @property
    def __content_type_decorator(self):
        """
        Get produces decorator.
        If the operation mimetype format is json then the function return value is jsonified
        From Swagger Specification:
        **Produces**
        A list of MIME types the operation can produce. This overrides the produces definition at the Swagger Object.
        An empty value MAY be used to clear the global definition.
        :rtype: types.FunctionType
        """
        logger.debug('... Produces: %s', self.produces, extra=vars(self))
        mimetype = self.get_mimetype()
        if all_json(self.produces):  # endpoint will return json
            logger.debug('... Produces json', extra=vars(self))
            # TODO: Refactor this.
            return lambda f: f
        elif len(self.produces) == 1:
            logger.debug('... Produces %s', mimetype, extra=vars(self))
            decorator = Produces(mimetype)
            return decorator
        else:
            return BaseSerializer()
    @property
    def __validation_decorators(self):
        """
        Yield the request validation decorators configured for this
        operation (parameter and/or body).
        :rtype: types.FunctionType
        """
        ParameterValidator = self.validator_map['parameter']
        RequestBodyValidator = self.validator_map['body']
        if self.parameters:
            yield ParameterValidator(self.parameters,
                                     self.api,
                                     strict_validation=self.strict_validation)
        if self.body_schema:
            yield RequestBodyValidator(self.body_schema, self.consumes, self.api,
                                       is_nullable(self.body_definition),
                                       strict_validation=self.strict_validation)
    @property
    def __response_validation_decorator(self):
        """
        Get a decorator for validating the generated Response.
        :rtype: types.FunctionType
        """
        ResponseValidator = self.validator_map['response']
        return ResponseValidator(self, self.get_mimetype())
    def convert_type(self, value, _type, _format=None):
        """
        Convert the input value to the corresponding python type.
        :param value: The raw input value from the HTTP request.
        :param _type: The type of the property as defined in the schema.
        :param _format: The optional format of the property as defined in the schema.
        :return: The input value converted to the python type.
        """
        typed_value = make_type(value, _type)
        # Apply a format-specific converter only when one is registered for
        # both the type and the format; otherwise keep the plain typed value.
        type_converters = self.format_converters.get(_type)
        if not type_converters:
            return typed_value
        format_converter = type_converters.get(_format)
        if not format_converter:
            return typed_value
        return format_converter(_type, _format, value)
    def json_loads(self, data):
        """
        A wrapper for calling the API specific JSON loader.
        :param data: The JSON data in textual form.
        :type data: bytes
        """
        return self.api.json_loads(data)
| 34.932914 | 120 | 0.63434 | import abc
import logging
from connexion.operations.secure import SecureOperation
from ..decorators.metrics import UWSGIMetricsCollector
from ..decorators.parameter import parameter_to_arg
from ..decorators.produces import BaseSerializer, Produces
from ..decorators.response import ResponseValidator
from ..decorators.validation import ParameterValidator, RequestBodyValidator
from ..utils import all_json, is_nullable, make_type
logger = logging.getLogger('connexion.operations.abstract')
# Fallback Content-Type used when the spec does not narrow `produces`.
DEFAULT_MIMETYPE = 'application/json'
# Default validator classes; can be overridden per operation via validator_map.
VALIDATOR_MAP = {
    'parameter': ParameterValidator,
    'body': RequestBodyValidator,
    'response': ResponseValidator,
}
class AbstractOperation(SecureOperation, metaclass=abc.ABCMeta):
def __init__(self, api, method, path, operation, resolver,
app_security=None, security_schemes=None,
validate_responses=False, strict_validation=False,
randomize_endpoint=None, validator_map=None,
format_converters=None, pythonic_params=False,
uri_parser_class=None, pass_context_arg_name=None):
self._api = api
self._method = method
self._path = path
self._operation = operation
self._resolver = resolver
self._security = app_security
self._security_schemes = security_schemes
self._validate_responses = validate_responses
self._strict_validation = strict_validation
self._pythonic_params = pythonic_params
self._uri_parser_class = uri_parser_class
self._pass_context_arg_name = pass_context_arg_name
self._randomize_endpoint = randomize_endpoint
self._operation_id = self._operation.get("operationId")
self._resolution = resolver.resolve(self)
self._operation_id = self._resolution.operation_id
self._responses = self._operation.get("responses", {})
self._validator_map = dict(VALIDATOR_MAP)
self._validator_map.update(validator_map or {})
self._format_converters = format_converters or {}
@property
def method(self):
return self._method
@property
def path(self):
return self._path
@property
def responses(self):
return self._responses
@property
def validator_map(self):
return self._validator_map
@property
def format_converters(self):
return self._format_converters
@property
def operation_id(self):
return self._operation_id
@property
def randomize_endpoint(self):
return self._randomize_endpoint
@property
def router_controller(self):
return self._router_controller
@property
def strict_validation(self):
return self._strict_validation
@property
def pythonic_params(self):
return self._pythonic_params
@property
def validate_responses(self):
return self._validate_responses
@staticmethod
def _get_file_arguments(files, arguments, has_kwargs=False):
return {k: v for k, v in files.items() if k in arguments or has_kwargs}
@abc.abstractmethod
def _get_val_from_param(self, value, query_defn):
def _query_args_helper(self, query_defns, query_arguments,
function_arguments, has_kwargs, sanitize):
res = {}
for key, value in query_arguments.items():
key = sanitize(key)
if not has_kwargs and key not in function_arguments:
logger.debug("Query Parameter '%s' not in function arguments", key)
else:
logger.debug("Query Parameter '%s' in function arguments", key)
try:
query_defn = query_defns[key]
except KeyError:
logger.error("Function argument '{}' not defined in specification".format(key))
else:
logger.debug('%s is a %s', key, query_defn)
res.update({key: self._get_val_from_param(value, query_defn)})
return res
@abc.abstractmethod
def _get_query_arguments(self, query, arguments, has_kwargs, sanitize):
@abc.abstractmethod
def _get_body_argument(self, body, arguments, has_kwargs, sanitize):
def _get_path_arguments(self, path_params, sanitize):
kwargs = {}
path_defns = {p["name"]: p for p in self.parameters if p["in"] == "path"}
for key, value in path_params.items():
sanitized_key = sanitize(key)
if key in path_defns:
kwargs[sanitized_key] = self._get_val_from_param(value, path_defns[key])
else:
kwargs[sanitized_key] = value
return kwargs
@abc.abstractproperty
def parameters(self):
@abc.abstractproperty
def produces(self):
@abc.abstractproperty
def consumes(self):
@abc.abstractproperty
def body_schema(self):
@abc.abstractproperty
def body_definition(self):
def get_arguments(self, path_params, query_params, body, files, arguments,
has_kwargs, sanitize):
ret = {}
ret.update(self._get_path_arguments(path_params, sanitize))
ret.update(self._get_query_arguments(query_params, arguments,
has_kwargs, sanitize))
if self.method.upper() in ["PATCH", "POST", "PUT"]:
ret.update(self._get_body_argument(body, arguments,
has_kwargs, sanitize))
ret.update(self._get_file_arguments(files, arguments, has_kwargs))
return ret
def response_definition(self, status_code=None,
content_type=None):
content_type = content_type or self.get_mimetype()
response_definition = self.responses.get(
str(status_code),
self.responses.get("default", {})
)
return response_definition
@abc.abstractmethod
def response_schema(self, status_code=None, content_type=None):
@abc.abstractmethod
def example_response(self, status_code=None, content_type=None):
@abc.abstractmethod
def get_path_parameter_types(self):
@abc.abstractmethod
def with_definitions(self, schema):
def get_mimetype(self):
if all_json(self.produces):
try:
return self.produces[0]
except IndexError:
return DEFAULT_MIMETYPE
elif len(self.produces) == 1:
return self.produces[0]
else:
return DEFAULT_MIMETYPE
@property
def _uri_parsing_decorator(self):
return self._uri_parser_class(self.parameters, self.body_definition)
@property
def function(self):
function = parameter_to_arg(
self, self._resolution.function, self.pythonic_params,
self._pass_context_arg_name
)
if self.validate_responses:
logger.debug('... Response validation enabled.')
response_decorator = self.__response_validation_decorator
logger.debug('... Adding response decorator (%r)', response_decorator)
function = response_decorator(function)
produces_decorator = self.__content_type_decorator
logger.debug('... Adding produces decorator (%r)', produces_decorator)
function = produces_decorator(function)
for validation_decorator in self.__validation_decorators:
function = validation_decorator(function)
uri_parsing_decorator = self._uri_parsing_decorator
function = uri_parsing_decorator(function)
security_decorator = self.security_decorator
logger.debug('... Adding security decorator (%r)', security_decorator)
function = security_decorator(function)
function = self._request_response_decorator(function)
if UWSGIMetricsCollector.is_available():
decorator = UWSGIMetricsCollector(self.path, self.method)
function = decorator(function)
return function
@property
def __content_type_decorator(self):
logger.debug('... Produces: %s', self.produces, extra=vars(self))
mimetype = self.get_mimetype()
if all_json(self.produces):
logger.debug('... Produces json', extra=vars(self))
return lambda f: f
elif len(self.produces) == 1:
logger.debug('... Produces %s', mimetype, extra=vars(self))
decorator = Produces(mimetype)
return decorator
else:
return BaseSerializer()
@property
def __validation_decorators(self):
ParameterValidator = self.validator_map['parameter']
RequestBodyValidator = self.validator_map['body']
if self.parameters:
yield ParameterValidator(self.parameters,
self.api,
strict_validation=self.strict_validation)
if self.body_schema:
yield RequestBodyValidator(self.body_schema, self.consumes, self.api,
is_nullable(self.body_definition),
strict_validation=self.strict_validation)
@property
def __response_validation_decorator(self):
ResponseValidator = self.validator_map['response']
return ResponseValidator(self, self.get_mimetype())
def convert_type(self, value, _type, _format=None):
typed_value = make_type(value, _type)
type_converters = self.format_converters.get(_type)
if not type_converters:
return typed_value
format_converter = type_converters.get(_format)
if not format_converter:
return typed_value
return format_converter(_type, _format, value)
def json_loads(self, data):
return self.api.json_loads(data)
| true | true |
f7159bc7a6e447bf791158449870039af24b7945 | 2,451 | py | Python | examples/python/lis2ds12.py | moredu/upm | d6f76ff8c231417666594214679c49399513112e | [
"MIT"
] | 619 | 2015-01-14T23:50:18.000Z | 2019-11-08T14:04:33.000Z | examples/python/lis2ds12.py | moredu/upm | d6f76ff8c231417666594214679c49399513112e | [
"MIT"
] | 576 | 2015-01-02T09:55:14.000Z | 2019-11-12T15:31:10.000Z | examples/python/lis2ds12.py | moredu/upm | d6f76ff8c231417666594214679c49399513112e | [
"MIT"
] | 494 | 2015-01-14T18:33:56.000Z | 2019-11-07T10:08:15.000Z | #!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016-2017 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lis2ds12 as sensorObj
def main():
    """Poll a LIS2DS12 accelerometer and print readings until interrupted."""
    # Default constructor: I2C bus and address.  For SPI on bus 0 you would
    # pass -1 as the address plus a valid chip-select pin, e.g.
    # LIS2DS12(0, -1, 10).
    sensor = sensorObj.LIS2DS12()

    def on_sigint(signum, frame):
        # Exit quietly on Ctrl-C instead of printing a stacktrace.
        raise SystemExit

    def on_exit():
        # Runs when the interpreter shuts down.
        print("Exiting")
        sys.exit(0)

    # Register the exit/interrupt handlers.
    atexit.register(on_exit)
    signal.signal(signal.SIGINT, on_sigint)

    # Emit one reading every 250 milliseconds.
    while True:
        sensor.update()
        data = sensor.getAccelerometer()
        print("Accelerometer x:", data[0], end=' ')
        print(" y:", data[1], end=' ')
        print(" z:", data[2], end=' ')
        print(" g")
        # Show the compensation temperature in both Celsius and Fahrenheit.
        print("Compensation Temperature:", sensor.getTemperature(), "C /", end=' ')
        print(sensor.getTemperature(True), "F")
        print()
        time.sleep(.250)


if __name__ == '__main__':
    main()
| 35.521739 | 83 | 0.696042 |
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lis2ds12 as sensorObj
def main():
sensor = sensorObj.LIS2DS12()
GINTHandler(signum, frame):
raise SystemExit
def exitHandler():
print("Exiting")
sys.exit(0)
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while (1):
sensor.update()
data = sensor.getAccelerometer()
print("Accelerometer x:", data[0], end=' ')
print(" y:", data[1], end=' ')
print(" z:", data[2], end=' ')
print(" g")
print("Compensation Temperature:", sensor.getTemperature(), "C /", end=' ')
print(sensor.getTemperature(True), "F")
print()
time.sleep(.250)
if __name__ == '__main__':
main()
| true | true |
f7159c0f90f16cb4e374669e5d3e907a7304876f | 8,515 | py | Python | pcdet/datasets/augmentor/data_augmentor.py | Jasonkks/mlcnet | 8f89c860c709733c8baa663607004fc48d76291d | [
"Apache-2.0"
] | 18 | 2021-11-30T15:19:53.000Z | 2022-03-30T15:15:57.000Z | pcdet/datasets/augmentor/data_augmentor.py | Jasonkks/mlcnet | 8f89c860c709733c8baa663607004fc48d76291d | [
"Apache-2.0"
] | 2 | 2021-12-10T06:38:18.000Z | 2022-03-27T21:45:53.000Z | pcdet/datasets/augmentor/data_augmentor.py | Jasonkks/mlcnet | 8f89c860c709733c8baa663607004fc48d76291d | [
"Apache-2.0"
] | 3 | 2021-12-01T06:25:52.000Z | 2022-01-21T14:13:51.000Z | from functools import partial
import torch
import random
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
    """Chains configurable point-cloud / 3D-box augmentations.

    Each config entry's NAME must match a method of this class.  At
    construction time that method is called with only ``config=...``, which
    makes it return a ``functools.partial`` of itself bound to the config
    (``gt_sampling`` instead returns a sampler object); ``forward`` later
    applies every queued entry to a ``data_dict``.
    """
    def __init__(self, root_path, augmentor_configs, class_names, logger=None):
        """
        Args:
            root_path: dataset root path, forwarded to the gt-sampling database.
            augmentor_configs: either a plain list of augmentor configs, or an
                object exposing AUG_CONFIG_LIST and DISABLE_AUG_LIST.
            class_names: class names forwarded to the gt sampler.
            logger: optional logger (dropped when pickling, see __getstate__).
        """
        self.root_path = root_path
        self.class_names = class_names
        self.logger = logger
        self.data_augmentor_queue = []
        aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
            else augmentor_configs.AUG_CONFIG_LIST
        for cur_cfg in aug_config_list:
            if not isinstance(augmentor_configs, list):
                # Config objects can blacklist individual augmentors by name.
                if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
                    continue
            # Calling the augmentor factory without data returns the callable
            # that will later be applied to each sample.
            cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
            self.data_augmentor_queue.append(cur_augmentor)
    def gt_sampling(self, config=None):
        """Build a ground-truth database sampler from *config*.

        Unlike the other augmentor factories this returns a DataBaseSampler
        instance directly rather than a functools.partial.
        """
        db_sampler = database_sampler.DataBaseSampler(
            root_path=self.root_path,
            sampler_cfg=config,
            class_names=self.class_names,
            logger=self.logger
        )
        return db_sampler
    def __getstate__(self):
        # Pickle everything except the logger.
        d = dict(self.__dict__)
        del d['logger']
        return d
    def __setstate__(self, d):
        # NOTE(review): 'logger' is not restored here, so an unpickled
        # instance has no .logger attribute until one is reassigned — confirm
        # no code path reads it after unpickling.
        self.__dict__.update(d)
    def object_size_normalization(self, data_dict=None, config=None):
        """Grow/shrink every gt box (and the points inside it) by a fixed
        per-dimension size OFFSET, keeping each box center fixed.

        Called with data_dict=None, returns a partial bound to *config*.
        Statement order matters below: scene points are re-filtered both
        before and after the boxes are scaled, then the rescaled object
        points are pasted back in.
        """
        if data_dict is None:
            return partial(self.object_size_normalization, config=config)
        gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
        # Only the 7 geometric box parameters are used here.
        if gt_boxes.shape[1] > 7:
            gt_boxes = gt_boxes[:,:7]
        offset = np.array(config['OFFSET'])
        # get masks of points inside boxes
        point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
            torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()
        num_obj = gt_boxes.shape[0]
        obj_points_list = []
        gt_boxes_size = gt_boxes[:, 3:6]
        new_gt_boxes_size = gt_boxes_size + offset
        # Per-box multiplicative factor that realizes the additive OFFSET.
        scale_factor = new_gt_boxes_size / gt_boxes_size
        # scale the objects' interior points about each box center
        for i in range(num_obj):
            point_mask = point_masks[i]
            obj_points = points[point_mask > 0]  # object points within the gt box
            obj_points[:, :3] -= gt_boxes[i, :3]  # relative to box center
            obj_points[:, :3] *= scale_factor[i]  # scale
            obj_points[:, :3] += gt_boxes[i, :3]  # back to global coordinate
            obj_points_list.append(obj_points)
        # remove scene points inside the original boxes
        points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
        # scale the boxes themselves
        gt_boxes[:, 3:6] *= scale_factor
        # remove scene points that fall inside the (possibly enlarged) boxes
        points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
        # merge the rescaled object points back into the scene
        obj_points = np.concatenate(obj_points_list, axis=0)
        points = np.concatenate([points, obj_points], axis=0)
        data_dict['points'] = points
        data_dict['gt_boxes'][:,:7] = gt_boxes
        return data_dict
    def random_world_flip(self, data_dict=None, config=None):
        """Randomly flip the scene (points, and boxes when present) along
        each axis in config['ALONG_AXIS_LIST'].

        NOTE(review): only the flag of the *last* axis processed is stored
        in 'world_flip_enabled', and an empty axis list would leave that
        name unbound (NameError) — confirm configs always list >= 1 axis.
        """
        if data_dict is None:
            return partial(self.random_world_flip, config=config)
        gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
        points = data_dict['points']
        for cur_axis in config['ALONG_AXIS_LIST']:
            assert cur_axis in ['x', 'y']
            if 'gt_boxes' in data_dict:
                gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
                    gt_boxes, points, return_enable=True
                )
            else:
                # Points-only variant for samples without box annotations.
                points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(
                    points, return_enable=True
                )
        if 'gt_boxes' in data_dict:
            data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        data_dict['world_flip_enabled'] = world_flip_enabled
        return data_dict
    def random_world_rotation(self, data_dict=None, config=None):
        """Rotate the whole scene by a random angle drawn from
        WORLD_ROT_ANGLE (a scalar r is expanded to the range [-r, r]).
        The drawn angle is recorded under 'world_rotation'.
        """
        if data_dict is None:
            return partial(self.random_world_rotation, config=config)
        rot_range = config['WORLD_ROT_ANGLE']
        if not isinstance(rot_range, list):
            rot_range = [-rot_range, rot_range]
        if 'gt_boxes' in data_dict:
            gt_boxes, points, world_rotation = augmentor_utils.global_rotation(
                data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True
            )
        else:
            points, world_rotation = augmentor_utils.global_rotation_points(
                data_dict['points'], rot_range=rot_range, return_rotation=True
            )
        if 'gt_boxes' in data_dict:
            data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        data_dict['world_rotation'] = world_rotation
        return data_dict
    def random_world_scaling(self, data_dict=None, config=None):
        """Uniformly scale the whole scene by a random factor drawn from
        WORLD_SCALE_RANGE; the factor is recorded under 'world_scaling'.
        """
        if data_dict is None:
            return partial(self.random_world_scaling, config=config)
        if 'gt_boxes' in data_dict:
            gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(
                data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
            )
        else:
            points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])
        data_dict['world_scaling'] = scale_ratio
        if 'gt_boxes' in data_dict:
            data_dict['gt_boxes'] = gt_boxes
        data_dict['points'] = points
        return data_dict
    def random_world_scaling_xyz(self, data_dict=None, config=None):
        """Scale the scene anisotropically: one independent factor per axis,
        drawn uniformly from SCALE_RANGE, applied in place to point xyz and
        to box centers and sizes; recorded under 'world_scaling_xyz'.

        NOTE(review): box headings (gt_boxes[:, 6]) are left untouched even
        though anisotropic xy scaling changes orientations — confirm this is
        intended for the scale ranges used.
        """
        if data_dict is None:
            return partial(self.random_world_scaling_xyz, config=config)
        gt_boxes = data_dict['gt_boxes']
        points = data_dict['points']
        scale_range = config['SCALE_RANGE']
        noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)
        points[:, :3] *= noise_scale
        gt_boxes[:, :3] *= noise_scale
        gt_boxes[:, 3:6] *= noise_scale
        data_dict['points'] = points
        data_dict['gt_boxes'] = gt_boxes
        data_dict['world_scaling_xyz'] = noise_scale
        return data_dict
    def jitter_point_cloud(self, data_dict=None, config=None):
        """Add clipped Gaussian noise (std SIGMA, clipped to ±CLIP) to the
        points, in place; the noise matrix is stored under 'jitter_values'.

        NOTE(review): noise covers every point column (shape[1]), not just
        xyz — intensity/extra features are jittered too; confirm intended.
        """
        if data_dict is None:
            return partial(self.jitter_point_cloud, config=config)
        sigma = config['SIGMA']
        clip = config['CLIP']
        assert(clip > 0)
        points = data_dict['points']
        jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)
        points += jittered_data
        data_dict['points'] = points
        data_dict['jittered'] = True
        data_dict['jitter_values'] = jittered_data
        return data_dict
    def random_world_shift(self, data_dict=None, config=None):
        """Translate the scene by a random per-axis shift drawn uniformly
        from [-RANGE, RANGE]; the shift vector is stored under 'world_shifts'.

        NOTE(review): ``points += shifts`` broadcasts a length-3 vector, so
        this only works when points have exactly 3 columns; a (N, 3+C)
        array with feature columns would raise — confirm the point format.
        """
        if data_dict is None:
            return partial(self.random_world_shift, config=config)
        shift_range = config['RANGE']
        shifts = np.random.uniform(-shift_range, shift_range, 3)
        data_dict['points'] += shifts
        data_dict['world_shifts'] = shifts
        return data_dict
    def forward(self, data_dict, augment=True):
        """Apply the queued augmentors, then normalize headings and strip
        bookkeeping entries.

        Args:
            data_dict:
                points: (N, 3 + C_in)
                gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
                gt_names: optional, (N), string
                ...
            augment: when False, the augmentor queue is skipped but the
                heading normalization / cleanup below still runs.
        Returns:
            The same data_dict, modified in place: headings passed through
            common_utils.limit_period(offset=0.5, period=2*pi), 'road_plane'
            removed, and gt boxes/names filtered by 'gt_boxes_mask' (the
            mask itself is popped afterwards).
        """
        if augment:
            for cur_augmentor in self.data_augmentor_queue:
                data_dict = cur_augmentor(data_dict=data_dict)
        if 'gt_boxes' in data_dict:
            data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
                data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
            )
        if 'road_plane' in data_dict:
            data_dict.pop('road_plane')
        if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:
            gt_boxes_mask = data_dict['gt_boxes_mask']
            data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
            data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
            data_dict.pop('gt_boxes_mask')
        return data_dict
| 40.165094 | 121 | 0.622548 | from functools import partial
import torch
import random
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def object_size_normalization(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.object_size_normalization, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
if gt_boxes.shape[1] > 7:
gt_boxes = gt_boxes[:,:7]
offset = np.array(config['OFFSET'])
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()
num_obj = gt_boxes.shape[0]
obj_points_list = []
gt_boxes_size = gt_boxes[:, 3:6]
new_gt_boxes_size = gt_boxes_size + offset
scale_factor = new_gt_boxes_size / gt_boxes_size
for i in range(num_obj):
point_mask = point_masks[i]
obj_points = points[point_mask > 0]
obj_points[:, :3] -= gt_boxes[i, :3]
obj_points[:, :3] *= scale_factor[i]
obj_points[:, :3] += gt_boxes[i, :3]
obj_points_list.append(obj_points)
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
gt_boxes[:, 3:6] *= scale_factor
points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
obj_points = np.concatenate(obj_points_list, axis=0)
points = np.concatenate([points, obj_points], axis=0)
data_dict['points'] = points
data_dict['gt_boxes'][:,:7] = gt_boxes
return data_dict
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
points = data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
if 'gt_boxes' in data_dict:
gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points, return_enable=True
)
else:
points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(
points, return_enable=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_flip_enabled'] = world_flip_enabled
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
if 'gt_boxes' in data_dict:
gt_boxes, points, world_rotation = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True
)
else:
points, world_rotation = augmentor_utils.global_rotation_points(
data_dict['points'], rot_range=rot_range, return_rotation=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_rotation'] = world_rotation
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
if 'gt_boxes' in data_dict:
gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
)
else:
points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])
data_dict['world_scaling'] = scale_ratio
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling_xyz(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling_xyz, config=config)
gt_boxes = data_dict['gt_boxes']
points = data_dict['points']
scale_range = config['SCALE_RANGE']
noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)
points[:, :3] *= noise_scale
gt_boxes[:, :3] *= noise_scale
gt_boxes[:, 3:6] *= noise_scale
data_dict['points'] = points
data_dict['gt_boxes'] = gt_boxes
data_dict['world_scaling_xyz'] = noise_scale
return data_dict
def jitter_point_cloud(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.jitter_point_cloud, config=config)
sigma = config['SIGMA']
clip = config['CLIP']
assert(clip > 0)
points = data_dict['points']
jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)
points += jittered_data
data_dict['points'] = points
data_dict['jittered'] = True
data_dict['jitter_values'] = jittered_data
return data_dict
def random_world_shift(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_shift, config=config)
shift_range = config['RANGE']
shifts = np.random.uniform(-shift_range, shift_range, 3)
data_dict['points'] += shifts
data_dict['world_shifts'] = shifts
return data_dict
def forward(self, data_dict, augment=True):
if augment:
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
| true | true |
f7159c350fdf2aa74b7565b424ed07b5ef99b118 | 733 | py | Python | services/migrations/0010_auto_20170729_0711.py | iesteban/bitcoin_bazaar_backend | 2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0 | [
"MIT"
] | 18 | 2017-03-08T06:30:55.000Z | 2020-05-08T17:30:20.000Z | services/migrations/0010_auto_20170729_0711.py | iesteban/bitcoin_bazaar_backend | 2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0 | [
"MIT"
] | 871 | 2017-03-06T21:03:59.000Z | 2022-03-28T19:46:44.000Z | services/migrations/0010_auto_20170729_0711.py | iesteban/bitcoin_bazaar_backend | 2aa7c61d8727dae3a9be4b19c4b2aa49ec7ecaa0 | [
"MIT"
] | 5 | 2017-07-07T12:10:47.000Z | 2020-05-13T15:57:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-29 07:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0009_auto_20170617_1557'),
]
operations = [
migrations.AddField(
model_name='category',
name='name_en',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
migrations.AddField(
model_name='category',
name='name_es',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
]
| 28.192308 | 113 | 0.618008 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0009_auto_20170617_1557'),
]
operations = [
migrations.AddField(
model_name='category',
name='name_en',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
migrations.AddField(
model_name='category',
name='name_es',
field=models.CharField(help_text='A name for the category.', max_length=100, null=True, unique=True),
),
]
| true | true |
f7159d3af512db4cfc343827849b64501a5eca32 | 2,770 | py | Python | apps/project/subviews/bug.py | gvizquel/pyerp | c859f7293cabd1003f79112463cee93ac89fccba | [
"MIT"
] | null | null | null | apps/project/subviews/bug.py | gvizquel/pyerp | c859f7293cabd1003f79112463cee93ac89fccba | [
"MIT"
] | 11 | 2020-06-05T22:50:37.000Z | 2022-02-10T09:05:56.000Z | apps/project/subviews/bug.py | gvizquel/pyerp | c859f7293cabd1003f79112463cee93ac89fccba | [
"MIT"
] | null | null | null | # Librerias Django
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
# Librerias en carpetas locales
from ..submodels.bug import PyBug
""" BEGIN BUG """
BUG_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Usuario', 'field': 'user_id'},
{'string': 'Notas', 'field': 'note'},
]
BUG_FIELDS_SHORT = ['name','state','user_id','note']
class BugListView(LoginRequiredMixin, ListView):
    """Login-protected list of PyBug records (generic list template)."""
    model = PyBug
    template_name = 'erp/list.html'
    login_url = "/erp/login"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'title': 'Errores',
            'detail_url': 'bug-detail',
            'add_url': 'bug-add',
            'fields': BUG_FIELDS,
        })
        return context
class BugDetailView(LoginRequiredMixin, DetailView):
    """Login-protected detail page for a single PyBug."""
    model = PyBug
    template_name = 'erp/detail.html'
    login_url = "/erp/login"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'title': context['object'].name,
            'breadcrumbs': [{'url': 'bug', 'name': 'Error'}],
            'update_url': 'bug-update',
            'delete_url': 'bug-delete',
            'fields': BUG_FIELDS,
        })
        return context
class BugCreateView(LoginRequiredMixin, CreateView):
    """Login-protected creation form for PyBug."""
    model = PyBug
    fields = BUG_FIELDS_SHORT
    template_name = 'erp/form.html'
    login_url = "/erp/login"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'title': 'Crear Error',
            'breadcrumbs': [{'url': 'bug', 'name': 'Error'}],
            'back_url': reverse('bug'),
        })
        return context
class BugUpdateView(LoginRequiredMixin, UpdateView):
    """Login-protected edit form for an existing PyBug."""
    model = PyBug
    fields = BUG_FIELDS_SHORT
    template_name = 'erp/form.html'
    login_url = "/erp/login"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'title': context['object'].name,
            'breadcrumbs': [{'url': 'bug', 'name': 'Error'}],
            'back_url': reverse('bug-detail', kwargs={'pk': context['object'].pk}),
        })
        return context
@login_required(login_url="/erp/login")
def DeleteBug(self, pk):
    """Delete the PyBug identified by *pk* and redirect to the bug list.

    NOTE: this is a function-based view; the first parameter (kept as
    ``self`` for backward compatibility) is actually the HttpRequest.
    """
    # get_object_or_404 turns a missing id into an HTTP 404 instead of an
    # unhandled PyBug.DoesNotExist (HTTP 500).
    bug = get_object_or_404(PyBug, id=pk)
    bug.delete()
    return redirect(reverse('bug'))
| 33.780488 | 88 | 0.648014 |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from ..submodels.bug import PyBug
BUG_FIELDS = [
{'string': 'Nombre', 'field': 'name'},
{'string': 'Estado', 'field': 'state'},
{'string': 'Usuario', 'field': 'user_id'},
{'string': 'Notas', 'field': 'note'},
]
BUG_FIELDS_SHORT = ['name','state','user_id','note']
class BugListView(LoginRequiredMixin, ListView):
model = PyBug
template_name = 'erp/list.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugListView, self).get_context_data(**kwargs)
context['title'] = 'Errores'
context['detail_url'] = 'bug-detail'
context['add_url'] = 'bug-add'
context['fields'] = BUG_FIELDS
return context
class BugDetailView(LoginRequiredMixin, DetailView):
model = PyBug
template_name = 'erp/detail.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugDetailView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['update_url'] = 'bug-update'
context['delete_url'] = 'bug-delete'
context['fields'] = BUG_FIELDS
return context
class BugCreateView(LoginRequiredMixin, CreateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugCreateView, self).get_context_data(**kwargs)
context['title'] = 'Crear Error'
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug')
return context
class BugUpdateView(LoginRequiredMixin, UpdateView):
model = PyBug
fields = BUG_FIELDS_SHORT
template_name = 'erp/form.html'
login_url = "/erp/login"
def get_context_data(self, **kwargs):
context = super(BugUpdateView, self).get_context_data(**kwargs)
context['title'] = context['object'].name
context['breadcrumbs'] = [{'url': 'bug', 'name': 'Error'}]
context['back_url'] = reverse('bug-detail', kwargs={'pk': context['object'].pk})
return context
@login_required(login_url="/erp/login")
def DeleteBug(self, pk):
bug = PyBug.objects.get(id=pk)
bug.delete()
return redirect(reverse('bug'))
| true | true |
f7159d5a2d920dc9cc5bb8cc18005b68413166a5 | 2,828 | py | Python | Efficient-3DCNNs/thop/count_hooks.py | reetikaag/human-activity-recognition | 1e6760a88ca52fe9a8a8ca60d000cd3426851156 | [
"MIT"
] | null | null | null | Efficient-3DCNNs/thop/count_hooks.py | reetikaag/human-activity-recognition | 1e6760a88ca52fe9a8a8ca60d000cd3426851156 | [
"MIT"
] | null | null | null | Efficient-3DCNNs/thop/count_hooks.py | reetikaag/human-activity-recognition | 1e6760a88ca52fe9a8a8ca60d000cd3426851156 | [
"MIT"
] | null | null | null | import argparse
import torch
import torch.nn as nn
multiply_adds = 1  # count one fused multiply-add as a single operation


def count_conv2d(m, x, y):
    """Accumulate the op count of a 2-D convolution into ``m.total_ops``.

    Args:
        m: the ``nn.Conv2d`` being profiled; must expose ``in_channels``,
           ``out_channels``, ``kernel_size``, ``groups``, ``bias`` and carry
           a ``total_ops`` tensor counter.
        x: tuple of input tensors; only ``x[0]`` (NCHW) is used.
        y: output tensor of the convolution (NCHW).

    Returns:
        None; the count is accumulated in-place on ``m.total_ops`` so the
        total survives the same conv being used multiple times.
    """
    # TODO: add support for pad and dilation
    x = x[0]

    cin = m.in_channels
    cout = m.out_channels
    kh, kw = m.kernel_size
    batch_size = x.size()[0]

    # NCHW: dim 2 is height, dim 3 is width (the original had the two
    # local names swapped; the product was unaffected).
    out_h = y.size(2)
    out_w = y.size(3)

    # ops per output element: one (fused) multiply-add per kernel weight,
    # shared across groups, plus one add if a bias is present.
    kernel_ops = multiply_adds * kh * kw * cin // m.groups
    bias_ops = 1 if m.bias is not None else 0
    ops_per_element = kernel_ops + bias_ops

    # total ops over every element of the output tensor
    output_elements = batch_size * out_h * out_w * cout
    total_ops = output_elements * ops_per_element

    # Accumulate on whatever device the counter already lives on, so the
    # hook works whether the model sits on CPU or CUDA (the sibling 3-D
    # counters hard-coded "cuda"; this one previously assumed CPU).
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_conv3d(m, x, y):
    """Accumulate the op count of a 3-D convolution into ``m.total_ops``.

    Args:
        m: the ``nn.Conv3d`` being profiled (``in_channels``,
           ``out_channels``, ``kernel_size``, ``groups``, ``bias`` and a
           ``total_ops`` tensor counter).
        x: tuple of input tensors; only ``x[0]`` (NCDHW) is used.
        y: output tensor of the convolution (NCDHW).

    Returns:
        None; the count is accumulated in-place on ``m.total_ops``.
    """
    # TODO: add support for pad and dilation
    x = x[0]

    cin = m.in_channels
    cout = m.out_channels
    kd, kh, kw = m.kernel_size
    batch_size = x.size()[0]

    # NCDHW: dim 2 is depth, dim 3 is height, dim 4 is width (the original
    # had the h/w local names swapped; the product was unaffected).
    out_d = y.size(2)
    out_h = y.size(3)
    out_w = y.size(4)

    # ops per output element: one multiply-add per kernel weight, shared
    # across groups, plus one add if a bias is present.
    kernel_ops = multiply_adds * kd * kh * kw * cin // m.groups
    bias_ops = 1 if m.bias is not None else 0
    ops_per_element = kernel_ops + bias_ops

    # total ops over the whole output volume
    output_elements = batch_size * out_d * out_h * out_w * cout
    total_ops = output_elements * ops_per_element

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_bn2d(m, x, y):
    """Accumulate batch-norm ops into ``m.total_ops``.

    Counted as one subtract (mean) and one divide (std) per input element;
    the affine scale/shift, if enabled, is not counted — matching the
    original estimate.
    """
    x = x[0]

    nelements = x.numel()
    total_sub = nelements
    total_div = nelements
    total_ops = total_sub + total_div

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_relu(m, x, y):
    """Accumulate ReLU ops into ``m.total_ops``: one op per input element."""
    x = x[0]

    total_ops = x.numel()

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_softmax(m, x, y):
    """Accumulate softmax ops into ``m.total_ops`` for a 2-D input.

    Per (batch) row of an ``(batch, features)`` input: one exp per feature,
    ``features - 1`` adds for the normalising sum, and one divide per
    feature.
    """
    x = x[0]

    batch_size, nfeatures = x.size()

    total_exp = nfeatures
    total_add = nfeatures - 1
    total_div = nfeatures
    total_ops = batch_size * (total_exp + total_add + total_div)

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_maxpool(m, x, y):
    """Accumulate max-pool ops into ``m.total_ops``.

    Counted as (pooling-window size - 1) comparisons per output element.
    NOTE(review): when ``m.kernel_size`` is a plain int (e.g.
    ``MaxPool2d(2)``), ``torch.Tensor([2])`` under-counts the 2-D window;
    pass a tuple kernel size for an exact count.
    """
    kernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_avgpool(m, x, y):
    """Accumulate average-pool ops into ``m.total_ops``.

    Counted as (window size - 1) adds plus one divide per output element.
    NOTE(review): like ``count_maxpool``, an int ``kernel_size`` is treated
    as 1-D; pass a tuple for an exact 2-D/3-D count.
    """
    total_add = torch.prod(torch.Tensor([m.kernel_size])) - 1
    total_div = 1
    kernel_ops = total_add + total_div
    num_elements = y.numel()
    total_ops = kernel_ops * num_elements

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
def count_linear(m, x, y):
    """Accumulate fully-connected ops into ``m.total_ops``.

    Per output element: ``in_features`` multiplies and ``in_features - 1``
    adds (bias is not counted — matching the original estimate).
    """
    total_mul = m.in_features
    total_add = m.in_features - 1
    num_elements = y.numel()
    total_ops = (total_mul + total_add) * num_elements

    # Fix: accumulate on the counter's own device instead of hard-coding
    # "cuda", which crashed on CPU-only machines.
    m.total_ops += torch.Tensor([int(total_ops)]).to(m.total_ops.device)
| 22.624 | 61 | 0.684936 | import argparse
import torch
import torch.nn as nn
multiply_adds = 1
def count_conv2d(m, x, y):
x = x[0]
cin = m.in_channels
cout = m.out_channels
kh, kw = m.kernel_size
batch_size = x.size()[0]
out_w = y.size(2)
out_h = y.size(3)
kernel_ops = multiply_adds * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_w * out_h * cout
total_ops = output_elements * ops_per_element
m.total_ops += torch.Tensor([int(total_ops)])
def count_conv3d(m, x, y):
x = x[0]
cin = m.in_channels
cout = m.out_channels
kd, kh, kw = m.kernel_size
batch_size = x.size()[0]
out_d = y.size(2)
out_w = y.size(3)
out_h = y.size(4)
kernel_ops = multiply_adds * kd * kh * kw * cin // m.groups
bias_ops = 1 if m.bias is not None else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_d * out_w * out_h * cout
total_ops = output_elements * ops_per_element
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_bn2d(m, x, y):
x = x[0]
nelements = x.numel()
total_sub = nelements
total_div = nelements
total_ops = total_sub + total_div
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_relu(m, x, y):
x = x[0]
nelements = x.numel()
total_ops = nelements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_softmax(m, x, y):
x = x[0]
batch_size, nfeatures = x.size()
total_exp = nfeatures
total_add = nfeatures - 1
total_div = nfeatures
total_ops = batch_size * (total_exp + total_add + total_div)
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_maxpool(m, x, y):
kernel_ops = torch.prod(torch.Tensor([m.kernel_size])) - 1
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_avgpool(m, x, y):
total_add = torch.prod(torch.Tensor([m.kernel_size])) - 1
total_div = 1
kernel_ops = total_add + total_div
num_elements = y.numel()
total_ops = kernel_ops * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
def count_linear(m, x, y):
total_mul = m.in_features
total_add = m.in_features - 1
num_elements = y.numel()
total_ops = (total_mul + total_add) * num_elements
m.total_ops += torch.Tensor([int(total_ops)]).to("cuda")
| true | true |
f7159d9791448f06c36331ba8b9839a880d17d19 | 4,281 | py | Python | ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/test/python/ambari_agent/TestClusterConfigurationCache.py | tqrg-bot/ambari | 05cd35982b30f424cec0b5b9d93bc4709880a3bc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import sys
from ambari_agent.ClusterConfigurationCache import ClusterConfigurationCache
from mock.mock import MagicMock, patch, mock_open, ANY
from unittest import TestCase
class TestClusterConfigurationCache(TestCase):
  """Unit tests for ClusterConfigurationCache's disk-backed read/write paths.

  All file access is mocked out: ``__builtin__.open`` for reads and
  ``os.open``/``os.fdopen`` for the cache's permission-controlled writes.
  (Patching ``__builtin__`` targets Python 2 builtins — consistent with the
  ``mock.mock`` import at the top of the file.)
  """
  # flags and permissions the cache is expected to use when persisting
  # configurations.json (write-only, create, owner read/write).
  o_flags = os.O_WRONLY | os.O_CREAT
  perms = 0o600
  def setUp(self):
    # save original open() method for later use
    self.original_open = open
  def tearDown(self):
    # NOTE(review): '==' is a comparison, not an assignment — this line has
    # no effect; presumably 'sys.stdout = sys.__stdout__' was intended.
    sys.stdout == sys.__stdout__
  @patch("os.path.exists", new = MagicMock(return_value=True))
  @patch("os.path.isfile", new = MagicMock(return_value=True))
  def test_cluster_configuration_cache_initialization(self):
    # The cache should load configurations.json from its directory on
    # construction and answer 'cluster/path' lookups, returning None for
    # unknown clusters or keys.
    configuration_json = '{ "c1" : { "foo-site" : { "foo" : "bar", "foobar" : "baz" } } }'
    open_mock = mock_open(read_data=configuration_json)
    with patch("__builtin__.open", open_mock):
      cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
    open_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json", 'r')
    self.assertEqual('bar', cluster_configuration.get_configuration_value('c1', 'foo-site/foo') )
    self.assertEqual('baz', cluster_configuration.get_configuration_value('c1', 'foo-site/foobar') )
    self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID') )
    self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID/INVALID') )
    self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foo') )
    self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foobar') )
    pass
  @patch("ambari_simplejson.dump")
  def test_cluster_configuration_update(self, json_dump_mock):
    # update_cache() should write the merged configuration to
    # configurations.json with restrictive flags/permissions.
    cluster_configuration = self.__get_cluster_configuration()
    configuration = {'foo-site' :
      { 'bar': 'rendered-bar', 'baz' : 'rendered-baz' }
    }
    osopen_mock, osfdopen_mock = self.__update_cluster_configuration(cluster_configuration, configuration)
    osopen_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json",
      TestClusterConfigurationCache.o_flags,
      TestClusterConfigurationCache.perms);
    # 11 is the fake file descriptor returned by the mocked os.open below
    osfdopen_mock.assert_called_with(11, "w")
    json_dump_mock.assert_called_with({'c1': {'foo-site': {'baz': 'rendered-baz', 'bar': 'rendered-bar'}}}, ANY, indent=2)
    pass
  def __get_cluster_configuration(self):
    """
    Gets an instance of the cluster cache where the file read and write
    operations have been mocked out
    :return:
    """
    with patch("__builtin__.open") as open_mock:
      open_mock.side_effect = self.open_side_effect
      cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
    return cluster_configuration
  # Decorators apply bottom-up, so the mocks arrive in reverse order:
  # osfdopen_mock <- @patch("os.fdopen"), osopen_mock <- @patch("os.open").
  @patch("os.open")
  @patch("os.fdopen")
  def __update_cluster_configuration(self, cluster_configuration, configuration, osfdopen_mock, osopen_mock):
    """
    Updates the configuration cache, using as mock file as the disk based
    cache so that a file is not created during tests
    :return:
    """
    # fake file descriptor; asserted against osfdopen_mock by the caller
    osopen_mock.return_value = 11
    cluster_configuration.update_cache("c1", configuration)
    return osopen_mock, osfdopen_mock
  def open_side_effect(self, file, mode):
    # Intercept writes with a mock file, but let reads fall through to the
    # real open() saved in setUp.
    if mode == 'w':
      file_mock = MagicMock()
      return file_mock
    else:
      return self.original_open(file, mode)
| 38.918182 | 122 | 0.723896 |
import os
import sys
from ambari_agent.ClusterConfigurationCache import ClusterConfigurationCache
from mock.mock import MagicMock, patch, mock_open, ANY
from unittest import TestCase
class TestClusterConfigurationCache(TestCase):
o_flags = os.O_WRONLY | os.O_CREAT
perms = 0o600
def setUp(self):
self.original_open = open
def tearDown(self):
sys.stdout == sys.__stdout__
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("os.path.isfile", new = MagicMock(return_value=True))
def test_cluster_configuration_cache_initialization(self):
configuration_json = '{ "c1" : { "foo-site" : { "foo" : "bar", "foobar" : "baz" } } }'
open_mock = mock_open(read_data=configuration_json)
with patch("__builtin__.open", open_mock):
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
open_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json", 'r')
self.assertEqual('bar', cluster_configuration.get_configuration_value('c1', 'foo-site/foo') )
self.assertEqual('baz', cluster_configuration.get_configuration_value('c1', 'foo-site/foobar') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('c1', 'INVALID/INVALID') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foo') )
self.assertEqual(None, cluster_configuration.get_configuration_value('INVALID', 'foo-site/foobar') )
pass
@patch("ambari_simplejson.dump")
def test_cluster_configuration_update(self, json_dump_mock):
cluster_configuration = self.__get_cluster_configuration()
configuration = {'foo-site' :
{ 'bar': 'rendered-bar', 'baz' : 'rendered-baz' }
}
osopen_mock, osfdopen_mock = self.__update_cluster_configuration(cluster_configuration, configuration)
osopen_mock.assert_called_with(os.sep + "tmp" + os.sep + "bar" + os.sep + "baz" + os.sep + "configurations.json",
TestClusterConfigurationCache.o_flags,
TestClusterConfigurationCache.perms);
osfdopen_mock.assert_called_with(11, "w")
json_dump_mock.assert_called_with({'c1': {'foo-site': {'baz': 'rendered-baz', 'bar': 'rendered-bar'}}}, ANY, indent=2)
pass
def __get_cluster_configuration(self):
with patch("__builtin__.open") as open_mock:
open_mock.side_effect = self.open_side_effect
cluster_configuration = ClusterConfigurationCache(os.path.join(os.sep, "tmp", "bar", "baz"))
return cluster_configuration
@patch("os.open")
@patch("os.fdopen")
def __update_cluster_configuration(self, cluster_configuration, configuration, osfdopen_mock, osopen_mock):
osopen_mock.return_value = 11
cluster_configuration.update_cache("c1", configuration)
return osopen_mock, osfdopen_mock
def open_side_effect(self, file, mode):
if mode == 'w':
file_mock = MagicMock()
return file_mock
else:
return self.original_open(file, mode)
| true | true |
f7159dc06a6352dac967128fe0aa532b3e17b5a1 | 355 | py | Python | nsd1802/python/day04/seqop.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1802/python/day04/seqop.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | nsd1802/python/day04/seqop.py | MrWangwf/nsd1806 | 069e993b0bb64cb21adc2a25aa56f6da674453bc | [
"Apache-2.0"
] | null | null | null | from random import randint
alist = list() # []
list('hello') # ['h', 'e', 'l', 'l', 'o']
list((10, 20, 30)) # [10, 20, 30] 元组转列表
astr = str() # ''
str(10) # '10'
str(['h', 'e', 'l', 'l', 'o']) # 将列表转成字符串
atuple = tuple() # ()
tuple('hello') # ('h', 'e', 'l', 'l', 'o')
num_list = [randint(1, 100) for i in range(10)]
max(num_list)
min(num_list)
| 25.357143 | 47 | 0.498592 | from random import randint
alist = list()
list('hello')
list((10, 20, 30))
astr = str()
str(10)
str(['h', 'e', 'l', 'l', 'o'])
atuple = tuple()
tuple('hello')
num_list = [randint(1, 100) for i in range(10)]
max(num_list)
min(num_list)
| true | true |
f7159dfd3e5220cdf838857e63b85ecb77e79ba3 | 11,938 | py | Python | venv/Lib/site-packages/praw/endpoints.py | Dartok-SD/Dartok-SD-s-reddit-bot | dc7a3215c062ed95b9f44bc207383e776c1692ea | [
"MIT"
] | null | null | null | venv/Lib/site-packages/praw/endpoints.py | Dartok-SD/Dartok-SD-s-reddit-bot | dc7a3215c062ed95b9f44bc207383e776c1692ea | [
"MIT"
] | 1 | 2020-11-26T18:38:13.000Z | 2020-11-27T15:25:49.000Z | praw/endpoints.py | leviroth/praw | 8f05dd2a9188cbaf1fba067e429ad6552d952059 | [
"BSD-2-Clause"
] | null | null | null | """List of API endpoints PRAW knows about."""
# flake8: noqa
# fmt: off
API_PATH = {
"about_edited": "r/{subreddit}/about/edited/",
"about_log": "r/{subreddit}/about/log/",
"about_modqueue": "r/{subreddit}/about/modqueue/",
"about_reports": "r/{subreddit}/about/reports/",
"about_spam": "r/{subreddit}/about/spam/",
"about_sticky": "r/{subreddit}/about/sticky/",
"about_stylesheet": "r/{subreddit}/about/stylesheet/",
"about_traffic": "r/{subreddit}/about/traffic/",
"about_unmoderated": "r/{subreddit}/about/unmoderated/",
"accept_mod_invite": "r/{subreddit}/api/accept_moderator_invite",
"approve": "api/approve/",
"block": "api/block",
"block_user": "/api/block_user/",
"blocked": "prefs/blocked/",
"collapse": "api/collapse_message/",
"collection": "api/v1/collections/collection",
"collection_add_post": "api/v1/collections/add_post_to_collection",
"collection_create": "api/v1/collections/create_collection",
"collection_delete": "api/v1/collections/delete_collection",
"collection_desc": "api/v1/collections/update_collection_description",
"collection_follow": "api/v1/collections/follow_collection",
"collection_remove_post": "api/v1/collections/remove_post_in_collection",
"collection_reorder": "api/v1/collections/reorder_collection",
"collection_subreddit": "api/v1/collections/subreddit_collections",
"collection_title": "api/v1/collections/update_collection_title",
"comment": "api/comment/",
"comment_replies": "message/comments/",
"compose": "api/compose/",
"contest_mode": "api/set_contest_mode/",
"del": "api/del/",
"delete_message": "api/del_msg",
"delete_sr_banner": "r/{subreddit}/api/delete_sr_banner",
"delete_sr_header": "r/{subreddit}/api/delete_sr_header",
"delete_sr_icon": "r/{subreddit}/api/delete_sr_icon",
"delete_sr_image": "r/{subreddit}/api/delete_sr_img",
"deleteflair": "r/{subreddit}/api/deleteflair",
"distinguish": "api/distinguish/",
"domain": "domain/{domain}/",
"duplicates": "duplicates/{submission_id}/",
"edit": "api/editusertext/",
"emoji_delete": "api/v1/{subreddit}/emoji/{emoji_name}",
"emoji_lease": "api/v1/{subreddit}/emoji_asset_upload_s3.json",
"emoji_list": "api/v1/{subreddit}/emojis/all",
"emoji_upload": "api/v1/{subreddit}/emoji.json",
"flair": "r/{subreddit}/api/flair/",
"flairconfig": "r/{subreddit}/api/flairconfig/",
"flaircsv": "r/{subreddit}/api/flaircsv/",
"flairlist": "r/{subreddit}/api/flairlist/",
"flairselector": "r/{subreddit}/api/flairselector/",
"flairtemplate": "r/{subreddit}/api/flairtemplate/",
"flairtemplate_v2": "r/{subreddit}/api/flairtemplate_v2",
"flairtemplateclear": "r/{subreddit}/api/clearflairtemplates/",
"flairtemplatedelete": "r/{subreddit}/api/deleteflairtemplate/",
"friend": "r/{subreddit}/api/friend/",
"friend_v1": "api/v1/me/friends/{user}",
"friends": "api/v1/me/friends/",
"gild_thing": "api/v1/gold/gild/{fullname}/",
"gild_user": "api/v1/gold/give/{username}/",
"hide": "api/hide/",
"ignore_reports": "api/ignore_reports/",
"inbox": "message/inbox/",
"info": "api/info/",
"karma": "api/v1/me/karma",
"leavecontributor": "api/leavecontributor",
"link_flair": "r/{subreddit}/api/link_flair_v2",
"list_banned": "r/{subreddit}/about/banned/",
"list_contributor": "r/{subreddit}/about/contributors/",
"list_moderator": "r/{subreddit}/about/moderators/",
"list_muted": "r/{subreddit}/about/muted/",
"list_wikibanned": "r/{subreddit}/about/wikibanned/",
"list_wikicontributor": "r/{subreddit}/about/wikicontributors/",
"live_accept_invite": "api/live/{id}/accept_contributor_invite",
"live_add_update": "api/live/{id}/update",
"live_close": "api/live/{id}/close_thread",
"live_contributors": "live/{id}/contributors",
"live_discussions": "live/{id}/discussions",
"live_focus": "live/{thread_id}/updates/{update_id}",
"live_info": "api/live/by_id/{ids}",
"live_invite": "api/live/{id}/invite_contributor",
"live_leave": "api/live/{id}/leave_contributor",
"live_now": "api/live/happening_now",
"live_remove_contrib": "api/live/{id}/rm_contributor",
"live_remove_invite": "api/live/{id}/rm_contributor_invite",
"live_remove_update": "api/live/{id}/delete_update",
"live_report": "api/live/{id}/report",
"live_strike": "api/live/{id}/strike_update",
"live_update_perms": "api/live/{id}/set_contributor_permissions",
"live_update_thread": "api/live/{id}/edit",
"live_updates": "live/{id}",
"liveabout": "api/live/{id}/about/",
"livecreate": "api/live/create",
"lock": "api/lock/",
"marknsfw": "api/marknsfw/",
"me": "api/v1/me",
"media_asset": "api/media/asset.json",
"mentions": "message/mentions",
"message": "message/messages/{id}/",
"messages": "message/messages/",
"moderator_messages": "r/{subreddit}/message/moderator/",
"moderator_unread": "r/{subreddit}/message/moderator/unread/",
"modmail_archive": "api/mod/conversations/{id}/archive",
"modmail_bulk_read": "api/mod/conversations/bulk/read",
"modmail_conversation": "api/mod/conversations/{id}",
"modmail_conversations": "api/mod/conversations/",
"modmail_highlight": "api/mod/conversations/{id}/highlight",
"modmail_mute": "api/mod/conversations/{id}/mute",
"modmail_read": "api/mod/conversations/read",
"modmail_subreddits": "api/mod/conversations/subreddits",
"modmail_unarchive": "api/mod/conversations/{id}/unarchive",
"modmail_unmute": "api/mod/conversations/{id}/unmute",
"modmail_unread": "api/mod/conversations/unread",
"modmail_unread_count": "api/mod/conversations/unread/count",
"morechildren": "api/morechildren/",
"multireddit": "user/{user}/m/{multi}/",
"multireddit_api": "api/multi/user/{user}/m/{multi}/",
"multireddit_base": "api/multi/",
"multireddit_copy": "api/multi/copy/",
"multireddit_rename": "api/multi/rename/",
"multireddit_update": "api/multi/user/{user}/m/{multi}/r/{subreddit}",
"multireddit_user": "api/multi/user/{user}/",
"mute_sender": "api/mute_message_author/",
"my_contributor": "subreddits/mine/contributor/",
"my_moderator": "subreddits/mine/moderator/",
"my_multireddits": "api/multi/mine/",
"my_subreddits": "subreddits/mine/subscriber/",
"preferences": "api/v1/me/prefs",
"quarantine_opt_in": "api/quarantine_optin",
"quarantine_opt_out": "api/quarantine_optout",
"read_message": "api/read_message/",
"removal_comment_message": "api/v1/modactions/removal_comment_message",
"removal_link_message": "api/v1/modactions/removal_link_message",
"remove": "api/remove/",
"report": "api/report/",
"rules": "r/{subreddit}/about/rules",
"save": "api/save/",
"search": "r/{subreddit}/search/",
"select_flair": "r/{subreddit}/api/selectflair/",
"sendreplies": "api/sendreplies",
"sent": "message/sent/",
"setpermissions": "r/{subreddit}/api/setpermissions/",
"site_admin": "api/site_admin/",
"spoiler": "api/spoiler/",
"sticky_submission": "api/set_subreddit_sticky/",
"store_visits": "api/store_visits",
"structured_styles": "api/v1/structured_styles/{subreddit}",
"style_asset_lease": "api/v1/style_asset_upload_s3/{subreddit}",
"sub_recommended": "api/recommend/sr/{subreddits}",
"submission": "comments/{id}/",
"submission_replies": "message/selfreply/",
"submit": "api/submit/",
"subreddit": "r/{subreddit}/",
"subreddit_about": "r/{subreddit}/about/",
"subreddit_filter": "api/filter/user/{user}/f/{special}/r/{subreddit}",
"subreddit_filter_list": "api/filter/user/{user}/f/{special}",
"subreddit_random": "r/{subreddit}/random/",
"subreddit_settings": "r/{subreddit}/about/edit/",
"subreddit_stylesheet": "r/{subreddit}/api/subreddit_stylesheet/",
"subreddits_by_topic": "api/subreddits_by_topic",
"subreddits_default": "subreddits/default/",
"subreddits_gold": "subreddits/gold/",
"subreddits_name_search": "api/search_reddit_names/",
"subreddits_new": "subreddits/new/",
"subreddits_popular": "subreddits/popular/",
"subreddits_search": "subreddits/search/",
"subscribe": "api/subscribe/",
"suggested_sort": "api/set_suggested_sort/",
"trophies": "api/v1/user/{user}/trophies",
"uncollapse": "api/uncollapse_message/",
"unfriend": "r/{subreddit}/api/unfriend/",
"unhide": "api/unhide/",
"unignore_reports": "api/unignore_reports/",
"unlock": "api/unlock/",
"unmarknsfw": "api/unmarknsfw/",
"unmute_sender": "api/unmute_message_author/",
"unread": "message/unread/",
"unread_message": "api/unread_message/",
"unsave": "api/unsave/",
"unspoiler": "api/unspoiler/",
"upload_image": "r/{subreddit}/api/upload_sr_img",
"user": "user/{user}/",
"user_about": "user/{user}/about/",
"user_flair": "r/{subreddit}/api/user_flair_v2",
"users_new": "users/new",
"users_popular": "users/popular",
"users_search": "users/search",
"vote": "api/vote/",
"widget_create": "r/{subreddit}/api/widget",
"widget_lease": "r/{subreddit}/api/widget_image_upload_s3",
"widget_modify": "r/{subreddit}/api/widget/{widget_id}",
"widget_order": "r/{subreddit}/api/widget_order/{section}",
"widgets": "r/{subreddit}/api/widgets",
"wiki_edit": "r/{subreddit}/api/wiki/edit/",
"wiki_page": "r/{subreddit}/wiki/{page}",
"wiki_page_editor": "r/{subreddit}/api/wiki/alloweditor/{method}",
"wiki_page_revisions": "r/{subreddit}/wiki/revisions/{page}",
"wiki_page_settings": "r/{subreddit}/wiki/settings/{page}",
"wiki_pages": "r/{subreddit}/wiki/pages/",
"wiki_revisions": "r/{subreddit}/wiki/revisions/",
}
| 58.234146 | 82 | 0.554951 |
API_PATH = {
"about_edited": "r/{subreddit}/about/edited/",
"about_log": "r/{subreddit}/about/log/",
"about_modqueue": "r/{subreddit}/about/modqueue/",
"about_reports": "r/{subreddit}/about/reports/",
"about_spam": "r/{subreddit}/about/spam/",
"about_sticky": "r/{subreddit}/about/sticky/",
"about_stylesheet": "r/{subreddit}/about/stylesheet/",
"about_traffic": "r/{subreddit}/about/traffic/",
"about_unmoderated": "r/{subreddit}/about/unmoderated/",
"accept_mod_invite": "r/{subreddit}/api/accept_moderator_invite",
"approve": "api/approve/",
"block": "api/block",
"block_user": "/api/block_user/",
"blocked": "prefs/blocked/",
"collapse": "api/collapse_message/",
"collection": "api/v1/collections/collection",
"collection_add_post": "api/v1/collections/add_post_to_collection",
"collection_create": "api/v1/collections/create_collection",
"collection_delete": "api/v1/collections/delete_collection",
"collection_desc": "api/v1/collections/update_collection_description",
"collection_follow": "api/v1/collections/follow_collection",
"collection_remove_post": "api/v1/collections/remove_post_in_collection",
"collection_reorder": "api/v1/collections/reorder_collection",
"collection_subreddit": "api/v1/collections/subreddit_collections",
"collection_title": "api/v1/collections/update_collection_title",
"comment": "api/comment/",
"comment_replies": "message/comments/",
"compose": "api/compose/",
"contest_mode": "api/set_contest_mode/",
"del": "api/del/",
"delete_message": "api/del_msg",
"delete_sr_banner": "r/{subreddit}/api/delete_sr_banner",
"delete_sr_header": "r/{subreddit}/api/delete_sr_header",
"delete_sr_icon": "r/{subreddit}/api/delete_sr_icon",
"delete_sr_image": "r/{subreddit}/api/delete_sr_img",
"deleteflair": "r/{subreddit}/api/deleteflair",
"distinguish": "api/distinguish/",
"domain": "domain/{domain}/",
"duplicates": "duplicates/{submission_id}/",
"edit": "api/editusertext/",
"emoji_delete": "api/v1/{subreddit}/emoji/{emoji_name}",
"emoji_lease": "api/v1/{subreddit}/emoji_asset_upload_s3.json",
"emoji_list": "api/v1/{subreddit}/emojis/all",
"emoji_upload": "api/v1/{subreddit}/emoji.json",
"flair": "r/{subreddit}/api/flair/",
"flairconfig": "r/{subreddit}/api/flairconfig/",
"flaircsv": "r/{subreddit}/api/flaircsv/",
"flairlist": "r/{subreddit}/api/flairlist/",
"flairselector": "r/{subreddit}/api/flairselector/",
"flairtemplate": "r/{subreddit}/api/flairtemplate/",
"flairtemplate_v2": "r/{subreddit}/api/flairtemplate_v2",
"flairtemplateclear": "r/{subreddit}/api/clearflairtemplates/",
"flairtemplatedelete": "r/{subreddit}/api/deleteflairtemplate/",
"friend": "r/{subreddit}/api/friend/",
"friend_v1": "api/v1/me/friends/{user}",
"friends": "api/v1/me/friends/",
"gild_thing": "api/v1/gold/gild/{fullname}/",
"gild_user": "api/v1/gold/give/{username}/",
"hide": "api/hide/",
"ignore_reports": "api/ignore_reports/",
"inbox": "message/inbox/",
"info": "api/info/",
"karma": "api/v1/me/karma",
"leavecontributor": "api/leavecontributor",
"link_flair": "r/{subreddit}/api/link_flair_v2",
"list_banned": "r/{subreddit}/about/banned/",
"list_contributor": "r/{subreddit}/about/contributors/",
"list_moderator": "r/{subreddit}/about/moderators/",
"list_muted": "r/{subreddit}/about/muted/",
"list_wikibanned": "r/{subreddit}/about/wikibanned/",
"list_wikicontributor": "r/{subreddit}/about/wikicontributors/",
"live_accept_invite": "api/live/{id}/accept_contributor_invite",
"live_add_update": "api/live/{id}/update",
"live_close": "api/live/{id}/close_thread",
"live_contributors": "live/{id}/contributors",
"live_discussions": "live/{id}/discussions",
"live_focus": "live/{thread_id}/updates/{update_id}",
"live_info": "api/live/by_id/{ids}",
"live_invite": "api/live/{id}/invite_contributor",
"live_leave": "api/live/{id}/leave_contributor",
"live_now": "api/live/happening_now",
"live_remove_contrib": "api/live/{id}/rm_contributor",
"live_remove_invite": "api/live/{id}/rm_contributor_invite",
"live_remove_update": "api/live/{id}/delete_update",
"live_report": "api/live/{id}/report",
"live_strike": "api/live/{id}/strike_update",
"live_update_perms": "api/live/{id}/set_contributor_permissions",
"live_update_thread": "api/live/{id}/edit",
"live_updates": "live/{id}",
"liveabout": "api/live/{id}/about/",
"livecreate": "api/live/create",
"lock": "api/lock/",
"marknsfw": "api/marknsfw/",
"me": "api/v1/me",
"media_asset": "api/media/asset.json",
"mentions": "message/mentions",
"message": "message/messages/{id}/",
"messages": "message/messages/",
"moderator_messages": "r/{subreddit}/message/moderator/",
"moderator_unread": "r/{subreddit}/message/moderator/unread/",
"modmail_archive": "api/mod/conversations/{id}/archive",
"modmail_bulk_read": "api/mod/conversations/bulk/read",
"modmail_conversation": "api/mod/conversations/{id}",
"modmail_conversations": "api/mod/conversations/",
"modmail_highlight": "api/mod/conversations/{id}/highlight",
"modmail_mute": "api/mod/conversations/{id}/mute",
"modmail_read": "api/mod/conversations/read",
"modmail_subreddits": "api/mod/conversations/subreddits",
"modmail_unarchive": "api/mod/conversations/{id}/unarchive",
"modmail_unmute": "api/mod/conversations/{id}/unmute",
"modmail_unread": "api/mod/conversations/unread",
"modmail_unread_count": "api/mod/conversations/unread/count",
"morechildren": "api/morechildren/",
"multireddit": "user/{user}/m/{multi}/",
"multireddit_api": "api/multi/user/{user}/m/{multi}/",
"multireddit_base": "api/multi/",
"multireddit_copy": "api/multi/copy/",
"multireddit_rename": "api/multi/rename/",
"multireddit_update": "api/multi/user/{user}/m/{multi}/r/{subreddit}",
"multireddit_user": "api/multi/user/{user}/",
"mute_sender": "api/mute_message_author/",
"my_contributor": "subreddits/mine/contributor/",
"my_moderator": "subreddits/mine/moderator/",
"my_multireddits": "api/multi/mine/",
"my_subreddits": "subreddits/mine/subscriber/",
"preferences": "api/v1/me/prefs",
"quarantine_opt_in": "api/quarantine_optin",
"quarantine_opt_out": "api/quarantine_optout",
"read_message": "api/read_message/",
"removal_comment_message": "api/v1/modactions/removal_comment_message",
"removal_link_message": "api/v1/modactions/removal_link_message",
"remove": "api/remove/",
"report": "api/report/",
"rules": "r/{subreddit}/about/rules",
"save": "api/save/",
"search": "r/{subreddit}/search/",
"select_flair": "r/{subreddit}/api/selectflair/",
"sendreplies": "api/sendreplies",
"sent": "message/sent/",
"setpermissions": "r/{subreddit}/api/setpermissions/",
"site_admin": "api/site_admin/",
"spoiler": "api/spoiler/",
"sticky_submission": "api/set_subreddit_sticky/",
"store_visits": "api/store_visits",
"structured_styles": "api/v1/structured_styles/{subreddit}",
"style_asset_lease": "api/v1/style_asset_upload_s3/{subreddit}",
"sub_recommended": "api/recommend/sr/{subreddits}",
"submission": "comments/{id}/",
"submission_replies": "message/selfreply/",
"submit": "api/submit/",
"subreddit": "r/{subreddit}/",
"subreddit_about": "r/{subreddit}/about/",
"subreddit_filter": "api/filter/user/{user}/f/{special}/r/{subreddit}",
"subreddit_filter_list": "api/filter/user/{user}/f/{special}",
"subreddit_random": "r/{subreddit}/random/",
"subreddit_settings": "r/{subreddit}/about/edit/",
"subreddit_stylesheet": "r/{subreddit}/api/subreddit_stylesheet/",
"subreddits_by_topic": "api/subreddits_by_topic",
"subreddits_default": "subreddits/default/",
"subreddits_gold": "subreddits/gold/",
"subreddits_name_search": "api/search_reddit_names/",
"subreddits_new": "subreddits/new/",
"subreddits_popular": "subreddits/popular/",
"subreddits_search": "subreddits/search/",
"subscribe": "api/subscribe/",
"suggested_sort": "api/set_suggested_sort/",
"trophies": "api/v1/user/{user}/trophies",
"uncollapse": "api/uncollapse_message/",
"unfriend": "r/{subreddit}/api/unfriend/",
"unhide": "api/unhide/",
"unignore_reports": "api/unignore_reports/",
"unlock": "api/unlock/",
"unmarknsfw": "api/unmarknsfw/",
"unmute_sender": "api/unmute_message_author/",
"unread": "message/unread/",
"unread_message": "api/unread_message/",
"unsave": "api/unsave/",
"unspoiler": "api/unspoiler/",
"upload_image": "r/{subreddit}/api/upload_sr_img",
"user": "user/{user}/",
"user_about": "user/{user}/about/",
"user_flair": "r/{subreddit}/api/user_flair_v2",
"users_new": "users/new",
"users_popular": "users/popular",
"users_search": "users/search",
"vote": "api/vote/",
"widget_create": "r/{subreddit}/api/widget",
"widget_lease": "r/{subreddit}/api/widget_image_upload_s3",
"widget_modify": "r/{subreddit}/api/widget/{widget_id}",
"widget_order": "r/{subreddit}/api/widget_order/{section}",
"widgets": "r/{subreddit}/api/widgets",
"wiki_edit": "r/{subreddit}/api/wiki/edit/",
"wiki_page": "r/{subreddit}/wiki/{page}",
"wiki_page_editor": "r/{subreddit}/api/wiki/alloweditor/{method}",
"wiki_page_revisions": "r/{subreddit}/wiki/revisions/{page}",
"wiki_page_settings": "r/{subreddit}/wiki/settings/{page}",
"wiki_pages": "r/{subreddit}/wiki/pages/",
"wiki_revisions": "r/{subreddit}/wiki/revisions/",
}
| true | true |
f7159f9ba44b38e5dac9b34e58d1d994a96098c0 | 5,561 | py | Python | autotorrent/clients/tests/test_transmission.py | jyggen/autotorrent | 5a8f2b40ccc8c66c73dc520f98b886d21e163afa | [
"MIT"
] | 278 | 2015-02-12T19:19:53.000Z | 2022-03-22T21:17:28.000Z | autotorrent/clients/tests/test_transmission.py | jyggen/autotorrent | 5a8f2b40ccc8c66c73dc520f98b886d21e163afa | [
"MIT"
] | 56 | 2015-03-27T00:38:37.000Z | 2022-03-26T17:52:58.000Z | autotorrent/clients/tests/test_transmission.py | jyggen/autotorrent | 5a8f2b40ccc8c66c73dc520f98b886d21e163afa | [
"MIT"
] | 48 | 2015-03-10T16:50:19.000Z | 2022-03-20T12:11:50.000Z | import json
import os
import shutil
import tempfile
from unittest import TestCase
from ...bencode import bdecode
from ..transmission import TransmissionClient as RealTransmissionClient
current_path = os.path.dirname(__file__)
class TransmissionClient(RealTransmissionClient):
def __init__(self, *args, **kwargs):
super(TransmissionClient, self).__init__(*args, **kwargs)
self._torrents = {}
self._torrent_id = 1
def call(self, method, **kwargs):
_ = json.dumps(kwargs)
if method == 'session-get':
return {'version': 'version: 2.82 (14160)',
'config-dir': '/home/autotorrent/.config/transmission-daemon',
'download-dir': '/home/autotorrent/Downloads',
'rpc-version': 15}
elif method == 'torrent-add':
self._torrent_id += 1
self._torrents[self._torrent_id] = kwargs
return {'torrent-added': {'id': self._torrent_id}}
elif method == 'torrent-rename-path':
self._torrents[kwargs['ids'][0]].update(kwargs)
return {}
elif method == 'torrent-start':
self._torrents[kwargs['ids'][0]]['paused'] = False
return {}
else:
raise Exception(method, kwargs)
class TestTransmissionClient(TestCase):
def setUp(self):
self.client = TransmissionClient('http://127.0.0.1:9091')
self._temp_path = tempfile.mkdtemp()
def tearDown(self):
if self._temp_path.startswith('/tmp'): # paranoid-mon, the best pokemon.
shutil.rmtree(self._temp_path)
def test_test_connection(self):
self.assertEqual(self.client.test_connection(), "version: 2.82 (14160), config-dir: /home/autotorrent/.config/transmission-daemon, download-dir: /home/autotorrent/Downloads")
def _add_torrent_with_links(self, letters):
with open(os.path.join(current_path, 'test.torrent'), 'rb') as f:
torrent = bdecode(f.read())
files = []
for letter in ['a', 'b', 'c']:
filename = 'file_%s.txt' % letter
files.append({
'completed': (letter in letters),
'length': 11,
'path': ['tmp', filename],
})
return self.client.add_torrent(torrent, '/tmp/', files)
def test_add_torrent_complete(self):
self.assertTrue(self._add_torrent_with_links(['a', 'b', 'c']))
self.assertTrue((2 in self.client._torrents))
self.assertEqual(self.client._torrents[2]['paused'], False)
def test_auto_config_successful_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '0.0.0.0',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.0.0.1:12312/transmission/rpc'
})
def test_auto_config_successful_differnet_bind_ip_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.22.54.99:12312/transmission/rpc'
})
def test_auto_config_unsuccessful_missing_ip(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_missing_port(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_problematic_file(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
os.chmod(os.path.join(config_path, 'settings.json'), 0)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None) | 36.827815 | 182 | 0.58209 | import json
import os
import shutil
import tempfile
from unittest import TestCase
from ...bencode import bdecode
from ..transmission import TransmissionClient as RealTransmissionClient
current_path = os.path.dirname(__file__)
class TransmissionClient(RealTransmissionClient):
def __init__(self, *args, **kwargs):
super(TransmissionClient, self).__init__(*args, **kwargs)
self._torrents = {}
self._torrent_id = 1
def call(self, method, **kwargs):
_ = json.dumps(kwargs)
if method == 'session-get':
return {'version': 'version: 2.82 (14160)',
'config-dir': '/home/autotorrent/.config/transmission-daemon',
'download-dir': '/home/autotorrent/Downloads',
'rpc-version': 15}
elif method == 'torrent-add':
self._torrent_id += 1
self._torrents[self._torrent_id] = kwargs
return {'torrent-added': {'id': self._torrent_id}}
elif method == 'torrent-rename-path':
self._torrents[kwargs['ids'][0]].update(kwargs)
return {}
elif method == 'torrent-start':
self._torrents[kwargs['ids'][0]]['paused'] = False
return {}
else:
raise Exception(method, kwargs)
class TestTransmissionClient(TestCase):
def setUp(self):
self.client = TransmissionClient('http://127.0.0.1:9091')
self._temp_path = tempfile.mkdtemp()
def tearDown(self):
if self._temp_path.startswith('/tmp'):
shutil.rmtree(self._temp_path)
def test_test_connection(self):
self.assertEqual(self.client.test_connection(), "version: 2.82 (14160), config-dir: /home/autotorrent/.config/transmission-daemon, download-dir: /home/autotorrent/Downloads")
def _add_torrent_with_links(self, letters):
with open(os.path.join(current_path, 'test.torrent'), 'rb') as f:
torrent = bdecode(f.read())
files = []
for letter in ['a', 'b', 'c']:
filename = 'file_%s.txt' % letter
files.append({
'completed': (letter in letters),
'length': 11,
'path': ['tmp', filename],
})
return self.client.add_torrent(torrent, '/tmp/', files)
def test_add_torrent_complete(self):
self.assertTrue(self._add_torrent_with_links(['a', 'b', 'c']))
self.assertTrue((2 in self.client._torrents))
self.assertEqual(self.client._torrents[2]['paused'], False)
def test_auto_config_successful_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '0.0.0.0',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.0.0.1:12312/transmission/rpc'
})
def test_auto_config_successful_differnet_bind_ip_config(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is not None)
self.assertEqual(tc.get_config(), {
'url': 'http://127.22.54.99:12312/transmission/rpc'
})
def test_auto_config_unsuccessful_missing_ip(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-port': 12312,
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_missing_port(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
}, f)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
def test_auto_config_unsuccessful_problematic_file(self):
os.environ['HOME'] = self._temp_path
config_path = os.path.join(self._temp_path, '.config/transmission-daemon')
os.makedirs(config_path)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None)
with open(os.path.join(config_path, 'settings.json'), 'w') as f:
json.dump({
'rpc-bind-address': '127.22.54.99',
'rpc-port': 12312,
}, f)
os.chmod(os.path.join(config_path, 'settings.json'), 0)
tc = TransmissionClient.auto_config()
self.assertTrue(tc is None) | true | true |
f715a037d80404b6931c2c3dd6c455b1ba329594 | 4,755 | py | Python | tools/stats_mcdc_data.py | Yc174/tf-faster-rcnn-mcdc | 02d6008f2d689e6f928d2de24fc660073044d1b8 | [
"MIT"
] | null | null | null | tools/stats_mcdc_data.py | Yc174/tf-faster-rcnn-mcdc | 02d6008f2d689e6f928d2de24fc660073044d1b8 | [
"MIT"
] | null | null | null | tools/stats_mcdc_data.py | Yc174/tf-faster-rcnn-mcdc | 02d6008f2d689e6f928d2de24fc660073044d1b8 | [
"MIT"
] | null | null | null | #coding=utf-8
from __future__ import print_function
import time
import argparse
from glob import glob
import os, cv2
import json
def show(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
# left = int(x - w / 2)
# right = int(x + w / 2)
# top = int(y - h / 2)
# bottom = int(y + h / 2)
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
# draw_bbox_with_center(arr, r)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
def show_with_center(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x - w / 2)
right = int(x + w / 2)
top = int(y - h / 2)
bottom = int(y + h / 2)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
# draw_bbox_with_center(arr, r)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
if __name__ == '__main__':
# data_dir = '/home/hzshuai/mcdc/mcdc_data'
data_dir = '/data/mcdc_data'
train_dir = data_dir + '/train/train_images'
label_dir = '/home/m12/mcdc_data/train/train_labels'
ann_file = data_dir + '/train/MCDC_train_100000.coco.json'
with open(ann_file) as fin:
ann = json.loads(fin.read())
# with open(label_dir + '/train_format.json', 'w') as fout:
# json.dump(ann, fout, indent=4, ensure_ascii=False)
ann_map = {}
cls = {}
for im in ann['images']:
ann_map[im['id']] = im
for a in ann['annotations']:
if 'car_rear' in a and 'rear_box' in a['car_rear'] and a['image_id'] in ann_map:
if 'ann' not in ann_map[a['image_id']]:
ann_map[a['image_id']]['ann'] = []
ann_map[a['image_id']]['ann'].append(a)
if a['type'] not in cls:
cls[a['type']] = 0
cls[a['type']] += 1
# {u'xiaoxingche': 189955, u'gongchengche': 305, u'huoche': 12975, u'unknown': 63462, u'sanlunche': 6684, u'others': 228, u'gongjiaokeche': 20610}
# 96104
#{u'xiaoxingche': 18813, u'gongchengche': 26, u'huoche': 1267, u'unknown': 6244, u'sanlunche': 642, u'others': 19, u'gongjiaokeche': 1912}
# if a['type'] == 'unknown' and cls[a['type']] % 23 == 0:
# if a['image_id'] == 0:
# image_path = train_dir + '/' + ann_map[a['image_id']]['file_name']
# show(image_path, a['car_rear']['rear_box'])
print(cls)
im_list = []
cls = ['xiaoxingche', 'gongchengche', 'huoche', 'unknown', 'sanlunche', 'others', 'gongjiaokeche']
for k, image in ann_map.iteritems():
if 'ann' in image:
# print(k)
# print(k, image)
image_path = train_dir + '/' + image['file_name']
im_list.append(image_path)
txt_path = label_dir + '/' + image['file_name'][:-4] + '.txt'
dirname = os.path.dirname(txt_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
dw, dh = 1./image['width'], 1./image['height']
# print(txt_path, 1./dw, 1./dh)
with open(txt_path, 'w') as fout:
for a in image['ann']:
# print(a)
x, y, w, h = a['car_rear']['rear_box']
# show(image_path, (x, y, w, h))
x = x + w / 2.
y = y + h / 2.
# show_with_center(image_path, (x, y, w, h))
x *= dw
y *= dh
w *= dw
h *= dh
bb = [x, y, w, h]
cls_id = cls.index(a['type'])
fout.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
# break
print(txt_path)
print(len(im_list))
with open(label_dir + '/train.txt', 'w') as fout:
for e in im_list:
fout.write(e + '\n')
with open(label_dir + '/valid.txt', 'w') as fout:
for i, e in enumerate(im_list):
if i % 10 == 0:
fout.write(e + '\n')
| 30.677419 | 146 | 0.512303 |
from __future__ import print_function
import time
import argparse
from glob import glob
import os, cv2
import json
def show(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
def show_with_center(image_path, bbox):
print(image_path, bbox)
im = cv2.imread(image_path)
x, y, w, h = bbox
left = int(x - w / 2)
right = int(x + w / 2)
top = int(y - h / 2)
bottom = int(y + h / 2)
cv2.rectangle(im, (left, top), (right, bottom), color=[0, 255, 0], thickness=3)
im = cv2.resize(im, (im.shape[1]/2, im.shape[0]/2))
cv2.imshow('image', im)
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
elif k == ord('s'):
cv2.imwrite('messigray.png', im)
cv2.destroyAllWindows()
if __name__ == '__main__':
data_dir = '/data/mcdc_data'
train_dir = data_dir + '/train/train_images'
label_dir = '/home/m12/mcdc_data/train/train_labels'
ann_file = data_dir + '/train/MCDC_train_100000.coco.json'
with open(ann_file) as fin:
ann = json.loads(fin.read())
ann_map = {}
cls = {}
for im in ann['images']:
ann_map[im['id']] = im
for a in ann['annotations']:
if 'car_rear' in a and 'rear_box' in a['car_rear'] and a['image_id'] in ann_map:
if 'ann' not in ann_map[a['image_id']]:
ann_map[a['image_id']]['ann'] = []
ann_map[a['image_id']]['ann'].append(a)
if a['type'] not in cls:
cls[a['type']] = 0
cls[a['type']] += 1
print(cls)
im_list = []
cls = ['xiaoxingche', 'gongchengche', 'huoche', 'unknown', 'sanlunche', 'others', 'gongjiaokeche']
for k, image in ann_map.iteritems():
if 'ann' in image:
image_path = train_dir + '/' + image['file_name']
im_list.append(image_path)
txt_path = label_dir + '/' + image['file_name'][:-4] + '.txt'
dirname = os.path.dirname(txt_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
dw, dh = 1./image['width'], 1./image['height']
with open(txt_path, 'w') as fout:
for a in image['ann']:
x, y, w, h = a['car_rear']['rear_box']
x = x + w / 2.
y = y + h / 2.
x *= dw
y *= dh
w *= dw
h *= dh
bb = [x, y, w, h]
cls_id = cls.index(a['type'])
fout.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
print(txt_path)
print(len(im_list))
with open(label_dir + '/train.txt', 'w') as fout:
for e in im_list:
fout.write(e + '\n')
with open(label_dir + '/valid.txt', 'w') as fout:
for i, e in enumerate(im_list):
if i % 10 == 0:
fout.write(e + '\n')
| true | true |
f715a131ad3bfd03cd6d8810488e273d1fd54f64 | 6,015 | py | Python | docker/api/daemon.py | jbn/docker-py | 1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7 | [
"Apache-2.0"
] | 72 | 2018-07-02T07:47:15.000Z | 2022-03-29T10:02:14.000Z | docker/api/daemon.py | jbn/docker-py | 1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7 | [
"Apache-2.0"
] | 51 | 2019-10-08T01:53:02.000Z | 2021-06-04T22:02:21.000Z | docker/api/daemon.py | jbn/docker-py | 1e38d31c9fc74d07cb8dd3b7b100723bfacd23f7 | [
"Apache-2.0"
] | 29 | 2018-09-17T06:10:32.000Z | 2022-03-19T13:15:30.000Z | import os
from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True)
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
| 33.049451 | 79 | 0.571904 | import os
from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin(object):
@utils.minimum_version('1.25')
def df(self):
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
| true | true |
f715a15885e13fd0957f648c1414a90e72a239ca | 10,690 | py | Python | stable_baselines3/dqn/dqn.py | haorang/285 | 3b7369b8eb4433952c9cdf27d4feaa015a6c40e4 | [
"MIT"
] | 26 | 2021-11-05T08:46:06.000Z | 2022-03-22T05:53:57.000Z | stable_baselines3/dqn/dqn.py | haorang/285 | 3b7369b8eb4433952c9cdf27d4feaa015a6c40e4 | [
"MIT"
] | 1 | 2021-11-19T11:13:37.000Z | 2021-11-30T09:08:04.000Z | stable_baselines3/dqn/dqn.py | haorang/285 | 3b7369b8eb4433952c9cdf27d4feaa015a6c40e4 | [
"MIT"
] | 5 | 2021-11-05T08:46:12.000Z | 2022-03-25T21:56:58.000Z | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
"""
Deep Q-Network (DQN)
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
Default hyperparameters are taken from the nature paper,
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
:param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None, # No action noise
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
# Linear schedule will be defined in `_setup_model()`
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Compute the target Q values
target_q = self.q_net_target(replay_data.next_observations)
# Follow greedy policy: use the one with the highest value
target_q, _ = target_q.max(dim=1)
# Avoid potential broadcast issue
target_q = target_q.reshape(-1, 1)
# 1-step TD target
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
# Get current Q estimates
current_q = self.q_net(replay_data.observations)
# Retrieve the q-values for the actions from the replay buffer
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
# Compute Huber loss (less sensitive to outliers)
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overrides the base_class predict function to include epsilon-greedy exploration.
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
| 43.279352 | 110 | 0.659682 | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
self.exploration_rate = 0.0
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
target_q = self.q_net_target(replay_data.next_observations)
target_q, _ = target_q.max(dim=1)
target_q = target_q.reshape(-1, 1)
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
current_q = self.q_net(replay_data.observations)
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
self.policy.optimizer.zero_grad()
loss.backward()
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
| true | true |
f715a21938d09961aef70bbfb712b4ac4b78ccb3 | 2,266 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_sdram_alloc_request.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | from spinnman.messages.scp.abstract_messages.abstract_scp_request\
import AbstractSCPRequest
from spinnman.messages.scp.impl.scp_sdram_alloc_response import \
SCPSDRAMAllocResponse
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.scp_alloc_free_type import SCPAllocFreeType
from spinnman import exceptions
class SCPSDRAMAllocRequest(AbstractSCPRequest):
    """ An SCP Request to allocate space in the SDRAM space
    """

    def __init__(self, x, y, app_id, size, tag=None):
        """
        :param x: The x-coordinate of the chip to allocate on, between 0 and\
                    255
        :type x: int
        :param y: The y-coordinate of the chip to allocate on, between 0 and\
                    255
        :type y: int
        :param app_id: The id of the application, between 0 and 255
        :type app_id: int
        :param size: The size in bytes of memory to be allocated
        :type size: int
        :param tag: the tag for the SDRAM, a 8-bit (chip-wide) tag that can be\
                    looked up by a SpiNNaker application to discover the\
                    address of the allocated block. If `0` then no tag is\
                    applied.
        :type tag: int
        """
        # Normalise / validate the tag: None means "untagged" (0).
        if tag is None:
            tag = 0
        elif not (0 <= tag < 256):
            raise exceptions.SpinnmanInvalidParameterException(
                "The tag param needs to be between 0 and 255, or None (in "
                "which case 0 will be used by default)")

        # argument_1 packs the application id above the 8-bit operation code.
        operation = (app_id << 8) | SCPAllocFreeType.ALLOC_SDRAM.value  # @UndefinedVariable
        sdp_header = SDPHeader(
            flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
            destination_cpu=0, destination_chip_x=x,
            destination_chip_y=y)
        scp_header = SCPRequestHeader(command=SCPCommand.CMD_ALLOC)
        AbstractSCPRequest.__init__(
            self, sdp_header, scp_header,
            argument_1=operation, argument_2=size, argument_3=tag)
        # Remembered so the response parser knows how much was requested.
        self._size = size

    def get_scp_response(self):
        """Return the response handler for this allocation request."""
        return SCPSDRAMAllocResponse(self._size)
| 39.068966 | 79 | 0.644307 | from spinnman.messages.scp.abstract_messages.abstract_scp_request\
import AbstractSCPRequest
from spinnman.messages.scp.impl.scp_sdram_alloc_response import \
SCPSDRAMAllocResponse
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.scp_alloc_free_type import SCPAllocFreeType
from spinnman import exceptions
class SCPSDRAMAllocRequest(AbstractSCPRequest):
def __init__(self, x, y, app_id, size, tag=None):
if tag is None:
tag = 0
elif not(0 <= tag < 256):
raise exceptions.SpinnmanInvalidParameterException(
"The tag param needs to be between 0 and 255, or None (in "
"which case 0 will be used by default)")
AbstractSCPRequest.__init__(
self,
SDPHeader(
flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
destination_cpu=0, destination_chip_x=x,
destination_chip_y=y),
SCPRequestHeader(command=SCPCommand.CMD_ALLOC),
argument_1=(
(app_id << 8) |
SCPAllocFreeType.ALLOC_SDRAM.value),
argument_2=size, argument_3=tag)
self._size = size
def get_scp_response(self):
return SCPSDRAMAllocResponse(self._size)
| true | true |
f715a27d0c9909bea75ea1edd3eb15e6bba3b9a4 | 5,102 | py | Python | gluon/packages/dal/pydal/adapters/mssql.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | gluon/packages/dal/pydal/adapters/mssql.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | gluon/packages/dal/pydal/adapters/mssql.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | import re
from .._compat import PY2, iteritems, integer_types, to_unicode, long
from .._globals import IDENTITY
from .base import SQLAdapter
from . import adapters, with_connection_or_raise
class Slicer(object):
    """Mixin that emulates LIMIT/OFFSET by slicing fetched rows client-side,
    for dialects that cannot express row limits in SQL."""

    def rowslice(self, rows, minimum=0, maximum=None):
        # ``slice(minimum, None)`` is exactly ``rows[minimum:]``, so a single
        # slice object covers both the bounded and unbounded cases.
        return rows[slice(minimum, maximum)]
class MSSQL(SQLAdapter):
    """Adapter for Microsoft SQL Server over pyodbc.

    The URI is either a raw ODBC DSN ("mssql://DSN") or a credential URI
    ("mssql://user:password@host:port/db?ARG=value&...").
    """
    dbengine = 'mssql'
    drivers = ('pyodbc',)

    # Any non-empty remainder is accepted verbatim as a pre-configured DSN.
    REGEX_DSN = '^.+$'
    # user[:password]@host[:port]/db[?urlargs]; host may be a bracketed
    # IPv6 literal.
    REGEX_URI = \
        '^(?P<user>[^:@]+)(:(?P<password>[^@]*))?' \
        r'@(?P<host>[^:/]+|\[[^\]]+\])(:(?P<port>\d+))?' \
        '/(?P<db>[^?]+)' \
        r'(\?(?P<urlargs>.*))?$'
    REGEX_ARG_VAL = '(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)'

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        # srid: spatial reference id for geometry fields (4326 = WGS84).
        self.srid = srid
        super(MSSQL, self).__init__(
            db, uri, pool_size, folder, db_codec, credential_decoder,
            driver_args, adapter_args, do_connect, after_connection)

    def _initialize_(self, do_connect):
        """Translate ``self.uri`` into an ODBC connection string (``self.dsn``)."""
        super(MSSQL, self)._initialize_(do_connect)
        ruri = self.uri.split('://', 1)[1]
        if '@' not in ruri:
            # No credentials: the remainder is treated as a literal DSN.
            m = re.match(self.REGEX_DSN, ruri)
            if not m:
                raise SyntaxError("Invalid URI string in DAL")
            self.dsn = m.group()
        else:
            m = re.match(self.REGEX_URI, ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = self.credential_decoder(m.group('user'))
            password = self.credential_decoder(m.group('password'))
            if password is None:
                password = ''
            host = m.group('host')
            db = m.group('db')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = {'DRIVER': '{SQL Server}'}
            urlargs = m.group('urlargs') or ''
            for argmatch in re.finditer(self.REGEX_ARG_VAL, urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = \
                    argmatch.group('argvalue')
            urlargs = ';'.join([
                '%s=%s' % (ak, av) for (ak, av) in iteritems(argsdict)])
            self.dsn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)

    def connector(self):
        # Open a fresh pyodbc connection from the prepared DSN.
        return self.driver.connect(self.dsn, **self.driver_args)

    def lastrowid(self, table):
        """Return the identity value generated by the last insert."""
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])
@adapters.register_for('mssql')
class MSSQL1(MSSQL, Slicer):
    # Legacy dialect: row limits are emulated by slicing rows in Python
    # (via the Slicer mixin).
    pass


@adapters.register_for('mssql3')
class MSSQL3(MSSQL):
    # presumably selects a SQL Server >= 2005 dialect elsewhere in pydal --
    # confirm against the dialect registry.
    pass


@adapters.register_for('mssql4')
class MSSQL4(MSSQL):
    # presumably selects a SQL Server >= 2012 dialect elsewhere in pydal --
    # confirm against the dialect registry.
    pass
class MSSQLN(MSSQL):
    """MSSQL variant that emits text literals as National (Unicode) strings."""

    def represent(self, obj, field_type):
        # Prefix quoted text literals with N so SQL Server parses them as
        # NVARCHAR instead of the database's default code page.
        literal = super(MSSQLN, self).represent(obj, field_type)
        is_text_type = field_type in ('string', 'text', 'json')
        if is_text_type and literal.startswith("'"):
            return 'N' + literal
        return literal

    @with_connection_or_raise
    def execute(self, *args, **kwargs):
        # Python 2 only: make sure the SQL text itself is unicode.
        if PY2:
            args = [to_unicode(args[0])] + list(args[1:])
        return super(MSSQLN, self).execute(*args, **kwargs)
@adapters.register_for('mssqln', 'mssql2')
class MSSQL1N(MSSQLN, Slicer):
    # Unicode-literal counterpart of MSSQL1 (client-side row slicing).
    pass


@adapters.register_for('mssql3n')
class MSSQL3N(MSSQLN):
    # Unicode-literal counterpart of MSSQL3.
    pass


@adapters.register_for('mssql4n')
class MSSQL4N(MSSQLN):
    # Unicode-literal counterpart of MSSQL4.
    pass
@adapters.register_for('vertica')
class Vertica(MSSQL1):
    # Vertica is driven through the MSSQL1 adapter.
    # NOTE(review): this override is byte-identical to the inherited
    # MSSQL.lastrowid -- confirm whether it can be removed.
    def lastrowid(self, table):
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])
@adapters.register_for('sybase')
class Sybase(MSSQL1):
    """Sybase adapter: MSSQL-compatible dialect with its own DSN syntax."""
    dbengine = 'sybase'

    def _initialize_(self, do_connect):
        """Parse ``self.uri`` into a Sybase DSN and driver credentials.

        Accepts either a raw DSN ("sybase://DSN") or a credential URI
        ("sybase://user:password@host:port/db").
        """
        super(MSSQL, self)._initialize_(do_connect)
        ruri = self.uri.split('://', 1)[1]
        if '@' not in ruri:
            m = re.match(self.REGEX_DSN, ruri)
            if not m:
                raise SyntaxError("Invalid URI string in DAL")
            # BUG FIX: the matched DSN was previously assigned to a local
            # variable and discarded, so self.dsn was never set on this path.
            self.dsn = m.group()
        else:
            m = re.match(self.REGEX_URI, ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            # BUG FIX: decode credentials exactly once; they were previously
            # passed through credential_decoder a second time below, which
            # corrupts any non-identity decoder.
            user = self.credential_decoder(m.group('user'))
            password = self.credential_decoder(m.group('password'))
            if password is None:
                password = ''
            host = m.group('host')
            db = m.group('db')
            port = m.group('port') or '1433'
            self.dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
            self.driver_args.update(user=user, passwd=password)
from .._compat import PY2, iteritems, integer_types, to_unicode, long
from .._globals import IDENTITY
from .base import SQLAdapter
from . import adapters, with_connection_or_raise
class Slicer(object):
def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class MSSQL(SQLAdapter):
dbengine = 'mssql'
drivers = ('pyodbc',)
REGEX_DSN = '^.+$'
REGEX_URI = \
'^(?P<user>[^:@]+)(:(?P<password>[^@]*))?' \
r'@(?P<host>[^:/]+|\[[^\]]+\])(:(?P<port>\d+))?' \
'/(?P<db>[^?]+)' \
r'(\?(?P<urlargs>.*))?$'
REGEX_ARG_VAL = '(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)'
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.srid = srid
super(MSSQL, self).__init__(
db, uri, pool_size, folder, db_codec, credential_decoder,
driver_args, adapter_args, do_connect, after_connection)
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
self.dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
argsdict = {'DRIVER': '{SQL Server}'}
urlargs = m.group('urlargs') or ''
for argmatch in re.finditer(self.REGEX_ARG_VAL, urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = \
argmatch.group('argvalue')
urlargs = ';'.join([
'%s=%s' % (ak, av) for (ak, av) in iteritems(argsdict)])
self.dsn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
def connector(self):
return self.driver.connect(self.dsn, **self.driver_args)
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('mssql')
class MSSQL1(MSSQL, Slicer):
pass
@adapters.register_for('mssql3')
class MSSQL3(MSSQL):
pass
@adapters.register_for('mssql4')
class MSSQL4(MSSQL):
pass
class MSSQLN(MSSQL):
def represent(self, obj, field_type):
rv = super(MSSQLN, self).represent(obj, field_type)
if field_type in ('string', 'text', 'json') and rv.startswith("'"):
rv = 'N' + rv
return rv
@with_connection_or_raise
def execute(self, *args, **kwargs):
if PY2:
args = list(args)
args[0] = to_unicode(args[0])
return super(MSSQLN, self).execute(*args, **kwargs)
@adapters.register_for('mssqln', 'mssql2')
class MSSQL1N(MSSQLN, Slicer):
pass
@adapters.register_for('mssql3n')
class MSSQL3N(MSSQLN):
pass
@adapters.register_for('mssql4n')
class MSSQL4N(MSSQLN):
pass
@adapters.register_for('vertica')
class Vertica(MSSQL1):
def lastrowid(self, table):
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
@adapters.register_for('sybase')
class Sybase(MSSQL1):
dbengine = 'sybase'
def _initialize_(self, do_connect):
super(MSSQL, self)._initialize_(do_connect)
ruri = self.uri.split('://', 1)[1]
if '@' not in ruri:
m = re.match(self.REGEX_DSN, ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
dsn = m.group()
else:
m = re.match(self.REGEX_URI, ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = self.credential_decoder(m.group('user'))
password = self.credential_decoder(m.group('password'))
if password is None:
password = ''
host = m.group('host')
db = m.group('db')
port = m.group('port') or '1433'
self.dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
self.driver_args.update(
user=self.credential_decoder(user),
passwd=self.credential_decoder(password))
| true | true |
f715a381967b6c4678430e111919f89608f9e232 | 1,922 | py | Python | astropy/stats/lombscargle/implementations/tests/test_mle.py | b1quint/astropy | a170a74739e4356c169429a42e554f9777b53f4d | [
"BSD-3-Clause"
] | 8 | 2019-04-27T01:19:45.000Z | 2020-09-21T03:31:01.000Z | astropy/stats/lombscargle/implementations/tests/test_mle.py | b1quint/astropy | a170a74739e4356c169429a42e554f9777b53f4d | [
"BSD-3-Clause"
] | null | null | null | astropy/stats/lombscargle/implementations/tests/test_mle.py | b1quint/astropy | a170a74739e4356c169429a42e554f9777b53f4d | [
"BSD-3-Clause"
] | 5 | 2019-04-27T01:19:47.000Z | 2020-09-20T15:15:19.000Z | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
    # Ten pseudo-random observation times in [0, 10); seeded so every
    # parametrized test sees the same grid.
    rand = np.random.RandomState(42)
    return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
    """Design matrix has an optional bias column plus sin/cos columns,
    each scaled by 1/dy (dy=None behaves like dy=1)."""
    X = design_matrix(t, freq, dy, bias=bias)
    # One sin/cos pair plus the optional constant column.
    assert X.shape == (t.shape[0], 2 + bool(bias))

    if bias:
        assert_allclose(X[:, 0], 1. / (dy or 1.0))

    # Last two columns are the first-harmonic sine and cosine terms.
    assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
    assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
    """With nterms harmonics the matrix has 1 + 2*nterms columns:
    bias first, then a sin/cos pair per harmonic, all scaled by 1/dy."""
    dy = 2.0
    freq = 1.5
    X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
    assert X.shape == (t.shape[0], 1 + 2 * nterms)

    assert_allclose(X[:, 0], 1. / dy)
    for i in range(1, nterms + 1):
        # Columns 2i-1 and 2i hold the i-th harmonic sine and cosine.
        assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
        assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
    """periodic_fit must recover a noiseless multiterm signal exactly."""
    rand = np.random.RandomState(42)
    t = 10 * rand.rand(30)
    theta = -1 + rand.rand(2 * nterms + 1)

    # Synthesize y directly from the model parameters (no noise added).
    y = np.zeros(t.shape)
    if fit_mean:
        y = theta[0] * np.ones(t.shape)
    for i in range(1, nterms + 1):
        y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
        y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)

    # The maximum-likelihood fit at the true frequency reproduces y.
    y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
                         center_data=False, fit_mean=fit_mean)
    assert_allclose(y, y_fit)
| 34.945455 | 85 | 0.605619 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.stats.lombscargle.implementations.mle import design_matrix, periodic_fit
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
| true | true |
f715a4a05c0ac41089e088d453bb1aff5563f056 | 29,345 | py | Python | braintree/webhook_testing_gateway.py | maneeshd/braintree_python | 4aa3f4b8a376ea81bf16a053d840efe55ae13675 | [
"MIT"
] | 1 | 2019-05-23T10:08:54.000Z | 2019-05-23T10:08:54.000Z | braintree/webhook_testing_gateway.py | maneeshd/braintree_python | 4aa3f4b8a376ea81bf16a053d840efe55ae13675 | [
"MIT"
] | null | null | null | braintree/webhook_testing_gateway.py | maneeshd/braintree_python | 4aa3f4b8a376ea81bf16a053d840efe55ae13675 | [
"MIT"
] | 2 | 2019-05-06T01:10:41.000Z | 2019-05-06T01:10:42.000Z | from braintree.util.crypto import Crypto
from braintree.webhook_notification import WebhookNotification
import sys
if sys.version_info[0] == 2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
from datetime import datetime
class WebhookTestingGateway(object):
    def __init__(self, gateway):
        # Keep the gateway so signing uses its configured API keypair.
        self.gateway = gateway
        self.config = gateway.config

    def sample_notification(self, kind, id, source_merchant_id=None):
        """Build a fake signed webhook (bt_signature/bt_payload) for tests.

        The payload is base64-encoded XML and the signature mirrors real
        webhooks: "<public_key>|<hmac-sha1-of-payload>".
        """
        payload = encodebytes(self.__sample_xml(kind, id, source_merchant_id))
        hmac_payload = Crypto.sha1_hmac_hash(self.gateway.config.private_key, payload)
        signature = "%s|%s" % (self.gateway.config.public_key, hmac_payload)
        return {'bt_signature': signature, 'bt_payload': payload}
def __sample_xml(self, kind, id, source_merchant_id):
timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
source_merchant_id_xml = ''
if source_merchant_id is not None:
source_merchant_id_xml = '<source-merchant-id>%s</source-merchant-id>' % source_merchant_id
sample_xml = """
<notification>
<timestamp type="datetime">%s</timestamp>
<kind>%s</kind>
%s
<subject>%s</subject>
</notification>
""" % (timestamp, kind, source_merchant_id_xml, self.__subject_sample_xml(kind, id))
return sample_xml.encode('utf-8')
def __subject_sample_xml(self, kind, id):
if kind == WebhookNotification.Kind.Check:
return self.__check_sample_xml()
if kind == WebhookNotification.Kind.ConnectedMerchantStatusTransitioned:
return self.__connected_merchant_status_transitioned_xml(id)
if kind == WebhookNotification.Kind.ConnectedMerchantPayPalStatusChanged:
return self.__connected_merchant_paypal_status_changed_xml(id)
if kind == WebhookNotification.Kind.SubMerchantAccountApproved:
return self.__merchant_account_approved_sample_xml(id)
elif kind == WebhookNotification.Kind.SubMerchantAccountDeclined:
return self.__merchant_account_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionDisbursed:
return self.__transaction_disbursed_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettled:
return self.__transaction_settled_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettlementDeclined:
return self.__transaction_settlement_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.PartnerMerchantConnected:
return self.__partner_merchant_connected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDisconnected:
return self.__partner_merchant_disconnected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDeclined:
return self.__partner_merchant_declined_sample_xml()
elif kind == WebhookNotification.Kind.OAuthAccessRevoked:
return self.__oauth_access_revocation_sample_xml(id)
elif kind == WebhookNotification.Kind.DisbursementException:
return self.__disbursement_exception_sample_xml(id)
elif kind == WebhookNotification.Kind.Disbursement:
return self.__disbursement_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeOpened:
return self.__dispute_opened_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeLost:
return self.__dispute_lost_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeWon:
return self.__dispute_won_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedSuccessfully:
return self.__subscription_charged_successfully_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedUnsuccessfully:
return self.__subscription_charged_unsuccessfully_sample_xml(id)
elif kind == WebhookNotification.Kind.AccountUpdaterDailyReport:
return self.__account_updater_daily_report_sample_xml()
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentComplete:
return self.__ideal_payment_complete_sample_xml(id)
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentFailed:
return self.__ideal_payment_failed_sample_xml(id)
# NEXT_MAJOR_VERSION remove GrantedPaymentInstrumentUpdate
elif kind == WebhookNotification.Kind.GrantedPaymentInstrumentUpdate:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.GrantorUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.RecipientUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.PaymentMethodRevokedByCustomer:
return self.__payment_method_revoked_by_customer(id)
elif kind == WebhookNotification.Kind.LocalPaymentCompleted:
return self.__local_payment_completed()
else:
return self.__subscription_sample_xml(id)
    def __check_sample_xml(self):
        """Subject payload for the "check" (test ping) notification."""
        return """
        <check type="boolean">
          true
        </check>
        """

    def __transaction_disbursed_sample_xml(self, id):
        """Sample transaction subject including disbursement details."""
        return """
        <transaction>
          <id>%s</id>
          <amount>100</amount>
          <tax-amount>10</tax-amount>
          <disbursement-details>
            <settlement-amount>100</settlement-amount>
            <settlement-currency-exchange-rate>10</settlement-currency-exchange-rate>
            <disbursement-date type="datetime">2013-07-09T18:23:29Z</disbursement-date>
          </disbursement-details>
        </transaction>
        """ % id

    def __transaction_settled_sample_xml(self, id):
        """Sample settled ACH (us_bank_account) transaction subject."""
        return """
        <transaction>
          <id>%s</id>
          <status>settled</status>
          <type>sale</type>
          <currency-iso-code>USD</currency-iso-code>
          <amount>100.00</amount>
          <merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
          <payment-instrument-type>us_bank_account</payment-instrument-type>
          <us-bank-account>
            <routing-number>123456789</routing-number>
            <last-4>1234</last-4>
            <account-type>checking</account-type>
            <account-holder-name>Dan Schulman</account-holder-name>
          </us-bank-account>
          <tax-amount>0</tax-amount>
        </transaction>
        """ % id

    def __transaction_settlement_declined_sample_xml(self, id):
        """Sample settlement-declined ACH transaction subject."""
        return """
        <transaction>
          <id>%s</id>
          <status>settlement_declined</status>
          <type>sale</type>
          <currency-iso-code>USD</currency-iso-code>
          <amount>100.00</amount>
          <merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
          <payment-instrument-type>us_bank_account</payment-instrument-type>
          <us-bank-account>
            <routing-number>123456789</routing-number>
            <last-4>1234</last-4>
            <account-type>checking</account-type>
            <account-holder-name>Dan Schulman</account-holder-name>
          </us-bank-account>
          <tax-amount>0</tax-amount>
        </transaction>
        """ % id
    def __disbursement_exception_sample_xml(self, id):
        """Sample failed disbursement subject (bank rejected; merchant must
        update funding information)."""
        return """
        <disbursement>
          <id>%s</id>
          <transaction-ids type="array">
            <item>afv56j</item>
            <item>kj8hjk</item>
          </transaction-ids>
          <success type="boolean">false</success>
          <retry type="boolean">false</retry>
          <merchant-account>
            <id>merchant_account_token</id>
            <currency-iso-code>USD</currency-iso-code>
            <sub-merchant-account type="boolean">false</sub-merchant-account>
            <status>active</status>
          </merchant-account>
          <amount>100.00</amount>
          <disbursement-date type="date">2014-02-09</disbursement-date>
          <exception-message>bank_rejected</exception-message>
          <follow-up-action>update_funding_information</follow-up-action>
        </disbursement>
        """ % id

    def __disbursement_sample_xml(self, id):
        """Sample successful disbursement subject (no exception fields)."""
        return """
        <disbursement>
          <id>%s</id>
          <transaction-ids type="array">
            <item>afv56j</item>
            <item>kj8hjk</item>
          </transaction-ids>
          <success type="boolean">true</success>
          <retry type="boolean">false</retry>
          <merchant-account>
            <id>merchant_account_token</id>
            <currency-iso-code>USD</currency-iso-code>
            <sub-merchant-account type="boolean">false</sub-merchant-account>
            <status>active</status>
          </merchant-account>
          <amount>100.00</amount>
          <disbursement-date type="date">2014-02-09</disbursement-date>
          <exception-message nil="true"/>
          <follow-up-action nil="true"/>
        </disbursement>
        """ % id
def __dispute_opened_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_opened_sample_xml(id)
else:
return self.__new_dispute_opened_sample_xml(id)
def __dispute_lost_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_lost_sample_xml(id)
else:
return self.__new_dispute_lost_sample_xml(id)
def __dispute_won_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_won_sample_xml(id)
else:
return self.__new_dispute_won_sample_xml(id)
    def __old_dispute_opened_sample_xml(self, id):
        """Legacy-format sample for a newly opened dispute."""
        return """
        <dispute>
          <amount>250.00</amount>
          <currency-iso-code>USD</currency-iso-code>
          <received-date type="date">2014-03-01</received-date>
          <reply-by-date type="date">2014-03-21</reply-by-date>
          <kind>chargeback</kind>
          <status>open</status>
          <reason>fraud</reason>
          <id>%s</id>
          <transaction>
            <id>%s</id>
            <amount>250.00</amount>
          </transaction>
          <date-opened type="date">2014-03-28</date-opened>
        </dispute>
        """ % (id, id)

    def __old_dispute_lost_sample_xml(self, id):
        """Legacy-format sample for a lost dispute."""
        return """
        <dispute>
          <amount>250.00</amount>
          <currency-iso-code>USD</currency-iso-code>
          <received-date type="date">2014-03-01</received-date>
          <reply-by-date type="date">2014-03-21</reply-by-date>
          <kind>chargeback</kind>
          <status>lost</status>
          <reason>fraud</reason>
          <id>%s</id>
          <transaction>
            <id>%s</id>
            <amount>250.00</amount>
          </transaction>
          <date-opened type="date">2014-03-28</date-opened>
        </dispute>
        """ % (id, id)

    def __old_dispute_won_sample_xml(self, id):
        """Legacy-format sample for a won dispute (adds <date-won>)."""
        return """
        <dispute>
          <amount>250.00</amount>
          <currency-iso-code>USD</currency-iso-code>
          <received-date type="date">2014-03-01</received-date>
          <reply-by-date type="date">2014-03-21</reply-by-date>
          <kind>chargeback</kind>
          <status>won</status>
          <reason>fraud</reason>
          <id>%s</id>
          <transaction>
            <id>%s</id>
            <amount>250.00</amount>
          </transaction>
          <date-opened type="date">2014-03-28</date-opened>
          <date-won type="date">2014-09-01</date-won>
        </dispute>
        """ % (id, id)
def __new_dispute_opened_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>open</status>
<updated-at type="datetime">2017-06-16T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
</status-history>
<evidence type="array"/>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_lost_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>lost</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>lost</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)
def __new_dispute_won_sample_xml(self, id):
return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>won</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>won</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
<date-won type=\"date\">2014-09-01</date-won>
</dispute>
""" % (id, id)
def __subscription_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array"></transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % id
def __subscription_charged_successfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>submitted_for_settlement</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __subscription_charged_unsuccessfully_sample_xml(self, id):
return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>failed</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
def __merchant_account_approved_sample_xml(self, id):
return """
<merchant-account>
<id>%s</id>
<status>active</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>active</status>
</master-merchant-account>
</merchant-account>
""" % (id, id)
def __merchant_account_declined_sample_xml(self, id):
return """
<api-error-response>
<message>Credit score is too low</message>
<errors>
<errors type="array"/>
<merchant-account>
<errors type="array">
<error>
<code>82621</code>
<message>Credit score is too low</message>
<attribute type="symbol">base</attribute>
</error>
</errors>
</merchant-account>
</errors>
<merchant-account>
<id>%s</id>
<status>suspended</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>suspended</status>
</master-merchant-account>
</merchant-account>
</api-error-response>
""" % (id, id)
def __partner_merchant_connected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
<public-key>public_key</public-key>
<private-key>private_key</private-key>
<merchant-public-id>public_id</merchant-public-id>
<client-side-encryption-key>cse_key</client-side-encryption-key>
</partner-merchant>
"""
def __partner_merchant_disconnected_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __connected_merchant_status_transitioned_xml(self, id):
return """
<connected-merchant-status-transitioned>
<status>new_status</status>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-status-transitioned>
""" % id
def __connected_merchant_paypal_status_changed_xml(self, id):
return """
<connected-merchant-paypal-status-changed>
<action>link</action>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-paypal-status-changed>
""" % id
def __partner_merchant_declined_sample_xml(self):
return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""
def __oauth_access_revocation_sample_xml(self, id):
return """
<oauth-application-revocation>
<merchant-id>%s</merchant-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</oauth-application-revocation>
""" % id
def __account_updater_daily_report_sample_xml(self):
return """
<account-updater-daily-report>
<report-date type="date">2016-01-14</report-date>
<report-url>link-to-csv-report</report-url>
</account-updater-daily-report>
"""
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
def __ideal_payment_complete_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>COMPLETE</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
def __ideal_payment_failed_sample_xml(self, id):
return """
<ideal-payment>
<id>%s</id>
<status>FAILED</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
def __granted_payment_instrument_update(self):
return """
<granted-payment-instrument-update>
<grant-owner-merchant-id>vczo7jqrpwrsi2px</grant-owner-merchant-id>
<grant-recipient-merchant-id>cf0i8wgarszuy6hc</grant-recipient-merchant-id>
<payment-method-nonce>
<nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</nonce>
<consumed type="boolean">false</consumed>
<locked type="boolean">false</locked>
</payment-method-nonce>
<token>abc123z</token>
<updated-fields type="array">
<item>expiration-month</item>
<item>expiration-year</item>
</updated-fields>
</granted-payment-instrument-update>
"""
def __payment_method_revoked_by_customer(self, id):
return """
<paypal-account>
<billing-agreement-id>a-billing-agreement-id</billing-agreement-id>
<created-at type="datetime">2019-01-01T12:00:00Z</created-at>
<customer-id>a-customer-id</customer-id>
<default type="boolean">true</default>
<email>name@email.com</email>
<global-id>cGF5bWVudG1ldGhvZF9jaDZieXNz</global-id>
<image-url>https://assets.braintreegateway.com/payment_method_logo/paypal.png?environment=test</image-url>
<subscriptions type="array"/>
<token>%s</token>
<updated-at type="datetime">2019-01-02T12:00:00Z</updated-at>
<is-channel-initiated nil="true"/>
<payer-id>a-payer-id</payer-id>
<payer-info nil="true"/>
<limited-use-order-id nil="true"/>
<revoked-at type="datetime">2019-01-02T12:00:00Z</revoked-at>
</paypal-account>
""" % id
def __local_payment_completed(self):
return """
<local-payment>
<payment-id>a-payment-id</payment-id>
<payer-id>a-payer-id</payer-id>
<payment-method-nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</payment-method-nonce>
<transaction>
<id>1</id>
<status>authorizing</status>
<amount>10.00</amount>
<order-id>order1234</order-id>
</transaction>
</local-payment>
"""
| 43.474074 | 129 | 0.566434 | from braintree.util.crypto import Crypto
from braintree.webhook_notification import WebhookNotification
import sys
if sys.version_info[0] == 2:
from base64 import encodestring as encodebytes
else:
from base64 import encodebytes
from datetime import datetime
class WebhookTestingGateway(object):
    """Builds signed sample webhook notifications so integrators can
    exercise their webhook handlers without the gateway sending real events.
    """

    def __init__(self, gateway):
        self.gateway = gateway
        # Shortcut to the gateway configuration (holds the key pair used
        # to sign sample payloads in sample_notification).
        self.config = gateway.config
def sample_notification(self, kind, id, source_merchant_id=None):
payload = encodebytes(self.__sample_xml(kind, id, source_merchant_id))
hmac_payload = Crypto.sha1_hmac_hash(self.gateway.config.private_key, payload)
signature = "%s|%s" % (self.gateway.config.public_key, hmac_payload)
return {'bt_signature': signature, 'bt_payload': payload}
def __sample_xml(self, kind, id, source_merchant_id):
timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
source_merchant_id_xml = ''
if source_merchant_id is not None:
source_merchant_id_xml = '<source-merchant-id>%s</source-merchant-id>' % source_merchant_id
sample_xml = """
<notification>
<timestamp type="datetime">%s</timestamp>
<kind>%s</kind>
%s
<subject>%s</subject>
</notification>
""" % (timestamp, kind, source_merchant_id_xml, self.__subject_sample_xml(kind, id))
return sample_xml.encode('utf-8')
def __subject_sample_xml(self, kind, id):
if kind == WebhookNotification.Kind.Check:
return self.__check_sample_xml()
if kind == WebhookNotification.Kind.ConnectedMerchantStatusTransitioned:
return self.__connected_merchant_status_transitioned_xml(id)
if kind == WebhookNotification.Kind.ConnectedMerchantPayPalStatusChanged:
return self.__connected_merchant_paypal_status_changed_xml(id)
if kind == WebhookNotification.Kind.SubMerchantAccountApproved:
return self.__merchant_account_approved_sample_xml(id)
elif kind == WebhookNotification.Kind.SubMerchantAccountDeclined:
return self.__merchant_account_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionDisbursed:
return self.__transaction_disbursed_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettled:
return self.__transaction_settled_sample_xml(id)
elif kind == WebhookNotification.Kind.TransactionSettlementDeclined:
return self.__transaction_settlement_declined_sample_xml(id)
elif kind == WebhookNotification.Kind.PartnerMerchantConnected:
return self.__partner_merchant_connected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDisconnected:
return self.__partner_merchant_disconnected_sample_xml()
elif kind == WebhookNotification.Kind.PartnerMerchantDeclined:
return self.__partner_merchant_declined_sample_xml()
elif kind == WebhookNotification.Kind.OAuthAccessRevoked:
return self.__oauth_access_revocation_sample_xml(id)
elif kind == WebhookNotification.Kind.DisbursementException:
return self.__disbursement_exception_sample_xml(id)
elif kind == WebhookNotification.Kind.Disbursement:
return self.__disbursement_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeOpened:
return self.__dispute_opened_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeLost:
return self.__dispute_lost_sample_xml(id)
elif kind == WebhookNotification.Kind.DisputeWon:
return self.__dispute_won_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedSuccessfully:
return self.__subscription_charged_successfully_sample_xml(id)
elif kind == WebhookNotification.Kind.SubscriptionChargedUnsuccessfully:
return self.__subscription_charged_unsuccessfully_sample_xml(id)
elif kind == WebhookNotification.Kind.AccountUpdaterDailyReport:
return self.__account_updater_daily_report_sample_xml()
elif kind == WebhookNotification.Kind.IdealPaymentComplete:
return self.__ideal_payment_complete_sample_xml(id)
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
elif kind == WebhookNotification.Kind.IdealPaymentFailed:
return self.__ideal_payment_failed_sample_xml(id)
elif kind == WebhookNotification.Kind.GrantedPaymentInstrumentUpdate:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.GrantorUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.RecipientUpdatedGrantedPaymentMethod:
return self.__granted_payment_instrument_update()
elif kind == WebhookNotification.Kind.PaymentMethodRevokedByCustomer:
return self.__payment_method_revoked_by_customer(id)
elif kind == WebhookNotification.Kind.LocalPaymentCompleted:
return self.__local_payment_completed()
else:
return self.__subscription_sample_xml(id)
    def __check_sample_xml(self):
        """Subject payload for the `check` (test-your-endpoint) kind."""
        return """
<check type="boolean">
true
</check>
"""

    def __transaction_disbursed_sample_xml(self, id):
        """Subject payload for `transaction_disbursed`; %s receives the transaction id."""
        return """
<transaction>
<id>%s</id>
<amount>100</amount>
<tax-amount>10</tax-amount>
<disbursement-details>
<settlement-amount>100</settlement-amount>
<settlement-currency-exchange-rate>10</settlement-currency-exchange-rate>
<disbursement-date type="datetime">2013-07-09T18:23:29Z</disbursement-date>
</disbursement-details>
</transaction>
""" % id

    def __transaction_settled_sample_xml(self, id):
        """Subject payload for `transaction_settled` (US bank account sale)."""
        return """
<transaction>
<id>%s</id>
<status>settled</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id

    def __transaction_settlement_declined_sample_xml(self, id):
        """Same shape as the settled sample, but with status settlement_declined."""
        return """
<transaction>
<id>%s</id>
<status>settlement_declined</status>
<type>sale</type>
<currency-iso-code>USD</currency-iso-code>
<amount>100.00</amount>
<merchant-account-id>ogaotkivejpfayqfeaimuktty</merchant-account-id>
<payment-instrument-type>us_bank_account</payment-instrument-type>
<us-bank-account>
<routing-number>123456789</routing-number>
<last-4>1234</last-4>
<account-type>checking</account-type>
<account-holder-name>Dan Schulman</account-holder-name>
</us-bank-account>
<tax-amount>0</tax-amount>
</transaction>
""" % id

    def __disbursement_exception_sample_xml(self, id):
        """Failed-disbursement payload: success=false with exception details."""
        return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">false</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message>bank_rejected</exception-message>
<follow-up-action>update_funding_information</follow-up-action>
</disbursement>
""" % id

    def __disbursement_sample_xml(self, id):
        """Successful-disbursement payload: success=true, nil exception fields."""
        return """
<disbursement>
<id>%s</id>
<transaction-ids type="array">
<item>afv56j</item>
<item>kj8hjk</item>
</transaction-ids>
<success type="boolean">true</success>
<retry type="boolean">false</retry>
<merchant-account>
<id>merchant_account_token</id>
<currency-iso-code>USD</currency-iso-code>
<sub-merchant-account type="boolean">false</sub-merchant-account>
<status>active</status>
</merchant-account>
<amount>100.00</amount>
<disbursement-date type="date">2014-02-09</disbursement-date>
<exception-message nil="true"/>
<follow-up-action nil="true"/>
</disbursement>
""" % id
def __dispute_opened_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_opened_sample_xml(id)
else:
return self.__new_dispute_opened_sample_xml(id)
def __dispute_lost_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_lost_sample_xml(id)
else:
return self.__new_dispute_lost_sample_xml(id)
def __dispute_won_sample_xml(self, id):
if id == "legacy_dispute_id":
return self.__old_dispute_won_sample_xml(id)
else:
return self.__new_dispute_won_sample_xml(id)
    def __old_dispute_opened_sample_xml(self, id):
        """Legacy-format 'dispute opened' payload; both %s slots receive id."""
        return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>open</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)

    def __old_dispute_lost_sample_xml(self, id):
        """Legacy-format 'dispute lost' payload; only the status differs."""
        return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>lost</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
</dispute>
""" % (id, id)

    def __old_dispute_won_sample_xml(self, id):
        """Legacy-format 'dispute won' payload; adds a <date-won> element."""
        return """
<dispute>
<amount>250.00</amount>
<currency-iso-code>USD</currency-iso-code>
<received-date type="date">2014-03-01</received-date>
<reply-by-date type="date">2014-03-21</reply-by-date>
<kind>chargeback</kind>
<status>won</status>
<reason>fraud</reason>
<id>%s</id>
<transaction>
<id>%s</id>
<amount>250.00</amount>
</transaction>
<date-opened type="date">2014-03-28</date-opened>
<date-won type="date">2014-09-01</date-won>
</dispute>
""" % (id, id)

    def __new_dispute_opened_sample_xml(self, id):
        """Current-format 'dispute opened' payload (status history, evidence)."""
        return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>open</status>
<updated-at type="datetime">2017-06-16T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
</status-history>
<evidence type="array"/>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)

    def __new_dispute_lost_sample_xml(self, id):
        """Current-format 'dispute lost' payload with evidence entries."""
        return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>lost</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>lost</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
</dispute>
""" % (id, id)

    def __new_dispute_won_sample_xml(self, id):
        """Current-format 'dispute won' payload; adds a <date-won> element."""
        return """
<dispute>
<id>%s</id>
<amount>100.00</amount>
<amount-disputed>100.00</amount-disputed>
<amount-won>95.00</amount-won>
<case-number>CASE-12345</case-number>
<created-at type="datetime">2017-06-16T20:44:41Z</created-at>
<currency-iso-code>USD</currency-iso-code>
<forwarded-comments nil="true"/>
<kind>chargeback</kind>
<merchant-account-id>ytnlulaloidoqwvzxjrdqputg</merchant-account-id>
<reason>fraud</reason>
<reason-code nil="true"/>
<reason-description nil="true"/>
<received-date type="date">2016-02-15</received-date>
<reference-number>REF-9876</reference-number>
<reply-by-date type="date">2016-02-22</reply-by-date>
<status>won</status>
<updated-at type="datetime">2017-06-21T20:44:41Z</updated-at>
<original-dispute-id>9qde5qgp</original-dispute-id>
<status-history type="array">
<status-history>
<status>open</status>
<timestamp type="datetime">2017-06-16T20:44:41Z</timestamp>
</status-history>
<status-history>
<status>won</status>
<timestamp type="datetime">2017-06-25T20:50:55Z</timestamp>
</status-history>
</status-history>
<evidence type="array">
<evidence>
<id>rxtngk9j5j93tsrq</id>
<comments nil="true"/>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url>s3.amazonaws.com/foo.jpg</url>
</evidence>
<evidence>
<id>88cfb8dd</id>
<comments>text evidence</comments>
<created-at type="datetime">2017-06-21T20:44:42Z</created-at>
<sent-to-processor-at nil="true"/>
<url nil="true"/>
</evidence>
</evidence>
<transaction>
<id>%s</id>
<amount>100.00</amount>
<created-at>2017-06-21T20:44:41Z</created-at>
<order-id nil="true"/>
<purchase-order-number nil="true"/>
<payment-instrument-subtype>Visa</payment-instrument-subtype>
</transaction>
<date-opened type=\"date\">2014-03-28</date-opened>
<date-won type=\"date\">2014-09-01</date-won>
</dispute>
""" % (id, id)
    def __subscription_sample_xml(self, id):
        """Bare subscription payload; also the fallback for unknown kinds."""
        return """
<subscription>
<id>%s</id>
<transactions type="array"></transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % id

    def __subscription_charged_successfully_sample_xml(self, id):
        """Subscription payload with one submitted_for_settlement transaction."""
        return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>submitted_for_settlement</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)

    def __subscription_charged_unsuccessfully_sample_xml(self, id):
        """Subscription payload with one failed transaction."""
        return """
<subscription>
<id>%s</id>
<transactions type="array">
<transaction>
<id>%s</id>
<status>failed</status>
<amount>49.99</amount>
<tax_amount></tax_amount>
</transaction>
</transactions>
<add_ons type="array"></add_ons>
<discounts type="array"></discounts>
</subscription>
""" % (id, id)
    def __merchant_account_approved_sample_xml(self, id):
        """Sub-merchant approval payload; master account id is derived from id."""
        return """
<merchant-account>
<id>%s</id>
<status>active</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>active</status>
</master-merchant-account>
</merchant-account>
""" % (id, id)

    def __merchant_account_declined_sample_xml(self, id):
        """Sub-merchant decline payload wrapped in an api-error-response."""
        return """
<api-error-response>
<message>Credit score is too low</message>
<errors>
<errors type="array"/>
<merchant-account>
<errors type="array">
<error>
<code>82621</code>
<message>Credit score is too low</message>
<attribute type="symbol">base</attribute>
</error>
</errors>
</merchant-account>
</errors>
<merchant-account>
<id>%s</id>
<status>suspended</status>
<master-merchant-account>
<id>master_ma_for_%s</id>
<status>suspended</status>
</master-merchant-account>
</merchant-account>
</api-error-response>
""" % (id, id)
    def __partner_merchant_connected_sample_xml(self):
        """Partner-merchant connected payload (fixed sample credentials)."""
        return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
<public-key>public_key</public-key>
<private-key>private_key</private-key>
<merchant-public-id>public_id</merchant-public-id>
<client-side-encryption-key>cse_key</client-side-encryption-key>
</partner-merchant>
"""

    def __partner_merchant_disconnected_sample_xml(self):
        """Partner-merchant disconnected payload (id only)."""
        return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""

    def __connected_merchant_status_transitioned_xml(self, id):
        """Connected-merchant status transition payload; %s is the merchant public id."""
        return """
<connected-merchant-status-transitioned>
<status>new_status</status>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-status-transitioned>
""" % id

    def __connected_merchant_paypal_status_changed_xml(self, id):
        """Connected-merchant PayPal status change payload."""
        return """
<connected-merchant-paypal-status-changed>
<action>link</action>
<merchant-public-id>%s</merchant-public-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</connected-merchant-paypal-status-changed>
""" % id

    def __partner_merchant_declined_sample_xml(self):
        """Partner-merchant declined payload (same shape as disconnected)."""
        return """
<partner-merchant>
<partner-merchant-id>abc123</partner-merchant-id>
</partner-merchant>
"""

    def __oauth_access_revocation_sample_xml(self, id):
        """OAuth access revocation payload; %s is the merchant id."""
        return """
<oauth-application-revocation>
<merchant-id>%s</merchant-id>
<oauth-application-client-id>oauth_application_client_id</oauth-application-client-id>
</oauth-application-revocation>
""" % id

    def __account_updater_daily_report_sample_xml(self):
        """Account-updater daily report payload (fixed date and report URL)."""
        return """
<account-updater-daily-report>
<report-date type="date">2016-01-14</report-date>
<report-url>link-to-csv-report</report-url>
</account-updater-daily-report>
"""
    # NEXT_MAJOR_VERSION Remove this method as legacy Ideal has been removed/disabled in the Braintree Gateway
    # DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
    def __ideal_payment_complete_sample_xml(self, id):
        """Legacy iDEAL COMPLETE payment payload; %s is the payment id."""
        return """
<ideal-payment>
<id>%s</id>
<status>COMPLETE</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id

    # NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
    # DEPRECATED If you're looking to accept iDEAL as a payment method contact accounts@braintreepayments.com for a solution.
    def __ideal_payment_failed_sample_xml(self, id):
        """Legacy iDEAL FAILED payment payload; only the status differs."""
        return """
<ideal-payment>
<id>%s</id>
<status>FAILED</status>
<issuer>ABCISSUER</issuer>
<order-id>ORDERABC</order-id>
<currency>EUR</currency>
<amount>10.00</amount>
<created-at>2016-11-29T23:27:34.547Z</created-at>
<approval-url>https://example.com</approval-url>
<ideal-transaction-id>1234567890</ideal-transaction-id>
</ideal-payment>
""" % id
def __granted_payment_instrument_update(self):
return """
<granted-payment-instrument-update>
<grant-owner-merchant-id>vczo7jqrpwrsi2px</grant-owner-merchant-id>
<grant-recipient-merchant-id>cf0i8wgarszuy6hc</grant-recipient-merchant-id>
<payment-method-nonce>
<nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</nonce>
<consumed type="boolean">false</consumed>
<locked type="boolean">false</locked>
</payment-method-nonce>
<token>abc123z</token>
<updated-fields type="array">
<item>expiration-month</item>
<item>expiration-year</item>
</updated-fields>
</granted-payment-instrument-update>
"""
def __payment_method_revoked_by_customer(self, id):
    """Return sample webhook XML for a PayPal payment method revoked by the customer.

    The given *id* is substituted into the <token> element.
    """
    template = """
        <paypal-account>
          <billing-agreement-id>a-billing-agreement-id</billing-agreement-id>
          <created-at type="datetime">2019-01-01T12:00:00Z</created-at>
          <customer-id>a-customer-id</customer-id>
          <default type="boolean">true</default>
          <email>name@email.com</email>
          <global-id>cGF5bWVudG1ldGhvZF9jaDZieXNz</global-id>
          <image-url>https://assets.braintreegateway.com/payment_method_logo/paypal.png?environment=test</image-url>
          <subscriptions type="array"/>
          <token>%s</token>
          <updated-at type="datetime">2019-01-02T12:00:00Z</updated-at>
          <is-channel-initiated nil="true"/>
          <payer-id>a-payer-id</payer-id>
          <payer-info nil="true"/>
          <limited-use-order-id nil="true"/>
          <revoked-at type="datetime">2019-01-02T12:00:00Z</revoked-at>
        </paypal-account>
    """
    return template % id
def __local_payment_completed(self):
    """Return a sample local-payment-completed webhook XML payload."""
    payment_xml = """
        <local-payment>
          <payment-id>a-payment-id</payment-id>
          <payer-id>a-payer-id</payer-id>
          <payment-method-nonce>ee257d98-de40-47e8-96b3-a6954ea7a9a4</payment-method-nonce>
          <transaction>
            <id>1</id>
            <status>authorizing</status>
            <amount>10.00</amount>
            <order-id>order1234</order-id>
          </transaction>
        </local-payment>
    """
    return payment_xml
| true | true |
f715a55d3a4d0e4ed9e635af1fb7092bd4dc3fdc | 2,188 | py | Python | project-1-command-line/main.py | jadry92/Course-data-ing-with-python | 57d4eb1564a2379497546ff28e02377fb07ba0b9 | [
"MIT"
] | null | null | null | project-1-command-line/main.py | jadry92/Course-data-ing-with-python | 57d4eb1564a2379497546ff28e02377fb07ba0b9 | [
"MIT"
] | null | null | null | project-1-command-line/main.py | jadry92/Course-data-ing-with-python | 57d4eb1564a2379497546ff28e02377fb07ba0b9 | [
"MIT"
] | null | null | null | import argparse
import logging
import datetime
import csv
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
# local imports
from common import config
import news_page_objects as news
# Configure root logging once at import time; use a module-scoped logger below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _news_scraper(news_sites_uid):
    """Scrape every article linked from the given news site's homepage and save them.

    :param news_sites_uid: key into the 'news_sites' section of the config.
    """
    host = config()['news_sites'][news_sites_uid]['url']
    # Use the module logger (not the root logger) so output is attributed to
    # this module, consistently with _fetch_article.
    logger.info('Beginning scraper for %s', host)

    homepage = news.HomePage(news_sites_uid, host)

    articles = []
    for link in homepage.articles_links:
        # Replaces the leftover debug print() calls with proper logging.
        logger.info('Found article link: %s', link)
        article = _fetch_article(news_sites_uid, host, link)
        if article:
            articles.append(article)

    _save_articles(news_sites_uid, articles)
    logger.info('Saved %d articles for %s', len(articles), news_sites_uid)
def _save_articles(news_sites_uid, articles):
    """Write the scraped articles to '<uid>_<YYYY-MM-DD>_articles.csv' in the cwd.

    The CSV columns are the articles' public attribute names (non-underscore
    entries of dir(), i.e. alphabetical). Does nothing when *articles* is
    empty (the original crashed with IndexError on articles[0]).
    """
    if not articles:
        return

    now = datetime.datetime.now().strftime('%Y-%m-%d')
    out_file_name = '{uid}_{date}_articles.csv'.format(
        uid=news_sites_uid,
        date=now
    )

    # 'prop' instead of 'property' to avoid shadowing the builtin.
    csv_headers = [prop for prop in dir(articles[0]) if not prop.startswith('_')]

    # newline='' lets the csv module control line endings (avoids blank
    # rows on Windows); plain 'w' is enough — we never read the handle.
    with open(out_file_name, mode='w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(csv_headers)
        for article in articles:
            row = [str(getattr(article, prop)) for prop in csv_headers]
            writer.writerow(row)
def _fetch_article(news_sites_uid, host, link):
    """Fetch one article page; return the ArticlePage or None on failure.

    Returns None when the HTTP request fails or the fetched page has no body.
    (*host* is unused but kept for signature compatibility with callers.)
    """
    logger.info('Start fetching article at %s', link)

    article = None
    try:
        article = news.ArticlePage(news_sites_uid, link)
    except (HTTPError, MaxRetryError) as e:
        # Log the actual failure instead of discarding the exception;
        # also fixes the "coudn't" typo in the message.
        logger.error('The article couldn\'t be fetched: %s', e)

    if article and not article.body:
        logger.warning('There isn\'t a body in this page.')
        return None

    return article
if __name__ == '__main__':
    # CLI entry point: one positional argument restricted to configured sites.
    site_choices = list(config()['news_sites'].keys())

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('news_sites',
                            help='The news site that you want to scrape',
                            type=str,
                            choices=site_choices)

    parsed = arg_parser.parse_args()
    _news_scraper(parsed.news_sites)
| 29.567568 | 95 | 0.673675 | import argparse
import logging
import datetime
import csv
from requests.exceptions import HTTPError
from urllib3.exceptions import MaxRetryError
from common import config
import news_page_objects as news
# Configure root logging once at import time; use a module-scoped logger below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _news_scraper(news_sites_uid):
    """Scrape every article linked from the given site's homepage and save them.

    NOTE(review): this calls the root logger (logging.info) rather than the
    module-level `logger`, and leaves debug print() calls in place.
    """
    host = config()['news_sites'][news_sites_uid]['url']
    logging.info('Beginning scraper for {}'.format(host))
    homepage = news.HomePage(news_sites_uid, host)
    articles = []
    for link in homepage.articles_links:
        print(link)  # debug output: link about to be fetched
        article = _fetch_article(news_sites_uid, host, link)
        if article:
            articles.append(article)
    _save_articles(news_sites_uid, articles)
    print(len(articles))  # debug output: number of articles saved
def _save_articles(news_sites_uid, articles):
    """Dump the scraped articles into '<uid>_<YYYY-MM-DD>_articles.csv'.

    Columns are the article object's public attribute names (non-underscore
    dir() entries, alphabetical).

    NOTE(review): raises IndexError when `articles` is empty (articles[0]);
    the lambda parameter `property` shadows the builtin; csv files should be
    opened with newline='' to avoid blank rows on Windows.
    """
    now = datetime.datetime.now().strftime('%Y-%m-%d')
    # '{datatime}' is only an internal (misspelled) placeholder name.
    out_file_name = '{news_sites_uid}_{datatime}_articles.csv'.format(
        news_sites_uid=news_sites_uid,
        datatime=now
    )
    csv_headers = list(filter(lambda property: not property.startswith('_'), dir(articles[0])))
    with open(out_file_name, mode='w+') as f:
        writer = csv.writer(f)
        writer.writerow(csv_headers)
        for article in articles:
            row = [str(getattr(article, prop)) for prop in csv_headers]
            writer.writerow(row)
def _fetch_article(news_sites_uid, host, link):
    """Fetch one article page; return the ArticlePage or None on failure.

    Returns None when the HTTP request fails or the page has no body.
    NOTE(review): the caught exception `e` is never logged, and the error
    message contains a typo ("coudn't"); `host` is unused.
    """
    logger.info('Start fetching article at {}'.format(link))
    article = None
    try:
        article = news.ArticlePage(news_sites_uid, link)
    except (HTTPError, MaxRetryError) as e:
        logger.error('The article coudn\'t be fetched')
    if article and not article.body:
        logger.warning('There isn\'t a body in this page. ')
        return None
    return article
if __name__ == '__main__':
    # CLI entry point: one positional argument restricted to configured sites.
    parser = argparse.ArgumentParser()
    news_sites_choices = list(config()['news_sites'].keys())
    parser.add_argument('news_sites',
                        help='The news site that you want to scrape',
                        type=str,
                        choices=news_sites_choices)
    args = parser.parse_args()
    # Kick off the scraper for the chosen site.
    _news_scraper(args.news_sites)
| true | true |
f715a596a287133251f9a3c65e63acf439e485b9 | 18,541 | py | Python | powerbot/models/order_entry.py | rogerarmstrong/python-samples | df73b5dab70090f820fc47096b0ae5490c7779b6 | [
"Apache-2.0"
] | null | null | null | powerbot/models/order_entry.py | rogerarmstrong/python-samples | df73b5dab70090f820fc47096b0ae5490c7779b6 | [
"Apache-2.0"
] | null | null | null | powerbot/models/order_entry.py | rogerarmstrong/python-samples | df73b5dab70090f820fc47096b0ae5490c7779b6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Powerbot Server
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class OrderEntry(object):
    """Swagger model for an order entry (originally swagger-codegen generated).

    The twenty data attributes are exposed as validating properties that are
    attached programmatically below (see ``_order_entry_accessor``), replacing
    ~400 lines of duplicated getter/setter boilerplate while preserving the
    exact validation behaviour and error messages of the generated code:

    * required fields (``_REQUIRED``) reject ``None``
    * enumerated fields (``_ENUMS``) reject values outside the allowed set
    * length-limited fields (``_MAX_LENGTHS``) reject over-long strings
    """

    # attribute name -> swagger type; also fixes the declaration order.
    swagger_types = {
        'side': 'str',
        'prod': 'str',
        'quantity': 'float',
        'price': 'float',
        'display_qty': 'int',
        'contract_id': 'int',
        'contract_name': 'str',
        'cl_ordr_id': 'str',
        'clearing_acct_type': 'str',
        'ordr_exe_restriction': 'str',
        'pre_arranged': 'bool',
        'pre_arranged_acct': 'str',
        'type': 'str',
        'validity_res': 'str',
        'state': 'str',
        'validity_date': 'datetime',
        'txt': 'str',
        'ppd': 'int',
        'dlvry_start': 'datetime',
        'dlvry_end': 'datetime'
    }

    # python attribute name -> JSON key in the API payload.
    attribute_map = {
        'side': 'side',
        'prod': 'prod',
        'quantity': 'quantity',
        'price': 'price',
        'display_qty': 'displayQty',
        'contract_id': 'contractId',
        'contract_name': 'contractName',
        'cl_ordr_id': 'clOrdrId',
        'clearing_acct_type': 'clearingAcctType',
        'ordr_exe_restriction': 'ordrExeRestriction',
        'pre_arranged': 'preArranged',
        'pre_arranged_acct': 'preArrangedAcct',
        'type': 'type',
        'validity_res': 'validityRes',
        'state': 'state',
        'validity_date': 'validityDate',
        'txt': 'txt',
        'ppd': 'ppd',
        'dlvry_start': 'dlvryStart',
        'dlvry_end': 'dlvryEnd'
    }

    # Validation metadata consumed by the generated properties.
    _REQUIRED = frozenset(('prod', 'quantity', 'price', 'clearing_acct_type'))
    _ENUMS = {
        'side': ["SELL", "BUY"],
        'ordr_exe_restriction': ["FOK", "IOC", "NON", "AON", "AU"],
        'type': ["B", "O", "I", "L", "S", "H", "C", "N", "E"],
        'validity_res': ["GFS", "GTD", "NON"],
        'state': ["ACTI", "HIBE"],
    }
    _MAX_LENGTHS = {'cl_ordr_id': 40, 'txt': 250}

    def __init__(self, side=None, prod=None, quantity=None, price=None,
                 display_qty=None, contract_id=None, contract_name=None,
                 cl_ordr_id=None, clearing_acct_type=None,
                 ordr_exe_restriction='NON', pre_arranged=False,
                 pre_arranged_acct=None, type='O', validity_res='GFS',
                 state=None, validity_date=None, txt=None, ppd=None,
                 dlvry_start=None, dlvry_end=None):  # noqa: E501
        """OrderEntry - a model defined in Swagger.

        Raises ValueError when a required field is None or a constrained
        field receives an invalid value.
        """
        values = dict(locals())  # snapshot of the constructor arguments
        values.pop('self')
        # Backing slots start out as None; `discriminator` mirrors the
        # generated code (unused but part of the public surface).
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # Assign through the validating properties.  Required fields are
        # always assigned (so a missing one raises); optional fields only
        # when a non-None value was supplied.  dict order == parameter
        # order on Python 3.7+, so errors surface in declaration order.
        for name in self.swagger_types:
            value = values[name]
            if name in self._REQUIRED or value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: item.to_dict() if hasattr(item, "to_dict") else item
                                for key, item in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OrderEntry):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _order_entry_accessor(name):
    """Build the validating property for OrderEntry attribute *name*."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        # Required fields must never be None.
        if name in OrderEntry._REQUIRED and value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
        # Enumerated fields only accept the listed values.
        allowed_values = OrderEntry._ENUMS.get(name)
        if allowed_values is not None and value not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}` ({1}), must be one of {2}"  # noqa: E501
                .format(name, value, allowed_values)
            )
        # Length-limited string fields (only checked for non-None values).
        max_length = OrderEntry._MAX_LENGTHS.get(name)
        if max_length is not None and value is not None and len(value) > max_length:
            raise ValueError(
                "Invalid value for `%s`, length must be less than or equal to `%d`"
                % (name, max_length))  # noqa: E501
        setattr(self, private, value)

    return property(_get, _set)


# Attach one validating property per declared attribute.
for _name in OrderEntry.swagger_types:
    setattr(OrderEntry, _name, _order_entry_accessor(_name))
del _name
| 28.656878 | 372 | 0.584596 |
import pprint
import re
import six
class OrderEntry(object):
    """Swagger model for an order entry (originally swagger-codegen generated).

    The twenty data attributes are exposed as validating properties that are
    attached programmatically below (see ``_order_entry_accessor``), replacing
    ~400 lines of duplicated getter/setter boilerplate while preserving the
    exact validation behaviour and error messages of the generated code:

    * required fields (``_REQUIRED``) reject ``None``
    * enumerated fields (``_ENUMS``) reject values outside the allowed set
    * length-limited fields (``_MAX_LENGTHS``) reject over-long strings
    """

    # attribute name -> swagger type; also fixes the declaration order.
    swagger_types = {
        'side': 'str',
        'prod': 'str',
        'quantity': 'float',
        'price': 'float',
        'display_qty': 'int',
        'contract_id': 'int',
        'contract_name': 'str',
        'cl_ordr_id': 'str',
        'clearing_acct_type': 'str',
        'ordr_exe_restriction': 'str',
        'pre_arranged': 'bool',
        'pre_arranged_acct': 'str',
        'type': 'str',
        'validity_res': 'str',
        'state': 'str',
        'validity_date': 'datetime',
        'txt': 'str',
        'ppd': 'int',
        'dlvry_start': 'datetime',
        'dlvry_end': 'datetime'
    }

    # python attribute name -> JSON key in the API payload.
    attribute_map = {
        'side': 'side',
        'prod': 'prod',
        'quantity': 'quantity',
        'price': 'price',
        'display_qty': 'displayQty',
        'contract_id': 'contractId',
        'contract_name': 'contractName',
        'cl_ordr_id': 'clOrdrId',
        'clearing_acct_type': 'clearingAcctType',
        'ordr_exe_restriction': 'ordrExeRestriction',
        'pre_arranged': 'preArranged',
        'pre_arranged_acct': 'preArrangedAcct',
        'type': 'type',
        'validity_res': 'validityRes',
        'state': 'state',
        'validity_date': 'validityDate',
        'txt': 'txt',
        'ppd': 'ppd',
        'dlvry_start': 'dlvryStart',
        'dlvry_end': 'dlvryEnd'
    }

    # Validation metadata consumed by the generated properties.
    _REQUIRED = frozenset(('prod', 'quantity', 'price', 'clearing_acct_type'))
    _ENUMS = {
        'side': ["SELL", "BUY"],
        'ordr_exe_restriction': ["FOK", "IOC", "NON", "AON", "AU"],
        'type': ["B", "O", "I", "L", "S", "H", "C", "N", "E"],
        'validity_res': ["GFS", "GTD", "NON"],
        'state': ["ACTI", "HIBE"],
    }
    _MAX_LENGTHS = {'cl_ordr_id': 40, 'txt': 250}

    def __init__(self, side=None, prod=None, quantity=None, price=None,
                 display_qty=None, contract_id=None, contract_name=None,
                 cl_ordr_id=None, clearing_acct_type=None,
                 ordr_exe_restriction='NON', pre_arranged=False,
                 pre_arranged_acct=None, type='O', validity_res='GFS',
                 state=None, validity_date=None, txt=None, ppd=None,
                 dlvry_start=None, dlvry_end=None):  # noqa: E501
        """OrderEntry - a model defined in Swagger.

        Raises ValueError when a required field is None or a constrained
        field receives an invalid value.
        """
        values = dict(locals())  # snapshot of the constructor arguments
        values.pop('self')
        # Backing slots start out as None; `discriminator` mirrors the
        # generated code (unused but part of the public surface).
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # Assign through the validating properties.  Required fields are
        # always assigned (so a missing one raises); optional fields only
        # when a non-None value was supplied.  dict order == parameter
        # order on Python 3.7+, so errors surface in declaration order.
        for name in self.swagger_types:
            value = values[name]
            if name in self._REQUIRED or value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: item.to_dict() if hasattr(item, "to_dict") else item
                                for key, item in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OrderEntry):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _order_entry_accessor(name):
    """Build the validating property for OrderEntry attribute *name*."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        # Required fields must never be None.
        if name in OrderEntry._REQUIRED and value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
        # Enumerated fields only accept the listed values.
        allowed_values = OrderEntry._ENUMS.get(name)
        if allowed_values is not None and value not in allowed_values:
            raise ValueError(
                "Invalid value for `{0}` ({1}), must be one of {2}"  # noqa: E501
                .format(name, value, allowed_values)
            )
        # Length-limited string fields (only checked for non-None values).
        max_length = OrderEntry._MAX_LENGTHS.get(name)
        if max_length is not None and value is not None and len(value) > max_length:
            raise ValueError(
                "Invalid value for `%s`, length must be less than or equal to `%d`"
                % (name, max_length))  # noqa: E501
        setattr(self, private, value)

    return property(_get, _set)


# Attach one validating property per declared attribute.
for _name in OrderEntry.swagger_types:
    setattr(OrderEntry, _name, _order_entry_accessor(_name))
del _name
| true | true |
f715a6870b84172a6bce55c32434e579a2ef0c2a | 6,133 | py | Python | output/models/ms_data/element/elem_z018_xsd/elem_z018.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/element/elem_z018_xsd/elem_z018.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/element/elem_z018_xsd/elem_z018.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Dict, List, Optional
@dataclass
class Signatures:
class Meta:
name = "signatures"
w3_org_2000_09_xmldsig_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "http://www.w3.org/2000/09/xmldsig#",
}
)
@dataclass
class Zzz:
    """XSD model for the <zzz> element (single required child <signatures>)."""

    class Meta:
        name = "zzz"

    signatures: Optional[Signatures] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Yyy:
    """XSD model for the <yyy> element (single required child <zzz>)."""

    class Meta:
        name = "yyy"

    zzz: Optional[Zzz] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Xxx:
    """XSD model for the <xxx> element (single required child <yyy>)."""

    class Meta:
        name = "xxx"

    yyy: Optional[Yyy] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Www:
    """XSD model for the <www> element (single required child <xxx>)."""

    class Meta:
        name = "www"

    xxx: Optional[Xxx] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Uuu:
    """XSD model for the <uuu> element (single required child <www>)."""

    class Meta:
        name = "uuu"

    www: Optional[Www] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Ttt:
    """XSD model for the <ttt> element (single required child <uuu>)."""

    class Meta:
        name = "ttt"

    uuu: Optional[Uuu] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Sss:
    """XSD model for the <sss> element (single required child <ttt>)."""

    class Meta:
        name = "sss"

    ttt: Optional[Ttt] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Rrr:
    """XSD model for the <rrr> element (single required child <sss>)."""

    class Meta:
        name = "rrr"

    sss: Optional[Sss] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Qqq:
    """XSD model for the <qqq> element (single required child <rrr>)."""

    class Meta:
        name = "qqq"

    rrr: Optional[Rrr] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Ppp:
    """XSD model for the <ppp> element (single required child <qqq>)."""

    class Meta:
        name = "ppp"

    qqq: Optional[Qqq] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Ooo:
    """XSD model for the <ooo> element (single required child <ppp>)."""

    class Meta:
        name = "ooo"

    ppp: Optional[Ppp] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Nnn:
    """XSD model for the <nnn> element (single required child <ooo>)."""

    class Meta:
        name = "nnn"

    ooo: Optional[Ooo] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Mmm:
    """XSD model for the <mmm> element (single required child <nnn>)."""

    class Meta:
        name = "mmm"

    nnn: Optional[Nnn] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Lll:
    """XSD model for the <lll> element (single required child <mmm>)."""

    class Meta:
        name = "lll"

    mmm: Optional[Mmm] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Kkk:
    """XSD model for the <kkk> element (single required child <lll>)."""

    class Meta:
        name = "kkk"

    lll: Optional[Lll] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Jjj:
    """XSD model for the <jjj> element (single required child <kkk>)."""

    class Meta:
        name = "jjj"

    kkk: Optional[Kkk] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Iii:
    """XSD model for the <iii> element (single required child <jjj>)."""

    class Meta:
        name = "iii"

    jjj: Optional[Jjj] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Hhh:
    """XSD model for the <hhh> element (single required child <iii>)."""

    class Meta:
        name = "hhh"

    iii: Optional[Iii] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Ggg:
    """XSD model for the <ggg> element (single required child <hhh>)."""

    class Meta:
        name = "ggg"

    hhh: Optional[Hhh] = field(
        default=None,
        metadata={"type": "Element", "required": True},
    )
@dataclass
class Fff:
class Meta:
name = "fff"
ggg: Optional[Ggg] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Eee:
class Meta:
name = "eee"
fff: Optional[Fff] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ddd:
class Meta:
name = "ddd"
eee: Optional[Eee] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ccc:
class Meta:
name = "ccc"
ddd: Optional[Ddd] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Bbb:
class Meta:
name = "bbb"
ccc: Optional[Ccc] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Aaa:
class Meta:
name = "aaa"
bbb: Optional[Bbb] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Root:
class Meta:
name = "root"
aaa: Optional[Aaa] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
w3_org_xml_1998_namespace_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "http://www.w3.org/XML/1998/namespace",
}
)
| 15.806701 | 65 | 0.459971 | from dataclasses import dataclass, field
from typing import Dict, List, Optional
@dataclass
class Signatures:
class Meta:
name = "signatures"
w3_org_2000_09_xmldsig_element: List[object] = field(
default_factory=list,
metadata={
"type": "Wildcard",
"namespace": "http://www.w3.org/2000/09/xmldsig#",
}
)
@dataclass
class Zzz:
class Meta:
name = "zzz"
signatures: Optional[Signatures] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Yyy:
class Meta:
name = "yyy"
zzz: Optional[Zzz] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Xxx:
class Meta:
name = "xxx"
yyy: Optional[Yyy] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Www:
class Meta:
name = "www"
xxx: Optional[Xxx] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Uuu:
class Meta:
name = "uuu"
www: Optional[Www] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ttt:
class Meta:
name = "ttt"
uuu: Optional[Uuu] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Sss:
class Meta:
name = "sss"
ttt: Optional[Ttt] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Rrr:
class Meta:
name = "rrr"
sss: Optional[Sss] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Qqq:
class Meta:
name = "qqq"
rrr: Optional[Rrr] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ppp:
class Meta:
name = "ppp"
qqq: Optional[Qqq] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ooo:
class Meta:
name = "ooo"
ppp: Optional[Ppp] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Nnn:
class Meta:
name = "nnn"
ooo: Optional[Ooo] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Mmm:
class Meta:
name = "mmm"
nnn: Optional[Nnn] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Lll:
class Meta:
name = "lll"
mmm: Optional[Mmm] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Kkk:
class Meta:
name = "kkk"
lll: Optional[Lll] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Jjj:
class Meta:
name = "jjj"
kkk: Optional[Kkk] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Iii:
class Meta:
name = "iii"
jjj: Optional[Jjj] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Hhh:
class Meta:
name = "hhh"
iii: Optional[Iii] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ggg:
class Meta:
name = "ggg"
hhh: Optional[Hhh] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Fff:
class Meta:
name = "fff"
ggg: Optional[Ggg] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Eee:
class Meta:
name = "eee"
fff: Optional[Fff] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ddd:
class Meta:
name = "ddd"
eee: Optional[Eee] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Ccc:
class Meta:
name = "ccc"
ddd: Optional[Ddd] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Bbb:
class Meta:
name = "bbb"
ccc: Optional[Ccc] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Aaa:
class Meta:
name = "aaa"
bbb: Optional[Bbb] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
@dataclass
class Root:
class Meta:
name = "root"
aaa: Optional[Aaa] = field(
default=None,
metadata={
"type": "Element",
"required": True,
}
)
w3_org_xml_1998_namespace_attributes: Dict[str, str] = field(
default_factory=dict,
metadata={
"type": "Attributes",
"namespace": "http://www.w3.org/XML/1998/namespace",
}
)
| true | true |
f715a6b708707b2792f67edc44f6ef7fd6f14e2c | 58,348 | py | Python | torch/nn/parallel/distributed.py | chaekit/pytorch | 132f5c1f36698361149ea99ca3504bd2acfdc19f | [
"Intel"
] | null | null | null | torch/nn/parallel/distributed.py | chaekit/pytorch | 132f5c1f36698361149ea99ca3504bd2acfdc19f | [
"Intel"
] | null | null | null | torch/nn/parallel/distributed.py | chaekit/pytorch | 132f5c1f36698361149ea99ca3504bd2acfdc19f | [
"Intel"
] | null | null | null | import copy
import inspect
import itertools
import logging
import os
import warnings
from contextlib import contextmanager
from typing import NamedTuple
import torch
import torch.distributed as dist
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.distributed_c10d import _get_default_group
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import scatter_kwargs, gather, is_namedtuple
def _find_tensors(obj):
r"""
Recursively find all tensors contained in the specified object.
"""
if RPC_AVAILABLE and isinstance(obj, RRef):
# If the current node is the owner of the RRef, unwrap it and try to
# find Tensors.
# TODO: Expand to remote RRefs.
if obj.is_owner():
return _find_tensors(obj.local_value())
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
return itertools.chain(*map(_find_tensors, obj))
if isinstance(obj, dict):
return itertools.chain(*map(_find_tensors, obj.values()))
return []
def _dump_DDP_relevant_env_vars():
relevant_env_vars = [
"RANK",
"LOCAL_RANK",
"WORLD_SIZE",
"MASTER_PORT",
"MASTER_ADDR",
"CUDA_VISIBLE_DEVICES",
"GLOO_SOCKET_IFNAME",
"GLOO_DEVICE_TRANSPORT",
"NCCL_SOCKET_IFNAME",
"NCCL_BLOCKING_WAIT",
"NCCL_DEBUG",
"NCCL_DEBUG_SUBSYS",
"NCCL_IB_DISABLE",
# More NCCL env vars:
"NCCL_P2P_DISABLE",
"NCCL_P2P_LEVEL",
"NCCL_SHM_DISABLE",
"NCCL_SOCKET_NTHREADS",
"NCCL_NSOCKS_PERTHREAD",
"NCCL_BUFFSIZE",
"NCCL_NTHREADS",
"NCCL_RINGS",
"NCCL_MAX_NCHANNELS",
"NCCL_MIN_NCHANNELS",
"NCCL_CHECKS_DISABLE",
"NCCL_CHECK_POINTERS",
"NCCL_LAUNCH_MODE",
"NCCL_IB_HCA",
"NCCL_IB_TIMEOUT",
"NCCL_IB_RETRY_CNT",
"NCCL_IB_GID_INDEX",
"NCCL_IB_SL",
"NCCL_IB_TC",
"NCCL_IB_AR_THRESHOLD",
"NCCL_IB_CUDA_SUPPORT",
"NCCL_NET_GDR_LEVEL",
"NCCL_NET_GDR_READ",
"NCCL_SINGLE_RING_THRESHOLD",
"NCCL_LL_THRESHOLD",
"NCCL_TREE_THRESHOLD",
"NCCL_ALGO",
"NCCL_PROTO",
"NCCL_IGNORE_CPU_AFFINITY",
"NCCL_DEBUG_FILE",
"NCCL_COLLNET_ENABLE",
"NCCL_TOPO_FILE",
"NCCL_TOPO_DUMP_FILE",
]
formatted_output = ""
for var in relevant_env_vars:
value = os.environ[var] if var in os.environ else "N/A"
formatted_output += "env:%s=%s\n" % (var, value)
print(formatted_output)
class _DDPUnevenInputsConfig(NamedTuple):
ddp_join_enabled: bool
ddp_join_divide_by_initial_world_size: bool
class DistributedDataParallel(Module):
r"""Implements distributed data parallelism that is based on
``torch.distributed`` package at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the batch
dimension. The module is replicated on each machine and each device, and
each such replica handles a portion of the input. During the backwards
pass, gradients from each node are averaged.
The batch size should be larger than the number of GPUs used locally.
See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.
Creation of this class requires that ``torch.distributed`` to be already
initialized, by calling :func:`torch.distributed.init_process_group`.
``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.
To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
up ``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done by either setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:
>>> torch.cuda.set_device(i)
where i is from 0 to N-1. In each process, you should refer the following
to construct this module:
>>> torch.distributed.init_process_group(
>>> backend='nccl', world_size=N, init_method='...'
>>> )
>>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)
In order to spawn up multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
.. note::
Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
for a brief introduction to all features related to distributed training.
.. note::
``DistributedDataParallel`` can be used in conjunction with
:class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
per-rank optimizer states memory footprint. Please refer to
`ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
for more details.
.. note:: ``nccl`` backend is currently the fastest and highly recommended
backend when using GPUs. This applies to both single-node and
multi-node distributed training.
.. note:: This module also supports mixed-precision distributed training.
This means that your model can have different types of parameters such
as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these
mixed types of parameters will just work fine.
.. note:: If you use ``torch.save`` on one process to checkpoint the module,
and ``torch.load`` on some other processes to recover it, make sure that
``map_location`` is configured properly for every process. Without
``map_location``, ``torch.load`` would recover the module to devices
where the module was saved from.
.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
gradient will be ``M`` times smaller when compared to the same model
trained on a single node with ``batch=M*N`` if the loss is summed (NOT
averaged as usual) across instances in a batch (because the gradients
between different nodes are averaged). You should take this into
consideration when you want to obtain a mathematically equivalent
training process compared to the local training counterpart. But in most
cases, you can just treat a DistributedDataParallel wrapped model, a
DataParallel wrapped model and an ordinary model on a single GPU as the
same (E.g. using the same learning rate for equivalent batch size).
.. note::
Parameters are never broadcast between processes. The module performs
an all-reduce step on gradients and assumes that they will be modified
by the optimizer in all processes in the same way. Buffers
(e.g. BatchNorm stats) are broadcast from the module in process of rank
0, to all other replicas in the system in every iteration.
.. note::
If you are using DistributedDataParallel in conjunction with the
:ref:`distributed-rpc-framework`, you should always use
:meth:`torch.distributed.autograd.backward` to compute gradients and
:class:`torch.distributed.optim.DistributedOptimizer` for optimizing
parameters.
Example::
>>> import torch.distributed.autograd as dist_autograd
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> from torch import optim
>>> from torch.distributed.optim import DistributedOptimizer
>>> from torch.distributed.rpc import RRef
>>>
>>> t1 = torch.rand((3, 3), requires_grad=True)
>>> t2 = torch.rand((3, 3), requires_grad=True)
>>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
>>> ddp_model = DDP(my_model)
>>>
>>> # Setup optimizer
>>> optimizer_params = [rref]
>>> for param in ddp_model.parameters():
>>> optimizer_params.append(RRef(param))
>>>
>>> dist_optim = DistributedOptimizer(
>>> optim.SGD,
>>> optimizer_params,
>>> lr=0.05,
>>> )
>>>
>>> with dist_autograd.context() as context_id:
>>> pred = ddp_model(rref.to_here())
>>> loss = loss_func(pred, loss)
>>> dist_autograd.backward(context_id, loss)
>>> dist_optim.step()
.. note::
To let a non-DDP model load a state dict from a DDP model,
:meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
needs to be applied to strip the prefix "module." in the DDP state dict before loading.
.. warning::
Constructor, forward method, and differentiation of the output (or a
function of the output of this module) are distributed synchronization
points. Take that into account in case different processes might be
executing different code.
.. warning::
This module assumes all parameters are registered in the model by the
time it is created. No parameters should be added nor removed later.
Same applies to buffers.
.. warning::
This module assumes all parameters are registered in the model of each
distributed processes are in the same order. The module itself will
conduct gradient ``allreduce`` following the reverse order of the
registered parameters of the model. In other words, it is users'
responsibility to ensure that each distributed process has the exact
same model and thus the exact same parameter registration order.
.. warning::
This module allows parameters with non-rowmajor-contiguous strides.
For example, your model may contain some parameters whose
:class:`torch.memory_format` is ``torch.contiguous_format``
and others whose format is ``torch.channels_last``. However,
corresponding parameters in different processes must have the
same strides.
.. warning::
This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
only work if gradients are to be accumulated in ``.grad`` attributes of
parameters).
.. warning::
If you plan on using this module with a ``nccl`` backend or a ``gloo``
backend (that uses Infiniband), together with a DataLoader that uses
multiple workers, please change the multiprocessing start method to
``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
likely experience deadlocks if you don't change this setting.
.. warning::
Forward and backward hooks defined on :attr:`module` and its submodules
won't be invoked anymore, unless the hooks are initialized in the
:meth:`forward` method.
.. warning::
You should never try to change your model's parameters after wrapping
up your model with ``DistributedDataParallel``. Because, when
wrapping up your model with ``DistributedDataParallel``, the constructor
of ``DistributedDataParallel`` will register the additional gradient
reduction functions on all the parameters of the model itself at the
time of construction. If you change the model's parameters afterwards,
gradient redunction functions no longer match the correct set of
parameters.
.. warning::
Using ``DistributedDataParallel`` in conjunction with the
:ref:`distributed-rpc-framework` is experimental and subject to change.
.. warning::
The ``gradient_as_bucket_view`` mode does not yet work with Automatic
Mixed Precision (AMP). AMP maintains stashed gradients that are used for
unscaling gradients. With ``gradient_as_bucket_view=True``, these
stashed gradients will point to communication buckets in the first
iteration. In the next iteration, the communication buckets are mutated
and thus these stashed gradients will be unexpectedly mutated as well,
which might lead to wrong results.
Args:
module (Module): module to be parallelized
device_ids (list of int or torch.device): CUDA devices.
1) For single-device modules, ``device_ids`` can
contain exactly one device id, which represents the only
CUDA device where the input module corresponding to this process resides.
Alternatively, ``device_ids`` can also be ``None``.
2) For multi-device modules and CPU modules,
``device_ids`` must be ``None``.
When ``device_ids`` is ``None`` for both cases,
both the input data for the forward pass and the actual module
must be placed on the correct device.
(default: ``None``)
output_device (int or torch.device): Device location of output for
single-device CUDA modules. For multi-device modules and
CPU modules, it must be ``None``, and the module itself
dictates the output location. (default: ``device_ids[0]``
for single-device modules)
broadcast_buffers (bool): Flag that enables syncing (broadcasting)
buffers of the module at beginning of the ``forward``
function. (default: ``True``)
process_group: The process group to be used for distributed data
all-reduction. If ``None``, the default process group, which
is created by :func:`torch.distributed.init_process_group`,
will be used. (default: ``None``)
bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
multiple buckets so that gradient reduction of each
bucket can potentially overlap with backward computation.
:attr:`bucket_cap_mb` controls the bucket size in
MegaBytes (MB). (default: 25)
find_unused_parameters (bool): Traverse the autograd graph from all
tensors contained in the return value of the
wrapped module's ``forward`` function. Parameters
that don't receive gradients as part of this
graph are preemptively marked as being ready to
be reduced. Note that all ``forward`` outputs
that are derived from module parameters must
participate in calculating loss and later the
gradient computation. If they don't, this wrapper
will hang waiting for autograd to produce
gradients for those parameters. Any outputs
derived from module parameters that are otherwise
unused can be detached from the autograd graph
using ``torch.Tensor.detach``. (default: ``False``)
check_reduction: This argument is deprecated.
gradient_as_bucket_view (bool): This is a prototype feature and subject
to changes. When set to ``True``, gradients will be views
pointing to different offsets of ``allreduce`` communication
buckets. This can reduce peak memory usage, where the
saved memory size will be equal to the total gradients
size. Moreover, it avoids the overhead of copying between
gradients and ``allreduce`` communication buckets. When
gradients are views, ``detach_()`` cannot be called on the
gradients. If hitting such errors, please fix it by
referring to the :meth:`~torch.optim.Optimizer.zero_grad`
function in ``torch/optim/optimizer.py`` as a solution.
Attributes:
module (Module): the module to be parallelized.
Example::
>>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
>>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
"""
def __init__(
self,
module,
device_ids=None,
output_device=None,
dim=0,
broadcast_buffers=True,
process_group=None,
bucket_cap_mb=25,
find_unused_parameters=False,
check_reduction=False,
gradient_as_bucket_view=False,
):
super(DistributedDataParallel, self).__init__()
assert any((p.requires_grad for p in module.parameters())), (
"DistributedDataParallel is not needed when a module "
"doesn't have any parameter that requires a gradient."
)
if device_ids is not None and len(device_ids) > 1:
raise ValueError("device_ids can only be None or contain a single element.")
self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1
distinct_device_types = {p.device.type for p in module.parameters()}
if len(distinct_device_types) != 1:
raise ValueError(
"DistributedDataParallel's input module must be on "
"the same type of devices, but input module parameters locate in {}.".format(
distinct_device_types
)
)
self.device_type = list(distinct_device_types)[0]
if (
device_ids is None
or len(device_ids) == 0 # For backward compatibility.
or self.device_type == "cpu"
or self.is_multi_device_module
):
if device_ids or output_device:
raise ValueError(
"DistributedDataParallel device_ids and output_device arguments "
"only work with single-device/multiple-device GPU modules or CPU modules, "
"but got device_ids {}, output_device {}, and module parameters {}.".format(
device_ids,
output_device,
{p.device for p in module.parameters()},
)
)
self.device_ids = None
self.output_device = None
else:
self.device_ids = [_get_device_index(x, True) for x in device_ids]
if output_device is None:
output_device = device_ids[0]
self.output_device = _get_device_index(output_device, True)
if process_group is None:
self.process_group = _get_default_group()
else:
self.process_group = process_group
self.dim = dim
self.module = module
self.device = list(self.module.parameters())[0].device
self.broadcast_buffers = broadcast_buffers
self.find_unused_parameters = find_unused_parameters
self.require_backward_grad_sync = True
self.require_forward_param_sync = True
self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False
)
self.gradient_as_bucket_view = gradient_as_bucket_view
if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore
else:
self.parameters_to_ignore = []
if check_reduction:
# This argument is no longer used since the reducer
# will ensure reduction completes even if some parameters
# do not receive gradients.
warnings.warn(
"The `check_reduction` argument in `DistributedDataParallel` "
"module is deprecated. Please avoid using it."
)
# Check that a module does not have Uninitialized parameters
for param in module.parameters():
if isinstance(param, torch.nn.parameter.UninitializedParameter):
raise RuntimeError(
"Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
"Run a dummy forward pass to correctly initialize the modules"
)
# used for intra-node param sync and inter-node sync as wel
self.broadcast_bucket_size = int(250 * 1024 * 1024)
# reduction bucket size
self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
# Whether to perform input tensor CPU to GPU copies on a side-stream
self.use_side_stream_for_tensor_copies = (
os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
)
# TODO(wayi@): Remove this field since SPMD is no longer supported,
# and also remove all the relevant unnecessary loops.
# Module replication within process (single-process multi device)
self._module_copies = [self.module]
# Build parameters for reducer.
parameters, expect_sparse_gradient = self._build_params_for_reducer()
# Verify model equivalence.
dist._verify_model_across_ranks(self.process_group, parameters)
# Sync params and buffers. Ensures all DDP models start off at the same value.
self._sync_params_and_buffers(authoritative_rank=0)
# Builds reducer.
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _sync_params_and_buffers(self, authoritative_rank=0):
module_states = []
for name, param in self.module.state_dict().items():
if name not in self.parameters_to_ignore:
module_states.append(param)
if len(module_states) > 0:
self._distributed_broadcast_coalesced(
module_states, self.broadcast_bucket_size, authoritative_rank
)
def _ddp_init_helper(self, parameters, expect_sparse_gradient):
"""
Initialization helper function that does the following:
(1) bucketing the parameters for reductions
(2) resetting the bucketing states
(3) registering the grad hooks
(4) Logging constructin-time DDP logging data
(5) passing a handle of DDP to SyncBatchNorm Layer
"""
# The bucket size limit is specified in the constructor.
# Additionally, we allow for a single small bucket for parameters
# that are defined first, such that their gradients don't spill into
# a much larger bucket, adding unnecessary latency after gradient
# computation finishes. Experiments showed 1MB is a reasonable value.
bucket_indices = dist._compute_bucket_assignment_by_size(
parameters[0],
[dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],
expect_sparse_gradient[0],
)
# Note: reverse list of buckets because we want to approximate the
# order in which their gradients are produced, and assume they
# are used in the forward pass in the order they are defined.
self.reducer = dist.Reducer(
parameters,
list(reversed(bucket_indices)),
self.process_group,
expect_sparse_gradient,
self.bucket_bytes_cap,
self.find_unused_parameters,
self.gradient_as_bucket_view,
)
self.logger = dist.Logger(self.reducer)
# Set logging data that can be got during construction time.
self.logger.set_construction_data_and_log(
self.module.__class__.__name__,
[] if self.device_ids is None else self.device_ids,
-1 if self.output_device is None else self.output_device,
self.broadcast_buffers,
)
# passing a handle to torch.nn.SyncBatchNorm layer
self._passing_sync_batchnorm_handle(self._module_copies)
def __getstate__(self):
self._check_default_group()
attrs = copy.copy(self.__dict__)
del attrs["process_group"]
del attrs["reducer"]
del attrs["logger"]
return attrs
def __setstate__(self, state):
# If serializable, then the process group should be the default one
self.process_group = _get_default_group()
super(DistributedDataParallel, self).__setstate__(state)
self.__dict__.setdefault("require_forward_param_sync", True)
self.__dict__.setdefault("require_backward_grad_sync", True)
parameters, expect_sparse_gradient = self._build_params_for_reducer()
self._ddp_init_helper(parameters, expect_sparse_gradient)
def _build_params_for_reducer(self):
# Build tuple of (module, parameter) for all parameters that require grads.
modules_and_parameters = [
[
(module, parameter)
for module_name, module in replica.named_modules()
for parameter in [
param
# Note that we access module.named_parameters instead of
# parameters(module). parameters(module) is only needed in the
# single-process multi device case, where it accesses replicated
# parameters through _former_parameters.
for param_name, param in module.named_parameters(recurse=False)
if param.requires_grad
and f"{module_name}.{param_name}"
not in self.parameters_to_ignore
]
]
for replica in self._module_copies
]
# Deduplicate any parameters that might be shared across child modules.
memo = set()
modules_and_parameters = [
# "p not in memo" is the deduplication check.
# "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed.
[(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]
for replica_mps in modules_and_parameters
]
# Build list of parameters.
parameters = [
list(parameter for _, parameter in replica)
for replica in modules_and_parameters
]
# Checks if a module will produce a sparse gradient.
def produces_sparse_gradient(module):
if isinstance(module, torch.nn.Embedding) or isinstance(
module, torch.nn.EmbeddingBag
):
return module.sparse
return False
# Build list of booleans indicating whether or not to expect sparse
# gradients for the corresponding parameters.
expect_sparse_gradient = [
list(produces_sparse_gradient(module) for module, _ in replica)
for replica in modules_and_parameters
]
# The following modules_params and modules_buffers are used for
# param/buffer sync in _sync_params.
self.modules_params = [
list(self._get_parameters(m)) for m in self._module_copies
]
# Collect buffers for modules, filtering out buffers that should be ignored.
named_module_buffers = [
[(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]
for m in self._module_copies
]
self.modules_buffers = [
[
buffer
for (buffer, buffer_name) in module_buffers
if buffer_name not in self.parameters_to_ignore
]
for module_buffers in named_module_buffers
]
return parameters, expect_sparse_gradient
def _get_parameters(self, m, recurse=True):
"""
Returns a generator of module parameters
"""
def model_parameters(m):
ps = (
m._former_parameters.values()
if hasattr(m, "_former_parameters")
else m.parameters(recurse=False)
)
for p in ps:
yield p
for m in m.modules() if recurse else [m]:
for p in model_parameters(m):
yield p
def _check_default_group(self):
pickle_not_supported = False
try:
if self.process_group != _get_default_group():
pickle_not_supported = True
except RuntimeError:
pickle_not_supported = True
if pickle_not_supported:
raise RuntimeError(
"DDP Pickling/Unpickling are only supported "
"when using DDP with the default process "
"group. That is, when you have called "
"init_process_group and have not passed "
"process_group argument to DDP constructor"
)
@contextmanager
def no_sync(self):
r"""
A context manager to disable gradient synchronizations across DDP
processes. Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass exiting the context.
Example::
>>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
>>> with ddp.no_sync():
>>> for input in inputs:
>>> ddp(input).backward() # no synchronization, accumulate grads
>>> ddp(another_input).backward() # synchronize grads
"""
old_require_backward_grad_sync = self.require_backward_grad_sync
self.require_backward_grad_sync = False
try:
yield
finally:
self.require_backward_grad_sync = old_require_backward_grad_sync
def forward(self, *inputs, **kwargs):
self.reducer.save_thread_local_state()
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.logger.set_runtime_stats_and_log()
self.reducer.prepare_for_forward()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
ones = torch.ones(1, device=self.device)
work = dist.all_reduce(ones, group=self.process_group, async_op=True)
self.reducer._set_forward_pass_work_handle(
work,
self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,
)
# Calling _rebuild_buckets before forward compuation,
# It may allocate new buckets before deallocating old buckets
# inside _rebuild_buckets. To save peak memory usage,
# call _rebuild_buckets before the peak memory usage increases
# during forward computation.
# This should be called only once during whole training period.
if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
logging.info("Reducer buckets have been rebuilt in this iteration.")
if self.require_forward_param_sync:
self._sync_params()
if self.ddp_uneven_inputs_config.ddp_join_enabled:
# Notify joined ranks whether they should sync in backwards pass or not.
self._check_global_requires_backward_grad_sync(is_joined_rank=False)
if self.device_ids:
inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])
output = self.module(*inputs[0], **kwargs[0])
else:
output = self.module(*inputs, **kwargs)
if torch.is_grad_enabled() and self.require_backward_grad_sync:
self.require_forward_param_sync = True
# We'll return the output object verbatim since it is a freeform
# object. We need to find any tensors in this object, though,
# because we need to figure out which parameters were used during
# this forward pass, to ensure we short circuit reduction for any
# unused parameters. Only if `find_unused_parameters` is set.
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
self.require_forward_param_sync = False
return output
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def _recursive_to(self, inputs, target_gpu):
        r"""
        Recursively moves input to the target_gpu.

        Returns a one-element-per-"replica" list structure (legacy of the
        old multi-replica scatter path); tensors come back wrapped in
        1-tuples / 1-element lists.
        """
        def to_map(obj):
            if isinstance(obj, torch.Tensor):
                if not self.use_side_stream_for_tensor_copies:
                    return (obj.to(target_gpu),)
                else:
                    # Perform CPU -> GPU copies in a background stream. This code is
                    # motivated from similar logic in torch/nn/parallel/_functions.py
                    stream = _get_stream(target_gpu)
                    with torch.cuda.stream(stream):
                        output = obj.to(target_gpu)
                    # synchronize with the copy stream
                    with torch.cuda.device(target_gpu):
                        current_stream = torch.cuda.current_stream()
                        # Sync the current stream with the copy stream
                        current_stream.wait_stream(stream)
                        # Ensure tensor memory is not reused until work on
                        # main stream is complete
                        output.record_stream(current_stream)
                    return (output,)
            # Namedtuples must be rebuilt field-by-field via their constructor.
            if is_namedtuple(obj):
                return [type(obj)(*args) for args in zip(*map(to_map, obj))]
            if isinstance(obj, tuple) and len(obj) > 0:
                return list(zip(*map(to_map, obj)))
            if isinstance(obj, list) and len(obj) > 0:
                return [list(i) for i in zip(*map(to_map, obj))]
            if isinstance(obj, dict) and len(obj) > 0:
                return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]
            # Non-tensor leaves pass through unchanged.
            return [obj]
        # Avoid reference cycle (to_map closes over itself via recursion).
        try:
            res = to_map(inputs)
        finally:
            to_map = None
        return res
def to_kwargs(self, inputs, kwargs, device_id):
inputs = self._recursive_to(inputs, device_id) if inputs else []
kwargs = self._recursive_to(kwargs, device_id) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def train(self, mode=True):
super(DistributedDataParallel, self).train(mode)
for module in self._module_copies[1:]:
module.train(mode)
return self
# When running in join mode, schedules an allreduce to match the one in the
# forward pass to determine the no. of currently active processes and whether
# all processes have joined.
def _schedule_shadow_all_reduce_for_fwd_pass(self):
all_active_procs = torch.zeros(1, device=self.device)
dist.all_reduce(all_active_procs, group=self.process_group)
return all_active_procs.item()
# When running in join mode, schedules an allreduce to notify joined ranks
# of whether backwards pass synchronization will run this iteraton or not.
def _check_global_requires_backward_grad_sync(self, is_joined_rank):
if not is_joined_rank and self.require_backward_grad_sync:
requires_sync_tensor = torch.ones(1, device=self.device)
else:
requires_sync_tensor = torch.zeros(1, device=self.device)
work = dist.all_reduce(
requires_sync_tensor, group=self.process_group, async_op=True
)
return work, requires_sync_tensor
# When running in join mode, checks and performs sync of module buffers if
# the models have buffers that should be synchronized in the forward pass.
def _check_and_sync_module_buffers(self):
if self.will_sync_module_buffers():
authoritative_rank = self._find_common_rank(self._distributed_rank, False)
self._distributed_broadcast_coalesced(
self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank
)
    # When running in join mode, agrees upon a common rank and broadcasts model
    # parameters to all other ranks.
    def _sync_final_model(self, is_last_joiner):
        # Agree upon the process that will be the authoritative model copy.
        # The current rank is a candidate for being the authoritative copy if
        # is_last_joiner=True. We break ties via picking the larger rank.
        self._authoritative_rank = self._find_common_rank(
            self._distributed_rank, is_last_joiner
        )
        self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)
# Schedule allreduce ops to match those scheduled in the reducer's backward
# pass.
def _match_all_reduce_for_bwd_pass(self):
allreduce_work = []
# Schedule allreduce in the same order as Reducer schedules them, i.e.
# the order of the buckets. Retrieving the bucket order from the reducer
# ensures that we keep the same order in join mode, such as when bucket
# order is rebuilt dynamically.
all_bucket_tensors = self.reducer.get_bucket_tensors()
for bucket_tensors in all_bucket_tensors:
# Joined processes contribute zero gradient. In the case that
# divide_by_initial_world_size=True, we divide grads by the static
# world size, if not, the dividing factor is reduced by the number
# of joined processes.
zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]
work = self.process_group.allreduce(zero_tensors)
allreduce_work.append(work)
for work in allreduce_work:
work.wait()
# Allreduces the used parameter mapping across ranks.
def _match_unused_params_allreduce(self):
locally_used_param_maps = self.reducer._get_local_used_maps()
self.process_group.allreduce(locally_used_param_maps)
    @contextmanager
    def join(self, divide_by_initial_world_size=True, enable=True):
        r"""
        A context manager to be used in conjunction with an instance of
        :class:`torch.nn.parallel.DistributedDataParallel` to be
        able to train with uneven inputs across participating processes.
        This context manager will keep track of already-joined DDP processes,
        and "shadow" the forward and backward passes by inserting collective
        communication operations to match with the ones created by non-joined
        DDP processes. This will ensure each collective call has a corresponding
        call by already-joined DDP processes, preventing hangs or errors that
        would otherwise happen when training with uneven inputs across
        processes.
        Once all DDP processes have joined, the context manager will broadcast
        the model corresponding to the last joined process to all processes to
        ensure the model is the same across all processes
        (which is guaranteed by DDP).
        To use this to enable training with uneven inputs across processes,
        simply wrap this context manager around your training loop. No further
        modifications to the model or data loading is required.
        .. warning::
            This module currently does not support custom distributed collective
            operations in the forward pass, such as ``SyncBatchNorm`` or other
            custom defined collectives in the model's forward pass.
        Args:
            divide_by_initial_world_size (bool): If ``True``, will divide
                gradients by the initial ``world_size`` DDP training was launched
                with. If ``False``, will compute the effective world size
                (number of ranks that have not depleted their inputs yet) and
                divide gradients by that during allreduce. Set
                ``divide_by_initial_world_size=True`` to ensure every input
                sample including the uneven inputs have equal weight in terms of
                how much they contribute to the global gradient. This is
                achieved by always dividing the gradient by the initial
                ``world_size`` even when we encounter uneven inputs. If you set
                this to ``False``, we divide the gradient by the remaining
                number of nodes. This ensures parity with training on a smaller
                ``world_size`` although it also means the uneven inputs would
                contribute more towards the global gradient. Typically, you
                would want to set this to ``True`` for cases where the last few
                inputs of your training job are uneven. In extreme cases, where
                there is a large discrepancy in the number of inputs, setting
                this to ``False`` might provide better results.
            enable (bool): Whether to enable uneven input detection or not. Pass
                in ``enable=False`` to disable in cases where you know that
                inputs are even across participating processes. Default is
                ``True``.
        Example::
            >>> import torch
            >>> import torch.distributed as dist
            >>> import os
            >>> import torch.multiprocessing as mp
            >>> import torch.nn as nn
            >>> # On each spawned worker
            >>> def worker(rank):
            >>>     dist.init_process_group("nccl", rank=rank, world_size=2)
            >>>     torch.cuda.set_device(rank)
            >>>     model = nn.Linear(1, 1, bias=False).to(rank)
            >>>     model = torch.nn.parallel.DistributedDataParallel(
            >>>         model, device_ids=[rank], output_device=rank
            >>>     )
            >>>     # Rank 1 gets one more input than rank 0.
            >>>     inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
            >>>     with model.join():
            >>>         for _ in range(5):
            >>>             for inp in inputs:
            >>>                 loss = model(inp).sum()
            >>>                 loss.backward()
            >>>     # Without the join() API, the below synchronization will hang
            >>>     # blocking for rank 1's allreduce to complete.
            >>>     torch.cuda.synchronize(device=rank)
        """
        # Log uneven input API usage.
        self.logger._set_uneven_input_join()
        try:
            has_error = False
            self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
                ddp_join_enabled=enable,
                ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,
            )
            yield
        except Exception as e:
            # Set to skip any processing in the finally block.
            has_error = True
            raise e
        finally:
            # Skip any processing to let the exception immediately be raised if
            # there was one.
            if enable and not has_error:
                all_procs_joined = False
                is_last_joiner = True
                # i counts shadow iterations performed by this (joined) rank;
                # it approximates how far behind the other ranks this rank is.
                i = 0
                WARN_THRESHOLD = 1000
                # NOTE(review): this mutates the process-global warnings filter
                # so the skew warning fires at most once — confirm side effect
                # on user filters is acceptable.
                warnings.simplefilter("once")
                while not all_procs_joined:
                    if i > WARN_THRESHOLD:
                        my_rank = self._distributed_rank
                        warnings.warn(
                            "Detected uneven input skew of greater "
                            f"than {WARN_THRESHOLD}. This means that rank {my_rank} "
                            f"has at least {WARN_THRESHOLD} fewer inputs than "
                            "other currently active ranks. This level of skew could "
                            "lead to performance degradation during training."
                        )
                    # Schedules allreduce to match fwd pass allreduce in non-joined procs
                    num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()
                    if num_active_procs == 0:
                        all_procs_joined = True
                    else:
                        # Some DDP process still needs to be joined.
                        if is_last_joiner:
                            is_last_joiner = False
                        # It will rebuild buckets only once during training period
                        self.reducer._rebuild_buckets()
                        # Schedule a corresponding broadcast if we are syncing module
                        # buffers in the forward pass.
                        self._check_and_sync_module_buffers()
                        (
                            work,
                            should_sync_backwards_tensor,
                        ) = self._check_global_requires_backward_grad_sync(
                            is_joined_rank=True
                        )
                        work.wait()
                        # If nonzero, then we should sync in the bwd pass.
                        should_sync_backwards = should_sync_backwards_tensor.item() != 0
                        # Forward param sync is disabled in the next iteration
                        # if we are skipping grad sync this iteration. Hence, we
                        # set require_forward_param_sync appropriately here.
                        self.require_forward_param_sync = should_sync_backwards
                        if not should_sync_backwards:
                            continue
                        # Schedules one allreduce per gradient bucket to match
                        # the backwards pass allreduce.
                        self._match_all_reduce_for_bwd_pass()
                        # Check if we need to allreduce locally unused params.
                        if self.find_unused_parameters:
                            self._match_unused_params_allreduce()
                        # It will push rebuilt params only once during training period
                        self.reducer._push_all_rebuilt_params()
                        i += 1
                # All procs joined. Agree on authoritative rank and broadcast the model.
                self._sync_final_model(is_last_joiner)
def register_comm_hook(self, state: object, hook: callable):
r"""
Registers a communication hook which is an enhancement that provides a
flexible hook to users where they can specify how DDP aggregates gradients
across multiple workers.
This hook would be very useful for researchers to try out new ideas. For
example, this hook can be used to implement several algorithms like GossipGrad
and gradient compression which involve different communication strategies for
parameter syncs while running Distributed DataParallel training.
Args:
state (object): Passed to the hook to maintain any state information during the training process.
Examples include error feedback in gradient compression,
peers to communicate with next in GossipGrad, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
hook (callable): Averages gradient tensors across workers and defined as:
``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future``:
This function is called once the bucket is ready. The
hook can perform whatever processing is needed and return
a Future indicating completion of any async work (ex: allreduce).
If the hook doesn't perform any communication, it can also
just return a completed Future. The Future should hold the
new value of grad bucket's tensors. Once a bucket is ready,
c10d reducer would call this hook and use the tensors returned
by the Future and copy grads to individual parameters.
We also provide an API called ``get_future`` to retrieve a
Future associated with the completion of ``c10d.ProcessGroup.work``.
.. warning ::
Grad bucket's tensors will not be predivided by world_size. User is responsible
to divide by the world_size in case of operations like allreduce.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
The Future object that hook returns should contain a result that has the same
shape with the tensors inside grad bucket.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
``get_future`` API supports only NCCL backend and will return a ``torch._C.Future``
which is an internal type and should be used with caution. It can still be used by
``register_comm_hook`` API, but it is subject to some subtle differences compared
to ``torch.futures.Future``.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a noop hook that returns the same tensors.
>>> def noop(state: object, bucket: dist.GradBucket): -> torch.futures.Future
>>> fut = torch.futures.Future()
>>> fut.set_result(bucket.get_tensors())
>>> return fut
>>> ddp.register_comm_hook(state = None, hook = noop)
Example::
Below is an example of a Parallel SGD algorithm where gradients are encoded before
allreduce, and then decoded after allreduce.
>>> def encode_and_decode(state: object, bucket: dist.GradBucket): -> torch.futures.Future
>>> tensors = [t / process_group.world_size for t in bucket.get_tensors()]
>>> encoded_tensors = encode(tensors) # encode gradients
>>> fut = process_group.allreduce(encoded_tensors).get_future()
>>> # Define the then callback to decode.
>>> def decode(fut):
>>> decoded_tensors = decode(fut.value()) # decode gradients
>>> return decoded_tensors
>>> return fut.then(decode)
>>> ddp.register_comm_hook(state = None, hook = encode_and_decode)
"""
self._check_comm_hook(hook)
self.logger._set_comm_hook_name(hook.__qualname__)
dist._register_comm_hook(self.reducer, state, hook)
def _register_builtin_comm_hook(self, comm_hook_type):
r"""
Registers a built-in communication hook that specifies how DDP
aggregates gradients across multiple workers.
The built-in hooks aim to provide efficient C++ implementations for certain hooks,
which might not be as efficient if implemented in Python using a Python communication hook.
Args:
comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as
ALLREDUCE, FP16_COMPRESS, etc.
.. warning ::
DDP communication hook can only be registered once and should be registered
before calling backward.
.. warning ::
DDP communication hook does not support single-process multiple-device mode.
Gradbucket tensors should consist of only a single tensor.
.. warning ::
DDP communication hook is experimental and subject to change.
Example::
Below is an example of a FP16 compression where gradients are
compressed into 16-bit floating-point numbers before allreduce, and
then decompressed after allreduce.
>>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)
"""
self.logger._set_comm_hook_name(str(comm_hook_type))
dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
def _distributed_broadcast_coalesced(
self, tensors, buffer_size, authoritative_rank=0
):
dist._broadcast_coalesced(
self.process_group, tensors, buffer_size, authoritative_rank
)
def will_sync_module_buffers(self):
return (
self.require_forward_param_sync
and self.broadcast_buffers
and len(self.modules_buffers[0]) > 0
)
def _find_common_rank(self, input_rank, rank_cond):
# -1 indicates that this rank is not under consideration to be the
# common_rank
rank_to_use = torch.tensor(
[input_rank if rank_cond else -1],
device=self.device,
)
dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
if rank_to_use.item() == -1:
raise ValueError(
"BUG! Expected rank_cond to be true for at least one process."
)
return rank_to_use.item()
def _sync_params(self):
with torch.no_grad():
# module buffer sync
if self.will_sync_module_buffers():
# Synchronize buffers across processes.
# If we are running DDP with the join manager, we have to agree
# upon a rank to sync module buffers from, since rank 0 may
# already have been joined and have stale module buffers.
if self.ddp_uneven_inputs_config.ddp_join_enabled:
authoritative_rank = self._find_common_rank(
self._distributed_rank, True
)
else:
# The process with rank 0 is considered the authoritative copy.
authoritative_rank = 0
self._distributed_broadcast_coalesced(
self.modules_buffers[0],
self.broadcast_bucket_size,
authoritative_rank,
)
def _passing_sync_batchnorm_handle(self, module_copies):
for dev_idx, module in enumerate(module_copies):
for layer in module.modules():
if isinstance(layer, torch.nn.modules.SyncBatchNorm):
assert (
self.device_type != "cpu"
), "SyncBatchNorm layers only work with GPU modules"
layer._specify_ddp_gpu_num(1)
def _check_comm_hook(self, hook):
if not callable(hook):
raise TypeError("Communication hook must be callable.")
sig = inspect.signature(hook)
if (
sig.parameters["bucket"].annotation != inspect._empty
and sig.parameters["bucket"].annotation != dist.GradBucket
):
raise ValueError(
"Communication hook: bucket annotation should be dist.GradBucket."
)
if sig.return_annotation != inspect._empty and (
sig.return_annotation != torch.futures.Future
and sig.return_annotation != torch._C.Future
):
raise ValueError(
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
)
    @property
    def _distributed_rank(self):
        # Rank of this process within the DDP process group.
        return dist.get_rank(self.process_group)
    @staticmethod
    def _set_params_and_buffers_to_ignore_for_model(
        module, params_and_buffers_to_ignore
    ):
        # Workaround API: tag ``module`` with the parameter/buffer names that
        # DDP should skip during synchronization (read back by DDP's
        # constructor). It will be removed when the API is finalized
        # as part of addressing https://github.com/pytorch/pytorch/issues/43690.
        module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
def get_ddp_logging_data(self):
r"""
This interface can be called after DistributedDataParallel() is
constructed. It returns DDPLoggingData for debugging and analysis.
More detailed explanation of the fields in DDPLoggingData are in
``torch/c10/util/Logging.h``.
"""
return self.logger._get_ddp_logging_data()
def set_ddp_runtime_logging_sample_rate(self, sample_rate):
r"""
This interface allows users to set sample_rate of collecting
runtime stats. The runtime stats will be recorded for the
first 10 iterations, after 10 iteratons runtime stats will be
recorded once every "sample_rate" training iterations. In
default, runtime stats are recorded for the first 10 iterations,
after 10 iterations runtime stats are recorded once every
"kDDPRuntimeLoggingSampleRate=100" training iterations.
"""
if sample_rate < 1:
raise ValueError(
"DDP runtime logging sample rate should be equal or greater than 1"
)
self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
| 46.015773 | 116 | 0.622935 | import copy
import inspect
import itertools
import logging
import os
import warnings
from contextlib import contextmanager
from typing import NamedTuple
import torch
import torch.distributed as dist
RPC_AVAILABLE = False
if dist.is_available():
from torch.distributed.distributed_c10d import ReduceOp
from torch.distributed.distributed_c10d import _get_default_group
if torch.distributed.rpc.is_available():
RPC_AVAILABLE = True
from torch.distributed.rpc import RRef
from torch._utils import _get_device_index
from ..modules import Module
from ._functions import _get_stream
from .scatter_gather import scatter_kwargs, gather, is_namedtuple
def _find_tensors(obj):
    """Recursively find all tensors contained in (possibly nested) ``obj``."""
    if RPC_AVAILABLE and isinstance(obj, RRef):
        # Only the RRef's owner can inspect its value locally; a non-owner
        # RRef falls through and yields no tensors.
        if obj.is_owner():
            return _find_tensors(obj.local_value())
    if isinstance(obj, torch.Tensor):
        return [obj]
    if isinstance(obj, (list, tuple)):
        return itertools.chain.from_iterable(map(_find_tensors, obj))
    if isinstance(obj, dict):
        return itertools.chain.from_iterable(map(_find_tensors, obj.values()))
    return []
def _dump_DDP_relevant_env_vars():
    """Print all DDP/Gloo/NCCL-related environment variables and their values.

    Variables that are not set in the environment are reported as ``N/A``.
    Intended as a debugging aid when diagnosing distributed-training setup
    issues.
    """
    relevant_env_vars = [
        "RANK",
        "LOCAL_RANK",
        "WORLD_SIZE",
        "MASTER_PORT",
        "MASTER_ADDR",
        "CUDA_VISIBLE_DEVICES",
        "GLOO_SOCKET_IFNAME",
        "GLOO_DEVICE_TRANSPORT",
        "NCCL_SOCKET_IFNAME",
        "NCCL_BLOCKING_WAIT",
        "NCCL_DEBUG",
        "NCCL_DEBUG_SUBSYS",
        "NCCL_IB_DISABLE",
        "NCCL_P2P_DISABLE",
        "NCCL_P2P_LEVEL",
        "NCCL_SHM_DISABLE",
        "NCCL_SOCKET_NTHREADS",
        "NCCL_NSOCKS_PERTHREAD",
        "NCCL_BUFFSIZE",
        "NCCL_NTHREADS",
        "NCCL_RINGS",
        "NCCL_MAX_NCHANNELS",
        "NCCL_MIN_NCHANNELS",
        "NCCL_CHECKS_DISABLE",
        "NCCL_CHECK_POINTERS",
        "NCCL_LAUNCH_MODE",
        "NCCL_IB_HCA",
        "NCCL_IB_TIMEOUT",
        "NCCL_IB_RETRY_CNT",
        "NCCL_IB_GID_INDEX",
        "NCCL_IB_SL",
        "NCCL_IB_TC",
        "NCCL_IB_AR_THRESHOLD",
        "NCCL_IB_CUDA_SUPPORT",
        "NCCL_NET_GDR_LEVEL",
        "NCCL_NET_GDR_READ",
        "NCCL_SINGLE_RING_THRESHOLD",
        "NCCL_LL_THRESHOLD",
        "NCCL_TREE_THRESHOLD",
        "NCCL_ALGO",
        "NCCL_PROTO",
        "NCCL_IGNORE_CPU_AFFINITY",
        "NCCL_DEBUG_FILE",
        "NCCL_COLLNET_ENABLE",
        "NCCL_TOPO_FILE",
        "NCCL_TOPO_DUMP_FILE",
    ]
    # os.environ.get avoids the double lookup of `var in os.environ` + index;
    # join builds the report in one pass instead of quadratic `+=`.
    formatted_output = "".join(
        "env:%s=%s\n" % (var, os.environ.get(var, "N/A"))
        for var in relevant_env_vars
    )
    print(formatted_output)
class _DDPUnevenInputsConfig(NamedTuple):
    # True while the uneven-inputs ``join()`` context manager is active.
    ddp_join_enabled: bool
    # Whether gradients are divided by the initial world size (vs. the
    # effective number of non-joined ranks) during allreduce in join mode.
    ddp_join_divide_by_initial_world_size: bool
class DistributedDataParallel(Module):
    def __init__(
        self,
        module,
        device_ids=None,
        output_device=None,
        dim=0,
        broadcast_buffers=True,
        process_group=None,
        bucket_cap_mb=25,
        find_unused_parameters=False,
        check_reduction=False,
        gradient_as_bucket_view=False,
    ):
        """Wrap ``module`` for data-parallel training across processes.

        Validates device placement, records configuration, replicates no
        modules (single-device per process), verifies model equivalence
        across ranks, broadcasts the initial state from rank 0, and builds
        the C++ reducer via ``_ddp_init_helper``. Statement order matters:
        the reducer must be built only after params/buffers are synced.
        """
        super(DistributedDataParallel, self).__init__()
        # DDP is pointless (and the reducer would break) with no grads.
        assert any((p.requires_grad for p in module.parameters())), (
            "DistributedDataParallel is not needed when a module "
            "doesn't have any parameter that requires a gradient."
        )
        if device_ids is not None and len(device_ids) > 1:
            raise ValueError("device_ids can only be None or contain a single element.")
        self.is_multi_device_module = len({p.device for p in module.parameters()}) > 1
        distinct_device_types = {p.device.type for p in module.parameters()}
        if len(distinct_device_types) != 1:
            raise ValueError(
                "DistributedDataParallel's input module must be on "
                "the same type of devices, but input module parameters locate in {}.".format(
                    distinct_device_types
                )
            )
        self.device_type = list(distinct_device_types)[0]
        # device_ids/output_device only make sense for single-device GPU
        # modules; CPU and multi-device modules must leave them unset.
        if (
            device_ids is None
            or len(device_ids) == 0
            or self.device_type == "cpu"
            or self.is_multi_device_module
        ):
            if device_ids or output_device:
                raise ValueError(
                    "DistributedDataParallel device_ids and output_device arguments "
                    "only work with single-device/multiple-device GPU modules or CPU modules, "
                    "but got device_ids {}, output_device {}, and module parameters {}.".format(
                        device_ids,
                        output_device,
                        {p.device for p in module.parameters()},
                    )
                )
            self.device_ids = None
            self.output_device = None
        else:
            self.device_ids = [_get_device_index(x, True) for x in device_ids]
            if output_device is None:
                output_device = device_ids[0]
            self.output_device = _get_device_index(output_device, True)
        if process_group is None:
            self.process_group = _get_default_group()
        else:
            self.process_group = process_group
        self.dim = dim
        self.module = module
        self.device = list(self.module.parameters())[0].device
        self.broadcast_buffers = broadcast_buffers
        self.find_unused_parameters = find_unused_parameters
        self.require_backward_grad_sync = True
        self.require_forward_param_sync = True
        # Join-mode config; disabled until the join() context manager runs.
        self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
            ddp_join_enabled=False, ddp_join_divide_by_initial_world_size=False
        )
        self.gradient_as_bucket_view = gradient_as_bucket_view
        # Names tagged by _set_params_and_buffers_to_ignore_for_model.
        if hasattr(module, "_ddp_params_and_buffers_to_ignore"):
            self.parameters_to_ignore = module._ddp_params_and_buffers_to_ignore
        else:
            self.parameters_to_ignore = []
        if check_reduction:
            # This argument is no longer used since the reducer
            # will ensure reduction completes even if some parameters
            # do not receive gradients.
            warnings.warn(
                "The `check_reduction` argument in `DistributedDataParallel` "
                "module is deprecated. Please avoid using it."
            )
        for param in module.parameters():
            if isinstance(param, torch.nn.parameter.UninitializedParameter):
                raise RuntimeError(
                    "Modules with uninitialized parameters can't be used with `DistributedDataParallel`. "
                    "Run a dummy forward pass to correctly initialize the modules"
                )
        # used for intra-node param sync and inter-node sync as well
        self.broadcast_bucket_size = int(250 * 1024 * 1024)
        # reduction bucket size
        self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)
        # Whether to perform input tensor CPU to GPU copies on a side-stream
        self.use_side_stream_for_tensor_copies = (
            os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1"
        )
        # TODO(wayi@): Remove this field since SPMD is no longer supported,
        # and also remove all the relevant unnecessary loops.
        # Module replication within process (single-process multi device)
        self._module_copies = [self.module]
        # Build parameters for reducer.
        parameters, expect_sparse_gradient = self._build_params_for_reducer()
        # Verify model equivalence.
        dist._verify_model_across_ranks(self.process_group, parameters)
        # Sync params and buffers. Ensures all DDP models start off at the same value.
        self._sync_params_and_buffers(authoritative_rank=0)
        # Builds reducer.
        self._ddp_init_helper(parameters, expect_sparse_gradient)
def _sync_params_and_buffers(self, authoritative_rank=0):
module_states = []
for name, param in self.module.state_dict().items():
if name not in self.parameters_to_ignore:
module_states.append(param)
if len(module_states) > 0:
self._distributed_broadcast_coalesced(
module_states, self.broadcast_bucket_size, authoritative_rank
)
    def _ddp_init_helper(self, parameters, expect_sparse_gradient):
        """Build bucket assignment, the C++ Reducer, the Logger, and
        SyncBatchNorm handling. Called from __init__ and __setstate__."""
        # The bucket size limit is specified in the constructor.
        # Additionally, we allow for a single small bucket for parameters
        # that are defined first, such that their gradients don't spill into
        # a larger bucket (keeps the first reduction small and early).
        bucket_indices = dist._compute_bucket_assignment_by_size(
            parameters[0],
            [dist._DEFAULT_FIRST_BUCKET_BYTES, self.bucket_bytes_cap],
            expect_sparse_gradient[0],
        )
        # Buckets are reduced in reverse order because gradients become ready
        # roughly in reverse order of the forward pass.
        self.reducer = dist.Reducer(
            parameters,
            list(reversed(bucket_indices)),
            self.process_group,
            expect_sparse_gradient,
            self.bucket_bytes_cap,
            self.find_unused_parameters,
            self.gradient_as_bucket_view,
        )
        self.logger = dist.Logger(self.reducer)
        self.logger.set_construction_data_and_log(
            self.module.__class__.__name__,
            [] if self.device_ids is None else self.device_ids,
            -1 if self.output_device is None else self.output_device,
            self.broadcast_buffers,
        )
        # Validate/configure any SyncBatchNorm layers in the module.
        self._passing_sync_batchnorm_handle(self._module_copies)
def __getstate__(self):
self._check_default_group()
attrs = copy.copy(self.__dict__)
del attrs["process_group"]
del attrs["reducer"]
del attrs["logger"]
return attrs
    def __setstate__(self, state):
        # Unpickling: the process group was dropped in __getstate__, so
        # reattach the default group before restoring attributes.
        self.process_group = _get_default_group()
        super(DistributedDataParallel, self).__setstate__(state)
        # Older pickles may predate these flags; default them to enabled.
        self.__dict__.setdefault("require_forward_param_sync", True)
        self.__dict__.setdefault("require_backward_grad_sync", True)
        # Reducer/Logger were also dropped during pickling; rebuild them.
        parameters, expect_sparse_gradient = self._build_params_for_reducer()
        self._ddp_init_helper(parameters, expect_sparse_gradient)
    def _build_params_for_reducer(self):
        """Collect per-replica (module, parameter) pairs, parameter lists,
        sparse-gradient flags, and buffer lists for the reducer.

        Returns:
            (parameters, expect_sparse_gradient): parallel per-replica lists.
        """
        # Keep only grad-requiring params whose fully-qualified name is not
        # in parameters_to_ignore, paired with their owning module.
        modules_and_parameters = [
            [
                (module, parameter)
                for module_name, module in replica.named_modules()
                for parameter in [
                    param
                    for param_name, param in module.named_parameters(recurse=False)
                    if param.requires_grad
                    and f"{module_name}.{param_name}"
                    not in self.parameters_to_ignore
                ]
            ]
            for replica in self._module_copies
        ]
        # Deduplicate shared parameters while preserving order: set.add
        # returns None, so `not memo.add(p)` is always True and serves only
        # to record p after the `p not in memo` membership test passes.
        memo = set()
        modules_and_parameters = [
            [(m, p) for m, p in replica_mps if p not in memo and not memo.add(p)]
            for replica_mps in modules_and_parameters
        ]
        # Build list of parameters.
        parameters = [
            list(parameter for _, parameter in replica)
            for replica in modules_and_parameters
        ]
        # Checks if a module will produce a sparse gradient.
        def produces_sparse_gradient(module):
            if isinstance(module, torch.nn.Embedding) or isinstance(
                module, torch.nn.EmbeddingBag
            ):
                return module.sparse
            return False
        # Build list of booleans indicating whether or not to expect sparse
        # gradients for the corresponding parameters.
        expect_sparse_gradient = [
            list(produces_sparse_gradient(module) for module, _ in replica)
            for replica in modules_and_parameters
        ]
        # The following modules_params and modules_buffers are used for
        # param/buffer sync in _sync_params.
        self.modules_params = [
            list(self._get_parameters(m)) for m in self._module_copies
        ]
        # Collect buffers for modules, filtering out buffers that should be ignored.
        named_module_buffers = [
            [(buffer, buffer_name) for buffer_name, buffer in m.named_buffers()]
            for m in self._module_copies
        ]
        self.modules_buffers = [
            [
                buffer
                for (buffer, buffer_name) in module_buffers
                if buffer_name not in self.parameters_to_ignore
            ]
            for module_buffers in named_module_buffers
        ]
        return parameters, expect_sparse_gradient
def _get_parameters(self, m, recurse=True):
def model_parameters(m):
ps = (
m._former_parameters.values()
if hasattr(m, "_former_parameters")
else m.parameters(recurse=False)
)
for p in ps:
yield p
for m in m.modules() if recurse else [m]:
for p in model_parameters(m):
yield p
def _check_default_group(self):
pickle_not_supported = False
try:
if self.process_group != _get_default_group():
pickle_not_supported = True
except RuntimeError:
pickle_not_supported = True
if pickle_not_supported:
raise RuntimeError(
"DDP Pickling/Unpickling are only supported "
"when using DDP with the default process "
"group. That is, when you have called "
"init_process_group and have not passed "
"process_group argument to DDP constructor"
)
@contextmanager
def no_sync(self):
old_require_backward_grad_sync = self.require_backward_grad_sync
self.require_backward_grad_sync = False
try:
yield
finally:
self.require_backward_grad_sync = old_require_backward_grad_sync
    def forward(self, *inputs, **kwargs):
        """Run one forward pass through the wrapped module.

        When gradients are enabled and sync is not suppressed (``no_sync``),
        primes the reducer before dispatch and prepares it for backward after.
        Inputs/kwargs mirror ``self.module.forward``; they are first moved to
        ``self.device_ids[0]`` when ``device_ids`` is set.
        """
        self.reducer.save_thread_local_state()
        if torch.is_grad_enabled() and self.require_backward_grad_sync:
            self.logger.set_runtime_stats_and_log()
            self.reducer.prepare_for_forward()
        if self.ddp_uneven_inputs_config.ddp_join_enabled:
            # Shadow allreduce matched by joined ranks in join(); lets them
            # count the currently active processes.
            ones = torch.ones(1, device=self.device)
            work = dist.all_reduce(ones, group=self.process_group, async_op=True)
            self.reducer._set_forward_pass_work_handle(
                work,
                self.ddp_uneven_inputs_config.ddp_join_divide_by_initial_world_size,
            )
        # Calling _rebuild_buckets before forward computation,
        # It may allocate new buckets before deallocating old buckets
        # inside _rebuild_buckets. To save peak memory usage,
        # call _rebuild_buckets before the peak memory usage increases
        # during forward computation.
        # This should be called only once during whole training period.
        if torch.is_grad_enabled() and self.reducer._rebuild_buckets():
            logging.info("Reducer buckets have been rebuilt in this iteration.")
        if self.require_forward_param_sync:
            self._sync_params()
        if self.ddp_uneven_inputs_config.ddp_join_enabled:
            # Notify joined ranks whether they should sync in backwards pass or not.
            self._check_global_requires_backward_grad_sync(is_joined_rank=False)
        if self.device_ids:
            inputs, kwargs = self.to_kwargs(inputs, kwargs, self.device_ids[0])
            output = self.module(*inputs[0], **kwargs[0])
        else:
            output = self.module(*inputs, **kwargs)
        if torch.is_grad_enabled() and self.require_backward_grad_sync:
            self.require_forward_param_sync = True
            # We'll return the output object verbatim since it is a freeform
            # object; tensors inside it tell the reducer which parameters were
            # used, so reduction can short-circuit for unused parameters when
            # `find_unused_parameters` is set.
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            self.require_forward_param_sync = False
        return output
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
    def _recursive_to(self, inputs, target_gpu):
        """Recursively move ``inputs`` to ``target_gpu``.

        Returns a one-element-per-"replica" list structure (legacy of the
        old multi-replica scatter path); tensors come back wrapped in
        1-tuples / 1-element lists.
        """
        def to_map(obj):
            if isinstance(obj, torch.Tensor):
                if not self.use_side_stream_for_tensor_copies:
                    return (obj.to(target_gpu),)
                else:
                    # Perform the CPU -> GPU copy on a background stream.
                    stream = _get_stream(target_gpu)
                    with torch.cuda.stream(stream):
                        output = obj.to(target_gpu)
                    with torch.cuda.device(target_gpu):
                        current_stream = torch.cuda.current_stream()
                        # Sync the current stream with the copy stream, and
                        # keep the memory alive until main-stream work is done.
                        current_stream.wait_stream(stream)
                        output.record_stream(current_stream)
                    return (output,)
            # Namedtuples must be rebuilt field-by-field via their constructor.
            if is_namedtuple(obj):
                return [type(obj)(*args) for args in zip(*map(to_map, obj))]
            if isinstance(obj, tuple) and len(obj) > 0:
                return list(zip(*map(to_map, obj)))
            if isinstance(obj, list) and len(obj) > 0:
                return [list(i) for i in zip(*map(to_map, obj))]
            if isinstance(obj, dict) and len(obj) > 0:
                return [type(obj)(i) for i in zip(*map(to_map, obj.items()))]
            # Non-tensor leaves pass through unchanged.
            return [obj]
        # Avoid reference cycle (to_map closes over itself via recursion).
        try:
            res = to_map(inputs)
        finally:
            to_map = None
        return res
def to_kwargs(self, inputs, kwargs, device_id):
inputs = self._recursive_to(inputs, device_id) if inputs else []
kwargs = self._recursive_to(kwargs, device_id) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
def gather(self, outputs, output_device):
return gather(outputs, output_device, dim=self.dim)
def train(self, mode=True):
super(DistributedDataParallel, self).train(mode)
for module in self._module_copies[1:]:
module.train(mode)
return self
    def _schedule_shadow_all_reduce_for_fwd_pass(self):
        """All-reduce a zero tensor; the summed result counts active ranks.

        Non-joined ranks contribute a one in their forward pass (see
        ``forward``), so the returned value is the number of processes that
        have not yet joined.
        """
        all_active_procs = torch.zeros(1, device=self.device)
        dist.all_reduce(all_active_procs, group=self.process_group)
        return all_active_procs.item()
    def _check_global_requires_backward_grad_sync(self, is_joined_rank):
        """All-reduce whether any active rank requires gradient sync.

        Active ranks contribute 1 when they will sync gradients this
        iteration; joined ranks contribute 0. Returns the async work handle
        and the tensor that will hold the result once the work completes.
        """
        if not is_joined_rank and self.require_backward_grad_sync:
            requires_sync_tensor = torch.ones(1, device=self.device)
        else:
            requires_sync_tensor = torch.zeros(1, device=self.device)

        work = dist.all_reduce(
            requires_sync_tensor, group=self.process_group, async_op=True
        )
        return work, requires_sync_tensor
    def _check_and_sync_module_buffers(self):
        """Broadcast module buffers from an agreed-upon rank, if enabled.

        Used by joined ranks in the uneven-inputs protocol to match the
        buffer broadcast that active ranks schedule in their forward pass.
        """
        if self.will_sync_module_buffers():
            authoritative_rank = self._find_common_rank(self._distributed_rank, False)
            self._distributed_broadcast_coalesced(
                self.modules_buffers[0], self.broadcast_bucket_size, authoritative_rank
            )
    def _sync_final_model(self, is_last_joiner):
        """Agree on an authoritative rank and broadcast its model state.

        Called after all processes have joined so that every rank finishes
        training with identical parameters and buffers.
        """
        self._authoritative_rank = self._find_common_rank(
            self._distributed_rank, is_last_joiner
        )
        self._sync_params_and_buffers(authoritative_rank=self._authoritative_rank)
    # Schedule comm ops to match those scheduled by active ranks in the
    # reducer's backward pass.
    def _match_all_reduce_for_bwd_pass(self):
        """Shadow the backward-pass gradient all-reduces with zero gradients."""
        allreduce_work = []
        # Schedule allreduce in the same order as Reducer schedules them, i.e.
        # the order of the buckets. Retrieving the bucket order from the reducer
        # ensures that we keep the same order in join mode, such as when bucket
        # order is rebuilt dynamically.
        all_bucket_tensors = self.reducer.get_bucket_tensors()
        for bucket_tensors in all_bucket_tensors:
            # Joined processes contribute zero gradient. In the case that
            # divide_by_initial_world_size=True, we divide grads by the static
            # world size, if not, the dividing factor is reduced by the number
            # of joined processes.
            zero_tensors = [torch.zeros_like(t) for t in bucket_tensors]
            work = self.process_group.allreduce(zero_tensors)
            allreduce_work.append(work)
        for work in allreduce_work:
            work.wait()
    # Allreduces the used parameter mapping across ranks.
    def _match_unused_params_allreduce(self):
        """Shadow the unused-parameter all-reduce performed by active ranks."""
        locally_used_param_maps = self.reducer._get_local_used_maps()
        self.process_group.allreduce(locally_used_param_maps)
    @contextmanager
    def join(self, divide_by_initial_world_size=True, enable=True):
        """Context manager that makes DDP training robust to uneven inputs.

        Ranks that exhaust their inputs early ("joined" ranks) keep
        participating in the collectives scheduled by still-active ranks
        (shadowing the forward and backward all-reduces) so the active ranks
        do not hang. When every rank has joined, all ranks agree on an
        authoritative rank and broadcast the final model from it.
        """
        # Log uneven input API usage.
        self.logger._set_uneven_input_join()
        try:
            has_error = False
            self.ddp_uneven_inputs_config = _DDPUnevenInputsConfig(
                ddp_join_enabled=enable,
                ddp_join_divide_by_initial_world_size=divide_by_initial_world_size,
            )
            yield
        except Exception as e:
            # Set to skip any processing in the finally block.
            has_error = True
            raise e
        finally:
            # Skip any processing to let the exception immediately be raised if
            # there was one.
            if enable and not has_error:
                all_procs_joined = False
                is_last_joiner = True
                # i counts iterations spent shadowing collectives after this
                # rank joined; used only to warn about large input skew.
                i = 0
                WARN_THRESHOLD = 1000
                warnings.simplefilter("once")
                while not all_procs_joined:
                    if i > WARN_THRESHOLD:
                        my_rank = self._distributed_rank
                        warnings.warn(
                            "Detected uneven input skew of greater "
                            f"than {WARN_THRESHOLD}. This means that rank {my_rank} "
                            f"has at least {WARN_THRESHOLD} fewer inputs than "
                            "other currently active ranks. This level of skew could "
                            "lead to performance degradation during training."
                        )
                    # Schedules allreduce to match fwd pass allreduce in non-joined procs
                    num_active_procs = self._schedule_shadow_all_reduce_for_fwd_pass()
                    if num_active_procs == 0:
                        all_procs_joined = True
                    else:
                        # Some DDP process still needs to be joined.
                        if is_last_joiner:
                            is_last_joiner = False
                        # It will rebuild buckets only once during training period
                        self.reducer._rebuild_buckets()
                        # Schedule a corresponding broadcast if we are syncing module
                        # buffers in the forward pass.
                        self._check_and_sync_module_buffers()
                        (
                            work,
                            should_sync_backwards_tensor,
                        ) = self._check_global_requires_backward_grad_sync(
                            is_joined_rank=True
                        )
                        work.wait()
                        # If nonzero, then we should sync in the bwd pass.
                        should_sync_backwards = should_sync_backwards_tensor.item() != 0
                        # Forward param sync is disabled in the next iteration
                        # if we are skipping grad sync this iteration. Hence, we
                        # set require_forward_param_sync appropriately here.
                        self.require_forward_param_sync = should_sync_backwards
                        if not should_sync_backwards:
                            continue
                        # Schedules one allreduce per gradient bucket to match
                        # the backwards pass allreduce.
                        self._match_all_reduce_for_bwd_pass()
                        # Check if we need to allreduce locally unused params.
                        if self.find_unused_parameters:
                            self._match_unused_params_allreduce()
                        # It will push rebuilt params only once during training period
                        self.reducer._push_all_rebuilt_params()
                        i += 1
                # All procs joined. Agree on authoritative rank and broadcast the model.
                self._sync_final_model(is_last_joiner)
    def register_comm_hook(self, state: object, hook: callable):
        """Register ``hook`` to control cross-rank gradient communication.

        ``state`` is passed through to the hook on every invocation. The hook
        is validated by ``_check_comm_hook`` before being installed on the
        reducer.
        """
        self._check_comm_hook(hook)
        self.logger._set_comm_hook_name(hook.__qualname__)
        dist._register_comm_hook(self.reducer, state, hook)
    def _register_builtin_comm_hook(self, comm_hook_type):
        """Install one of the predefined (C++-implemented) comm hooks."""
        self.logger._set_comm_hook_name(str(comm_hook_type))
        dist._register_builtin_comm_hook(self.reducer, comm_hook_type)
    def _distributed_broadcast_coalesced(
        self, tensors, buffer_size, authoritative_rank=0
    ):
        """Broadcast ``tensors`` from ``authoritative_rank`` to all ranks,
        coalescing them into buckets of at most ``buffer_size`` bytes."""
        dist._broadcast_coalesced(
            self.process_group, tensors, buffer_size, authoritative_rank
        )
def will_sync_module_buffers(self):
return (
self.require_forward_param_sync
and self.broadcast_buffers
and len(self.modules_buffers[0]) > 0
)
    def _find_common_rank(self, input_rank, rank_cond):
        """Return the highest rank for which ``rank_cond`` held true.

        Each process contributes its own rank when ``rank_cond`` is true,
        otherwise -1; a MAX all-reduce then picks the largest participating
        rank.

        Raises:
            ValueError: if no rank satisfied the condition.
        """
        # -1 indicates that this rank is not under consideration to be the
        # common_rank
        rank_to_use = torch.tensor(
            [input_rank if rank_cond else -1],
            device=self.device,
        )
        dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group)
        if rank_to_use.item() == -1:
            raise ValueError(
                "BUG! Expected rank_cond to be true for at least one process."
            )
        return rank_to_use.item()
    def _sync_params(self):
        """Broadcast module buffers from the authoritative rank, if enabled."""
        with torch.no_grad():
            # module buffer sync
            if self.will_sync_module_buffers():
                # Synchronize buffers across processes.
                # If we are running DDP with the join manager, we have to agree
                # upon a rank to sync module buffers from, since rank 0 may
                # already have been joined and have stale module buffers.
                if self.ddp_uneven_inputs_config.ddp_join_enabled:
                    authoritative_rank = self._find_common_rank(
                        self._distributed_rank, True
                    )
                else:
                    # The process with rank 0 is considered the authoritative copy.
                    authoritative_rank = 0
                self._distributed_broadcast_coalesced(
                    self.modules_buffers[0],
                    self.broadcast_bucket_size,
                    authoritative_rank,
                )
    def _passing_sync_batchnorm_handle(self, module_copies):
        """Configure every SyncBatchNorm layer for one-device-per-process use.

        Also asserts that the wrapped module is not on CPU, since
        SyncBatchNorm layers only work with GPU modules.
        """
        for dev_idx, module in enumerate(module_copies):
            for layer in module.modules():
                if isinstance(layer, torch.nn.modules.SyncBatchNorm):
                    assert (
                        self.device_type != "cpu"
                    ), "SyncBatchNorm layers only work with GPU modules"
                    layer._specify_ddp_gpu_num(1)
def _check_comm_hook(self, hook):
if not callable(hook):
raise TypeError("Communication hook must be callable.")
sig = inspect.signature(hook)
if (
sig.parameters["bucket"].annotation != inspect._empty
and sig.parameters["bucket"].annotation != dist.GradBucket
):
raise ValueError(
"Communication hook: bucket annotation should be dist.GradBucket."
)
if sig.return_annotation != inspect._empty and (
sig.return_annotation != torch.futures.Future
and sig.return_annotation != torch._C.Future
):
raise ValueError(
"Communication hook: return annotation should be torch.futures.Future or torch._C.Future."
)
    @property
    def _distributed_rank(self):
        """This process's rank within ``self.process_group``."""
        return dist.get_rank(self.process_group)
@staticmethod
def _set_params_and_buffers_to_ignore_for_model(
module, params_and_buffers_to_ignore
):
# This is a workaround to set parameters and buffers DDP should ignore
# during synchronization. It will be removed when the API is finalized
# as part of addressing https://github.com/pytorch/pytorch/issues/43690.
module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore
def get_ddp_logging_data(self):
return self.logger._get_ddp_logging_data()
def set_ddp_runtime_logging_sample_rate(self, sample_rate):
if sample_rate < 1:
raise ValueError(
"DDP runtime logging sample rate should be equal or greater than 1"
)
self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
| true | true |
f715a88fabf1954be8dfa7b40347f927f0d59c06 | 362 | py | Python | test.py | fahmirevo/sign-language-recognition | ff5e3f4ffb7ecba15667be8870db62717f1fab66 | [
"MIT"
] | null | null | null | test.py | fahmirevo/sign-language-recognition | ff5e3f4ffb7ecba15667be8870db62717f1fab66 | [
"MIT"
] | null | null | null | test.py | fahmirevo/sign-language-recognition | ff5e3f4ffb7ecba15667be8870db62717f1fab66 | [
"MIT"
] | null | null | null | from keras.models import load_model
import numpy as np
X = np.load("dataset/X_test.npy")
Y = np.load("dataset/Y_test.npy")
model = load_model("model")
score = model.evaluate(X, Y)
print(score[0], score[1])
# print(np.argmax(model.predict(X[:200]), axis=1))
# print(np.argmax(model.predict(X), axis=1) == np.argmax(Y, axis=1))
# print(model.predict(X[:50]))
| 22.625 | 68 | 0.685083 | from keras.models import load_model
import numpy as np
X = np.load("dataset/X_test.npy")
Y = np.load("dataset/Y_test.npy")
model = load_model("model")
score = model.evaluate(X, Y)
print(score[0], score[1])
| true | true |
f715aa3a3e29c2e7729e963647247ab81b1771d1 | 7,083 | py | Python | deepchem/molnet/load_function/factors_datasets.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/molnet/load_function/factors_datasets.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/molnet/load_function/factors_datasets.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | """
FACTOR dataset loader
"""
import os
import logging
import time
import numpy as np
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
logger = logging.getLogger(__name__)
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "FACTORS_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "FACTORS_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "FACTORS_test2_disguised_combined_full.csv.gz"
def remove_missing_entries(dataset):
  """Remove missing entries.

  Some of the datasets have missing entries that sneak in as zero'd out
  feature vectors; filter them out of every shard, rewriting in place.
  """
  for shard_num, (X, y, w, ids) in enumerate(dataset.itershards()):
    # Rows with at least one nonzero feature are real entries.
    keep = X.any(axis=1)
    logger.info("Shard %d has %d missing entries." %
                (shard_num, np.count_nonzero(~keep)))
    dataset.set_shard(shard_num, X[keep], y[keep], w[keep], ids[keep])
def get_transformers(train_dataset):
  """Return the list of transformers applied to the FACTORS dataset.

  Currently no transformations are applied; the hook exists so callers can
  uniformly iterate over transformers.
  """
  # TODO: Check if anything needs to be added
  return []
def gen_factors(FACTORS_tasks,
                data_dir,
                train_dir,
                valid_dir,
                test_dir,
                shard_size=2000):
  """Loads the FACTORS dataset; does not do train/test split.

  Downloads the pre-split train/validation/test CSV files if missing,
  featurizes them with the precomputed Merck descriptors, removes missing
  entries, shuffles the training shards, applies any transformers, and moves
  the resulting DiskDatasets into the given directories.

  Returns:
    Tuple of (train, valid, test) DiskDatasets.
  """

  time1 = time.time()

  train_files = os.path.join(data_dir, TRAIN_FILENAME)
  valid_files = os.path.join(data_dir, VALID_FILENAME)
  test_files = os.path.join(data_dir, TEST_FILENAME)

  # NOTE: only the train file's presence is checked; all three files are
  # downloaded together when it is absent.
  if not os.path.exists(train_files):
    logger.info("Downloading train file...")
    deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
    logger.info("Training file download complete.")

    logger.info("Downloading validation file...")
    deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
    logger.info("Validation file download complete.")

    logger.info("Downloading test file...")
    deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
    logger.info("Test file download complete")

  # Featurize the FACTORS dataset with the pre-computed Merck descriptors.
  logger.info("About to featurize the FACTORS dataset")
  featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
  loader = deepchem.data.UserCSVLoader(
      tasks=FACTORS_tasks, id_field="Molecule", featurizer=featurizer)

  logger.info("Featurizing the train dataset...")
  train_dataset = loader.featurize(train_files, shard_size=shard_size)
  logger.info("Featurizing the validation dataset...")
  valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
  logger.info("Featurizing the test dataset...")
  test_dataset = loader.featurize(test_files, shard_size=shard_size)

  logger.info("Remove missing entries from dataset")
  remove_missing_entries(train_dataset)
  remove_missing_entries(valid_dataset)
  remove_missing_entries(test_dataset)

  # Shuffle the training data
  logger.info("Shuffling the training dataset")
  train_dataset.sparse_shuffle()

  # Apply transformations (currently a no-op; see get_transformers)
  logger.info("Transforming datasets with transformers")
  transformers = get_transformers(train_dataset)
  for transformer in transformers:
    logger.info("Performing transformations with {}".format(
        transformer.__class__.__name__))

    logger.info("Transforming the training dataset...")
    train_dataset = transformer.transform(train_dataset)

    logger.info("Transforming the validation dataset...")
    valid_dataset = transformer.transform(valid_dataset)

    logger.info("Transforming the test dataset...")
    test_dataset = transformer.transform(test_dataset)

  logger.info("Transformations complete.")
  logger.info("Moving datasets to corresponding directories")

  train_dataset.move(train_dir)
  logger.info("Train dataset moved.")

  valid_dataset.move(valid_dir)
  logger.info("Validation dataset moved.")

  test_dataset.move(test_dir)
  logger.info("Test dataset moved.")

  time2 = time.time()

  # TIMING
  logger.info("TIMING: FACTORS fitting took %0.3f s" % (time2 - time1))

  return train_dataset, valid_dataset, test_dataset
def load_factors(shard_size=2000, featurizer=None, split=None, reload=True):
  """Loads FACTOR dataset; does not do train/test split

  The Factors dataset is an in-house dataset from Merck that was first introduced in the following paper:
  Ramsundar, Bharath, et al. "Is multitask deep learning practical for pharma?." Journal of chemical information and modeling 57.8 (2017): 2068-2076.

  It contains 1500 Merck in-house compounds that were measured
  for IC50 of inhibition on 12 serine proteases. Unlike most of
  the other datasets featured in MoleculeNet, the Factors
  collection does not have structures for the compounds tested
  since they were proprietary Merck compounds. However, the
  collection does feature pre-computed descriptors for these
  compounds.

  Note that the original train/valid/test split from the source
  data was preserved here, so this function doesn't allow for
  alternate modes of splitting. Similarly, since the source data
  came pre-featurized, it is not possible to apply alternative
  featurizations.

  Parameters
  ----------
  shard_size: int, optional
    Size of the DiskDataset shards to write on disk
  featurizer: optional
    Ignored since featurization pre-computed
  split: optional
    Ignored since split pre-computed
  reload: bool, optional
    Whether to automatically re-load from disk
  """
  # The 12 serine-protease inhibition tasks measured in the source data.
  FACTORS_tasks = [
      'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',
      'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'
  ]

  data_dir = deepchem.utils.data_utils.get_data_dir()
  data_dir = os.path.join(data_dir, "factors")

  if not os.path.exists(data_dir):
    os.mkdir(data_dir)

  train_dir = os.path.join(data_dir, "train_dir")
  valid_dir = os.path.join(data_dir, "valid_dir")
  test_dir = os.path.join(data_dir, "test_dir")

  # Reuse featurized datasets cached on disk by a previous run when present.
  if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
      os.path.exists(test_dir)):
    logger.info("Reloading existing datasets")
    train_dataset = deepchem.data.DiskDataset(train_dir)
    valid_dataset = deepchem.data.DiskDataset(valid_dir)
    test_dataset = deepchem.data.DiskDataset(test_dir)

  else:
    logger.info("Featurizing datasets")
    train_dataset, valid_dataset, test_dataset = gen_factors(
        FACTORS_tasks=FACTORS_tasks,
        data_dir=data_dir,
        train_dir=train_dir,
        valid_dir=valid_dir,
        test_dir=test_dir,
        shard_size=shard_size)

  transformers = get_transformers(train_dataset)

  return FACTORS_tasks, (train_dataset, valid_dataset,
                         test_dataset), transformers
| 34.217391 | 149 | 0.743188 | import os
import logging
import time
import numpy as np
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors
logger = logging.getLogger(__name__)
TRAIN_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_training_disguised_combined_full.csv.gz"
VALID_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test1_disguised_combined_full.csv.gz"
TEST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/FACTORS_test2_disguised_combined_full.csv.gz"
TRAIN_FILENAME = "FACTORS_training_disguised_combined_full.csv.gz"
VALID_FILENAME = "FACTORS_test1_disguised_combined_full.csv.gz"
TEST_FILENAME = "FACTORS_test2_disguised_combined_full.csv.gz"
def remove_missing_entries(dataset):
  """Drop rows whose feature vectors are entirely zero from each shard.

  Some of the Merck datasets have missing entries that sneak in as zero'd
  out feature vectors; filter them out shard by shard, in place.
  """
  for i, (X, y, w, ids) in enumerate(dataset.itershards()):
    # Keep only rows with at least one nonzero feature.
    available_rows = X.any(axis=1)
    logger.info("Shard %d has %d missing entries." %
                (i, np.count_nonzero(~available_rows)))
    X = X[available_rows]
    y = y[available_rows]
    w = w[available_rows]
    ids = ids[available_rows]
    dataset.set_shard(i, X, y, w, ids)
def get_transformers(train_dataset):
  """Return the (currently empty) list of transformers for FACTORS."""
  transformers = list()
  return transformers
def gen_factors(FACTORS_tasks,
                data_dir,
                train_dir,
                valid_dir,
                test_dir,
                shard_size=2000):
  """Download, featurize and stage the FACTORS dataset (pre-split).

  Downloads the train/valid/test CSVs if missing, featurizes them with the
  precomputed Merck descriptors, removes missing entries, shuffles the
  training shards, applies transformers and moves the datasets into the
  given directories. Returns (train, valid, test) DiskDatasets.
  """
  time1 = time.time()

  train_files = os.path.join(data_dir, TRAIN_FILENAME)
  valid_files = os.path.join(data_dir, VALID_FILENAME)
  test_files = os.path.join(data_dir, TEST_FILENAME)

  # Only the train file's presence is checked; all three download together.
  if not os.path.exists(train_files):
    logger.info("Downloading train file...")
    deepchem.utils.data_utils.download_url(url=TRAIN_URL, dest_dir=data_dir)
    logger.info("Training file download complete.")
    logger.info("Downloading validation file...")
    deepchem.utils.data_utils.download_url(url=VALID_URL, dest_dir=data_dir)
    logger.info("Validation file download complete.")
    logger.info("Downloading test file...")
    deepchem.utils.data_utils.download_url(url=TEST_URL, dest_dir=data_dir)
    logger.info("Test file download complete")

  # Featurize using the pre-computed Merck descriptors.
  logger.info("About to featurize the FACTORS dataset")
  featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
  loader = deepchem.data.UserCSVLoader(
      tasks=FACTORS_tasks, id_field="Molecule", featurizer=featurizer)
  logger.info("Featurizing the train dataset...")
  train_dataset = loader.featurize(train_files, shard_size=shard_size)
  logger.info("Featurizing the validation dataset...")
  valid_dataset = loader.featurize(valid_files, shard_size=shard_size)
  logger.info("Featurizing the test dataset...")
  test_dataset = loader.featurize(test_files, shard_size=shard_size)
  logger.info("Remove missing entries from dataset")
  remove_missing_entries(train_dataset)
  remove_missing_entries(valid_dataset)
  remove_missing_entries(test_dataset)
  logger.info("Shuffling the training dataset")
  train_dataset.sparse_shuffle()
  # Apply transformations (currently a no-op; see get_transformers).
  logger.info("Transforming datasets with transformers")
  transformers = get_transformers(train_dataset)
  for transformer in transformers:
    logger.info("Performing transformations with {}".format(
        transformer.__class__.__name__))
    logger.info("Transforming the training dataset...")
    train_dataset = transformer.transform(train_dataset)
    logger.info("Transforming the validation dataset...")
    valid_dataset = transformer.transform(valid_dataset)
    logger.info("Transforming the test dataset...")
    test_dataset = transformer.transform(test_dataset)
  logger.info("Transformations complete.")
  logger.info("Moving datasets to corresponding directories")
  train_dataset.move(train_dir)
  logger.info("Train dataset moved.")
  valid_dataset.move(valid_dir)
  logger.info("Validation dataset moved.")
  test_dataset.move(test_dir)
  logger.info("Test dataset moved.")
  time2 = time.time()
  logger.info("TIMING: FACTORS fitting took %0.3f s" % (time2 - time1))
  return train_dataset, valid_dataset, test_dataset
def load_factors(shard_size=2000, featurizer=None, split=None, reload=True):
  """Load the Merck FACTORS dataset (pre-split, pre-featurized).

  ``featurizer`` and ``split`` are accepted for interface compatibility but
  ignored: the source data ships pre-featurized with a fixed split.
  Returns (tasks, (train, valid, test), transformers).
  """
  # The 12 serine-protease inhibition tasks measured in the source data.
  FACTORS_tasks = [
      'T_00001', 'T_00002', 'T_00003', 'T_00004', 'T_00005', 'T_00006',
      'T_00007', 'T_00008', 'T_00009', 'T_00010', 'T_00011', 'T_00012'
  ]
  data_dir = deepchem.utils.data_utils.get_data_dir()
  data_dir = os.path.join(data_dir, "factors")
  if not os.path.exists(data_dir):
    os.mkdir(data_dir)
  train_dir = os.path.join(data_dir, "train_dir")
  valid_dir = os.path.join(data_dir, "valid_dir")
  test_dir = os.path.join(data_dir, "test_dir")
  # Reuse featurized datasets cached on disk by a previous run when present.
  if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
      os.path.exists(test_dir)):
    logger.info("Reloading existing datasets")
    train_dataset = deepchem.data.DiskDataset(train_dir)
    valid_dataset = deepchem.data.DiskDataset(valid_dir)
    test_dataset = deepchem.data.DiskDataset(test_dir)
  else:
    logger.info("Featurizing datasets")
    train_dataset, valid_dataset, test_dataset = gen_factors(
        FACTORS_tasks=FACTORS_tasks,
        data_dir=data_dir,
        train_dir=train_dir,
        valid_dir=valid_dir,
        test_dir=test_dir,
        shard_size=shard_size)
  transformers = get_transformers(train_dataset)
  return FACTORS_tasks, (train_dataset, valid_dataset,
                         test_dataset), transformers
| true | true |
f715aab0451804e3126d9a43d6e5f34e22e7a392 | 15,596 | py | Python | ch16-deployment/.venv/lib/python3.10/site-packages/psycopg/sql.py | wsvincent/djangoforbeginners_32 | aba7c99aa6050cfe8fb9d588af58c9f67411ae8a | [
"MIT"
] | 5 | 2021-12-14T03:33:39.000Z | 2022-01-11T14:13:21.000Z | ch16-deployment/.venv/lib/python3.10/site-packages/psycopg/sql.py | wsvincent/djangoforbeginners_32 | aba7c99aa6050cfe8fb9d588af58c9f67411ae8a | [
"MIT"
] | null | null | null | ch16-deployment/.venv/lib/python3.10/site-packages/psycopg/sql.py | wsvincent/djangoforbeginners_32 | aba7c99aa6050cfe8fb9d588af58c9f67411ae8a | [
"MIT"
] | null | null | null | """
SQL composition utility module
"""
# Copyright (C) 2020-2021 The Psycopg Team
import codecs
import string
from abc import ABC, abstractmethod
from typing import Any, Iterator, List, Optional, Sequence, Union
from .pq import Escaping
from .abc import AdaptContext
from .adapt import Transformer, PyFormat
from ._encodings import pgconn_encoding
def quote(obj: Any, context: Optional[AdaptContext] = None) -> str:
    """
    Adapt a Python object to a quoted SQL string.

    Use this function only if you absolutely want to convert a Python string to
    an SQL quoted literal to use e.g. to generate batch SQL and you won't have
    a connection avaliable when you will need to use it.

    This function is relatively inefficient, because it doesn't cache the
    adaptation rules. If you pass a *context* you can adapt the adaptation
    rules used, otherwise only global rules are used.
    """
    literal = Literal(obj)
    return literal.as_string(context)
class Composable(ABC):
    """
    Abstract base class for objects that can be used to compose an SQL string.

    `!Composable` objects can be passed directly to
    `~psycopg.Cursor.execute()`, `~psycopg.Cursor.executemany()`,
    `~psycopg.Cursor.copy()` in place of the query string.

    `!Composable` objects can be joined using the ``+`` operator: the result
    will be a `Composed` instance containing the objects joined. The operator
    ``*`` is also supported with an integer argument: the result is a
    `!Composed` instance containing the left argument repeated as many times as
    requested.
    """

    def __init__(self, obj: Any):
        self._obj = obj

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._obj!r})"

    @abstractmethod
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        """
        Return the value of the object as bytes.

        :param context: the context to evaluate the object into.
        :type context: `connection` or `cursor`

        The method is automatically invoked by `~psycopg.Cursor.execute()`,
        `~psycopg.Cursor.executemany()`, `~psycopg.Cursor.copy()` if a
        `!Composable` is passed instead of the query string.
        """
        raise NotImplementedError

    def as_string(self, context: Optional[AdaptContext]) -> str:
        """
        Return the value of the object as string.

        :param context: the context to evaluate the string into.
        :type context: `connection` or `cursor`
        """
        # Decode with the connection's client encoding when available,
        # falling back to utf-8 when there is no connection.
        conn = context.connection if context else None
        enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
        b = self.as_bytes(context)
        if isinstance(b, bytes):
            return b.decode(enc)
        else:
            # as_bytes() may legitimately return a buffer object; decode it
            # through the codecs machinery without copying to bytes first.
            return codecs.lookup(enc).decode(b)[0]

    def __add__(self, other: "Composable") -> "Composed":
        # Concatenation always yields a flat Composed sequence.
        if isinstance(other, Composed):
            return Composed([self]) + other
        if isinstance(other, Composable):
            return Composed([self]) + Composed([other])
        else:
            return NotImplemented

    def __mul__(self, n: int) -> "Composed":
        return Composed([self] * n)

    def __eq__(self, other: Any) -> bool:
        # Equal only for the exact same subclass wrapping an equal payload.
        return type(self) is type(other) and self._obj == other._obj

    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)
class Composed(Composable):
    """
    A `Composable` object made of a sequence of `!Composable`.

    The object is usually created using `!Composable` operators and methods.
    However it is possible to create a `!Composed` directly specifying a
    sequence of objects as arguments: if they are not `!Composable` they will
    be wrapped in a `Literal`.

    Example::

        >>> comp = sql.Composed(
        ...     [sql.SQL("INSERT INTO "), sql.Identifier("table")])
        >>> print(comp.as_string(conn))
        INSERT INTO "table"

    `!Composed` objects are iterable (so they can be used in `SQL.join` for
    instance).
    """

    _obj: List[Composable]

    def __init__(self, seq: Sequence[Any]):
        # Wrap any non-Composable item in a Literal so it is escaped as a
        # value when the query is rendered.
        seq = [
            obj if isinstance(obj, Composable) else Literal(obj) for obj in seq
        ]
        super().__init__(seq)

    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        return b"".join(obj.as_bytes(context) for obj in self._obj)

    def __iter__(self) -> Iterator[Composable]:
        return iter(self._obj)

    def __add__(self, other: Composable) -> "Composed":
        if isinstance(other, Composed):
            return Composed(self._obj + other._obj)
        if isinstance(other, Composable):
            return Composed(self._obj + [other])
        else:
            return NotImplemented

    def join(self, joiner: Union["SQL", str]) -> "Composed":
        """
        Return a new `!Composed` interposing the *joiner* with the `!Composed` items.

        The *joiner* must be a `SQL` or a string which will be interpreted as
        an `SQL`.

        Example::

            >>> fields = sql.Identifier('foo') + sql.Identifier('bar')  # a Composed
            >>> print(fields.join(', ').as_string(conn))
            "foo", "bar"
        """
        if isinstance(joiner, str):
            joiner = SQL(joiner)
        elif not isinstance(joiner, SQL):
            raise TypeError(
                f"Composed.join() argument must be strings or SQL,"
                f" got {joiner!r} instead"
            )

        return joiner.join(self._obj)
class SQL(Composable):
    """
    A `Composable` representing a snippet of SQL statement.

    `!SQL` exposes `join()` and `format()` methods useful to create a template
    where to merge variable parts of a query (for instance field or table
    names).

    The *string* doesn't undergo any form of escaping, so it is not suitable to
    represent variable identifiers or values: you should only use it to pass
    constant strings representing templates or snippets of SQL statements; use
    other objects such as `Identifier` or `Literal` to represent variable
    parts.

    Example::

        >>> query = sql.SQL("SELECT {0} FROM {1}").format(
        ...     sql.SQL(', ').join([sql.Identifier('foo'), sql.Identifier('bar')]),
        ...     sql.Identifier('table'))
        >>> print(query.as_string(conn))
        SELECT "foo", "bar" FROM "table"
    """

    _obj: str
    # Shared str.format-style parser used by format() below.
    _formatter = string.Formatter()

    def __init__(self, obj: str):
        super().__init__(obj)
        if not isinstance(obj, str):
            raise TypeError(f"SQL values must be strings, got {obj!r} instead")

    def as_string(self, context: Optional[AdaptContext]) -> str:
        return self._obj

    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Encode using the connection's client encoding when available.
        enc = "utf-8"
        if context:
            conn = context.connection
            if conn:
                enc = pgconn_encoding(conn.pgconn)
        return self._obj.encode(enc)

    def format(self, *args: Any, **kwargs: Any) -> Composed:
        """
        Merge `Composable` objects into a template.

        :param args: parameters to replace to numbered (``{0}``, ``{1}``) or
            auto-numbered (``{}``) placeholders
        :param kwargs: parameters to replace to named (``{name}``) placeholders
        :return: the union of the `!SQL` string with placeholders replaced
        :rtype: `Composed`

        The method is similar to the Python `str.format()` method: the string
        template supports auto-numbered (``{}``), numbered (``{0}``,
        ``{1}``...), and named placeholders (``{name}``), with positional
        arguments replacing the numbered placeholders and keywords replacing
        the named ones. However placeholder modifiers (``{0!r}``, ``{0:<10}``)
        are not supported.

        If a `!Composable` objects is passed to the template it will be merged
        according to its `as_string()` method. If any other Python object is
        passed, it will be wrapped in a `Literal` object and so escaped
        according to SQL rules.

        Example::

            >>> print(sql.SQL("SELECT * FROM {} WHERE {} = %s")
            ...     .format(sql.Identifier('people'), sql.Identifier('id'))
            ...     .as_string(conn))
            SELECT * FROM "people" WHERE "id" = %s

            >>> print(sql.SQL("SELECT * FROM {tbl} WHERE name = {name}")
            ...     .format(tbl=sql.Identifier('people'), name="O'Rourke"))
            ...     .as_string(conn))
            SELECT * FROM "people" WHERE name = 'O''Rourke'
        """
        rv: List[Composable] = []
        # autonum tracks automatic field numbering; it becomes None as soon
        # as a manually numbered placeholder is seen (mixing the two styles
        # is an error, as in str.format()).
        autonum: Optional[int] = 0
        for pre, name, spec, conv in self._formatter.parse(self._obj):
            if spec:
                raise ValueError("no format specification supported by SQL")
            if conv:
                raise ValueError("no format conversion supported by SQL")
            if pre:
                rv.append(SQL(pre))

            if name is None:
                # Chunk of literal text with no placeholder.
                continue

            if name.isdigit():
                if autonum:
                    raise ValueError(
                        "cannot switch from automatic field numbering to manual"
                    )
                rv.append(args[int(name)])
                autonum = None

            elif not name:
                if autonum is None:
                    raise ValueError(
                        "cannot switch from manual field numbering to automatic"
                    )
                rv.append(args[autonum])
                autonum += 1

            else:
                rv.append(kwargs[name])

        return Composed(rv)

    def join(self, seq: Sequence[Composable]) -> Composed:
        """
        Join a sequence of `Composable`.

        :param seq: the elements to join.
        :type seq: iterable of `!Composable`

        Use the `!SQL` object's *string* to separate the elements in *seq*.
        Note that `Composed` objects are iterable too, so they can be used as
        argument for this method.

        Example::

            >>> snip = sql.SQL(', ').join(
            ...     sql.Identifier(n) for n in ['foo', 'bar', 'baz'])
            >>> print(snip.as_string(conn))
            "foo", "bar", "baz"
        """
        rv = []
        it = iter(seq)
        try:
            rv.append(next(it))
        except StopIteration:
            # Empty sequence: the result is an empty Composed.
            pass
        else:
            # Interleave self between the remaining elements.
            for i in it:
                rv.append(self)
                rv.append(i)

        return Composed(rv)
class Identifier(Composable):
    """
    A `Composable` representing an SQL identifier or a dot-separated sequence.

    Identifiers usually name database objects such as tables or fields.
    PostgreSQL identifiers follow different escaping rules than SQL string
    literals (double quotes instead of single ones); see
    https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS

    Passing several strings produces a qualified, dot-separated name:
    ``Identifier("schema", "table")`` renders as ``"schema"."table"``.
    """

    _obj: Sequence[str]

    def __init__(self, *strings: str):
        # Initialise the base class first so __repr__ keeps working even
        # when the validation below raises.
        super().__init__(strings)
        if not strings:
            raise TypeError("Identifier cannot be empty")
        for part in strings:
            if not isinstance(part, str):
                raise TypeError(
                    f"SQL identifier parts must be strings, got {part!r} instead"
                )

    def __repr__(self) -> str:
        args = ", ".join(map(repr, self._obj))
        return f"{type(self).__name__}({args})"

    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Escaping an identifier requires a live connection in order to
        # know the client encoding to use.
        conn = context.connection if context else None
        if not conn:
            raise ValueError("a connection is necessary for Identifier")
        escaping = Escaping(conn.pgconn)
        encoding = pgconn_encoding(conn.pgconn)
        return b".".join(
            escaping.escape_identifier(part.encode(encoding))
            for part in self._obj
        )
class Literal(Composable):
    """
    A `Composable` representing an SQL value to include in a query.

    Normally you should pass values as `~cursor.execute()` arguments via
    placeholders; use this object only when a literal value really must be
    embedded in the statement. The rendered string follows the normal
    adaptation rules for Python objects.

    Example::

        >>> s1 = sql.Literal("foo")
        >>> s2 = sql.Literal("ba'r")
        >>> s3 = sql.Literal(42)
        >>> print(sql.SQL(', ').join([s1, s2, s3]).as_string(conn))
        'foo', 'ba''r', 42
    """

    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Delegate quoting to the dumper selected for this object's type.
        transformer = Transformer(context)
        dumper = transformer.get_dumper(self._obj, PyFormat.TEXT)
        return dumper.quote(self._obj)
class Placeholder(Composable):
    """A `Composable` representing a placeholder for query parameters.

    With a *name*, renders a named placeholder (``%(name)s``, ``%(name)b``);
    without one, a positional placeholder (``%s``, ``%b``). Useful to build
    queries with a variable number of arguments, e.g. joining
    ``Placeholder() * len(names)`` into a VALUES list.
    """

    def __init__(self, name: str = "", format: PyFormat = PyFormat.AUTO):
        super().__init__(name)
        if not isinstance(name, str):
            raise TypeError(f"expected string as name, got {name!r}")
        # A ")" in the name would break out of the %(name)s syntax.
        if ")" in name:
            raise ValueError(f"invalid name: {name!r}")
        self._format = format

    def __repr__(self) -> str:
        details = []
        if self._obj:
            details.append(repr(self._obj))
        if self._format != PyFormat.AUTO:
            details.append(f"format={PyFormat(self._format).name}")
        return f"{type(self).__name__}({', '.join(details)})"

    def as_string(self, context: Optional[AdaptContext]) -> str:
        if self._obj:
            return f"%({self._obj}){self._format}"
        return f"%{self._format}"

    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Encode with the connection client encoding when available.
        encoding = "utf-8"
        if context and context.connection:
            encoding = pgconn_encoding(context.connection.pgconn)
        return self.as_string(context).encode(encoding)
# Literals
# Handy singletons for the SQL NULL and DEFAULT keywords.
NULL = SQL("NULL")
DEFAULT = SQL("DEFAULT")
| 33.757576 | 85 | 0.591883 |
import codecs
import string
from abc import ABC, abstractmethod
from typing import Any, Iterator, List, Optional, Sequence, Union
from .pq import Escaping
from .abc import AdaptContext
from .adapt import Transformer, PyFormat
from ._encodings import pgconn_encoding
def quote(obj: Any, context: Optional[AdaptContext] = None) -> str:
    """Return *obj* escaped as an SQL literal string, optionally using *context*."""
    return Literal(obj).as_string(context)
class Composable(ABC):
    """Abstract base for objects that can be composed into an SQL query string."""
    def __init__(self, obj: Any):
        self._obj = obj
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self._obj!r})"
    @abstractmethod
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        """Serialize the object to bytes; subclasses must implement this."""
        raise NotImplementedError
    def as_string(self, context: Optional[AdaptContext]) -> str:
        """Serialize to str, decoding with the connection encoding if available."""
        conn = context.connection if context else None
        enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
        b = self.as_bytes(context)
        if isinstance(b, bytes):
            return b.decode(enc)
        else:
            # as_bytes() may return a buffer-like object; decode it
            # through the codecs machinery instead.
            return codecs.lookup(enc).decode(b)[0]
    def __add__(self, other: "Composable") -> "Composed":
        if isinstance(other, Composed):
            return Composed([self]) + other
        if isinstance(other, Composable):
            return Composed([self]) + Composed([other])
        else:
            return NotImplemented
    def __mul__(self, n: int) -> "Composed":
        # Repeat the object n times, e.g. Placeholder() * 3.
        return Composed([self] * n)
    def __eq__(self, other: Any) -> bool:
        return type(self) is type(other) and self._obj == other._obj
    def __ne__(self, other: Any) -> bool:
        return not self.__eq__(other)
class Composed(Composable):
    """A sequence of `Composable` items rendered by concatenation."""
    _obj: List[Composable]
    def __init__(self, seq: Sequence[Any]):
        # Wrap any non-Composable item in a Literal so it gets escaped.
        seq = [
            obj if isinstance(obj, Composable) else Literal(obj) for obj in seq
        ]
        super().__init__(seq)
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        return b"".join(obj.as_bytes(context) for obj in self._obj)
    def __iter__(self) -> Iterator[Composable]:
        return iter(self._obj)
    def __add__(self, other: Composable) -> "Composed":
        if isinstance(other, Composed):
            return Composed(self._obj + other._obj)
        if isinstance(other, Composable):
            return Composed(self._obj + [other])
        else:
            return NotImplemented
    def join(self, joiner: Union["SQL", str]) -> "Composed":
        """Return a new Composed with *joiner* interposed between the items."""
        if isinstance(joiner, str):
            joiner = SQL(joiner)
        elif not isinstance(joiner, SQL):
            raise TypeError(
                f"Composed.join() argument must be strings or SQL,"
                f" got {joiner!r} instead"
            )
        return joiner.join(self._obj)
class SQL(Composable):
    """A Composable holding a snippet of SQL text, with format()/join() helpers."""
    _obj: str
    # Shared formatter, used only to parse "{...}" placeholders in format().
    _formatter = string.Formatter()
    def __init__(self, obj: str):
        super().__init__(obj)
        if not isinstance(obj, str):
            raise TypeError(f"SQL values must be strings, got {obj!r} instead")
    def as_string(self, context: Optional[AdaptContext]) -> str:
        return self._obj
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Encode with the connection client encoding when available.
        enc = "utf-8"
        if context:
            conn = context.connection
            if conn:
                enc = pgconn_encoding(conn.pgconn)
        return self._obj.encode(enc)
    def format(self, *args: Any, **kwargs: Any) -> Composed:
        """Merge arguments into the "{}" placeholders of the template.

        Works like `str.format()` restricted to plain placeholders: format
        specs and conversions ("{0:<10}", "{0!r}") are rejected, and mixing
        automatic with manual field numbering is an error.
        """
        rv: List[Composable] = []
        # autonum tracks automatic numbering; None means manual numbers seen.
        autonum: Optional[int] = 0
        for pre, name, spec, conv in self._formatter.parse(self._obj):
            if spec:
                raise ValueError("no format specification supported by SQL")
            if conv:
                raise ValueError("no format conversion supported by SQL")
            if pre:
                rv.append(SQL(pre))
            if name is None:
                continue
            if name.isdigit():
                if autonum:
                    raise ValueError(
                        "cannot switch from automatic field numbering to manual"
                    )
                rv.append(args[int(name)])
                autonum = None
            elif not name:
                if autonum is None:
                    raise ValueError(
                        "cannot switch from manual field numbering to automatic"
                    )
                rv.append(args[autonum])
                autonum += 1
            else:
                rv.append(kwargs[name])
        return Composed(rv)
    def join(self, seq: Sequence[Composable]) -> Composed:
        """Return a Composed with this SQL string between the items of *seq*."""
        rv = []
        it = iter(seq)
        try:
            rv.append(next(it))
        except StopIteration:
            # Empty sequence: render as an empty Composed.
            pass
        else:
            for i in it:
                rv.append(self)
                rv.append(i)
        return Composed(rv)
class Identifier(Composable):
    """An SQL identifier, or a dot-separated sequence of identifiers."""
    _obj: Sequence[str]
    def __init__(self, *strings: str):
        # Initialise the base class first so __repr__ works even when the
        # validation below raises.
        super().__init__(strings)
        if not strings:
            raise TypeError("Identifier cannot be empty")
        for s in strings:
            if not isinstance(s, str):
                raise TypeError(
                    f"SQL identifier parts must be strings, got {s!r} instead"
                )
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({', '.join(map(repr, self._obj))})"
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Escaping requires a live connection to know the client encoding.
        conn = context.connection if context else None
        if not conn:
            raise ValueError("a connection is necessary for Identifier")
        esc = Escaping(conn.pgconn)
        enc = pgconn_encoding(conn.pgconn)
        escs = [esc.escape_identifier(s.encode(enc)) for s in self._obj]
        return b".".join(escs)
class Literal(Composable):
    """An SQL value adapted and escaped as a literal inside the query."""
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        tx = Transformer(context)
        dumper = tx.get_dumper(self._obj, PyFormat.TEXT)
        return dumper.quote(self._obj)
class Placeholder(Composable):
    """A query-parameter placeholder: positional (%s/%b) or named (%(name)s)."""
    def __init__(self, name: str = "", format: PyFormat = PyFormat.AUTO):
        super().__init__(name)
        if not isinstance(name, str):
            raise TypeError(f"expected string as name, got {name!r}")
        # A ")" in the name would break out of the %(name)s syntax.
        if ")" in name:
            raise ValueError(f"invalid name: {name!r}")
        self._format = format
    def __repr__(self) -> str:
        parts = []
        if self._obj:
            parts.append(repr(self._obj))
        if self._format != PyFormat.AUTO:
            parts.append(f"format={PyFormat(self._format).name}")
        return f"{self.__class__.__name__}({', '.join(parts)})"
    def as_string(self, context: Optional[AdaptContext]) -> str:
        code = self._format
        return f"%({self._obj}){code}" if self._obj else f"%{code}"
    def as_bytes(self, context: Optional[AdaptContext]) -> bytes:
        # Encode with the connection client encoding when available.
        conn = context.connection if context else None
        enc = pgconn_encoding(conn.pgconn) if conn else "utf-8"
        return self.as_string(context).encode(enc)
# Handy singletons for common SQL keywords.
NULL = SQL("NULL")
DEFAULT = SQL("DEFAULT")
| true | true |
f715ab79d63a14aca43b177b0113ad356a236fd3 | 1,008 | py | Python | stubs.min/System/Windows/Forms/__init___parts/ToolStripItemAlignment.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/Windows/Forms/__init___parts/ToolStripItemAlignment.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Windows/Forms/__init___parts/ToolStripItemAlignment.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class ToolStripItemAlignment(Enum,IComparable,IFormattable,IConvertible):
"""
Determines the alignment of a System.Windows.Forms.ToolStripItem in a System.Windows.Forms.ToolStrip.
enum ToolStripItemAlignment,values: Left (0),Right (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Left=None
Right=None
value__=None
| 29.647059 | 215 | 0.675595 | class ToolStripItemAlignment(Enum,IComparable,IFormattable,IConvertible):
pass
""" __format__(formattable: IFormattable,format: str) -> str """
pass
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
Left=None
Right=None
value__=None
| true | true |
f715ab87bac08f07d5539e7b64cc21481970b063 | 8,682 | py | Python | gitinfo/utils.py | Secozzi/gitinfo | 4d218c724f5533f4bfc3f1e6ceb30cd78392eae6 | [
"MIT"
] | null | null | null | gitinfo/utils.py | Secozzi/gitinfo | 4d218c724f5533f4bfc3f1e6ceb30cd78392eae6 | [
"MIT"
] | null | null | null | gitinfo/utils.py | Secozzi/gitinfo | 4d218c724f5533f4bfc3f1e6ceb30cd78392eae6 | [
"MIT"
] | null | null | null | from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
    """Return the GitHub personal access token stored in the package .env file."""
    env_file = join(dirname(__file__), ".env")
    load_dotenv(env_file)
    # Returns None when the token has never been set.
    return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
    """
    Set your Github personal access token in order to access
    private repositories and extend the usage of the GraphQL API.
    """
    # Local imports: this function is also used standalone from the CLI.
    import os
    from dotenv import load_dotenv
    from os.path import join, dirname
    dotenv_path = join(dirname(__file__), '.env')
    load_dotenv(dotenv_path)
    gitsort_token = os.environ.get("GITSORT_TOKEN")
    if not gitsort_token:
        # First run: create the .env file with the token.
        with open(dotenv_path, "w") as f:
            f.write(f"GITSORT_TOKEN={token}")
        print("Github Token set!")
    else:
        # A token already exists: ask interactively before overwriting.
        inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
        while inp not in ["y", "n"]:
            print("Invalid answer")
            inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
        if inp == "y":
            with open(dotenv_path, "w") as f:
                f.write(f"GITSORT_TOKEN={token}")
            print("Github Token updated!")
def run_query(
        query: str,
        token: str,
        variables: dict | None = None,
        headers: dict | None = None
) -> Tuple[dict, str]:
    """
    Run a Github GraphQL query and return the result.

    :param query: str
        GraphQL query
    :param token: str
        The users Github Personal Access Token
    :param variables: dict
        GraphQL variables
    :param headers: dict
        Request headers; defaults to a bearer-token Authorization header.
    :return: tuple
        The JSON response and the remaining rate limit
    :raises Exception: when the HTTP status code is not 200.
    """
    headers = headers or {"Authorization": f"Bearer {token}"}
    response = requests.post(
        'https://api.github.com/graphql',
        json={'query': query, 'variables': variables},
        headers=headers
    )
    if response.status_code != 200:
        raise Exception("Query failed to run by returning code of {}. {}".format(response.status_code, query))
    return response.json(), response.headers["X-RateLimit-Remaining"]
def get_data(
        query: str,
        token: str,
        query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
    """
    Get data from query

    :param query: str
        Graphql Query
    :param token: str
        Github Personal Access Token
    :param query_variables: dict
        Variables used in query
    :return: tuple
        returns a tuple of three items:
        0. bool: True if query failed and return error messages else False
        1. Any: Data returned from query
        2. str: Rate limit
    """
    data, rate_limit = run_query(query, token, query_variables)
    # Error-only responses carry "errors" as their first key. Peeking at
    # the first key lazily keeps the original semantics while avoiding
    # building a full key list (and an IndexError on an empty response).
    if next(iter(data), None) == "errors":
        return True, data["errors"][0]["message"], rate_limit
    try:
        return False, data["data"]["repository"], rate_limit
    except TypeError:
        # "repository" is None when the path/branch does not exist.
        return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
    """
    Extract the owner and repository from a string.

    :param url: str
        Either some form of Github URL or a path such as `user/repo/whatever`
    :return: tuple | list
        Owner and repository name
    """
    link_pattern = re.compile(r"^(git(hub)?|https?)")
    path_pattern = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
    url_pattern = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
    repo_suffix = re.compile(r"((.git)|/)$")
    if not link_pattern.match(url):
        # Plain "owner/repo/..." path.
        if url.count("/") > 0:
            return url.split("/")[:2]
        raise Exception("Link/path must contain both user and repo")
    if path_pattern.match(url):
        # Looks like "github.com-less" owner/repo form.
        return url.split("/")[:2]
    match = url_pattern.match(url)
    if not match:
        raise Exception("Invalid path")
    # Strip a trailing ".git" or "/" from the repository name.
    name = repo_suffix.sub("", match.group("name").split("/")[0])
    return match.group("owner"), name
def humanize_time(time_str: str) -> str:
    """
    Convert datetime into a more human-friendly format

    :param time_str: str
        Time string in the ISO 8601 format (``%Y-%m-%dT%H:%M:%SZ``, UTC)
    :return: str
        Human friendly format: "<number> <time_period> ago"; "Just now"
        for sub-second (or future) timestamps; "null" for an empty string.
    """
    if not time_str:
        return "null"
    now = datetime.now()
    date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    date = date.replace(tzinfo=timezone.utc)
    # Clamp negative differences (future timestamps / clock skew) to 0
    # instead of producing nonsense like "-1 Years ago".
    diff = max(int(now.timestamp() - date.timestamp()), 0)
    # Seconds per unit, smallest to largest.
    times = [
        1, 60, 3600, 86400, 604800, 2629746, 31556925
    ]
    times_str = [
        "Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
    ]
    # Counts per unit, largest unit first; report the first non-zero one.
    temp = [diff // t for t in times][::-1]
    for i, t in enumerate(temp):
        if t != 0:
            return f"{t} {times_str[6-i]}{'' if t == 1 else 's'} ago"
    # diff == 0: every unit count is zero. Previously this fell through
    # and implicitly returned None.
    return "Just now"
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
class FileEntry(NodeMixin):
    """Tree node representing a file (with size) or a directory in the listing."""

    def __init__(
            self,
            name: str,
            size: str | int | None = None,
            parent=None,
            children=None
    ) -> None:
        """
        :param name: entry name as shown in the tree.
        :param size: byte size for files; None marks a directory.
        :param parent: parent node in the anytree hierarchy.
        :param children: optional pre-built child nodes.
        """
        super(FileEntry, self).__init__()
        # `is not None` instead of `!= None`: identity test is the correct
        # (and previous code's intended) None check.
        if size is not None:
            self.name = f"{name} ([green]{human_size(size)}[/])"
        else:
            self.name = f"[blue]{name}/[/]"
        self.parent = parent
        if children:
            self.children = children
class FileEntryRoot(NodeMixin):
    """Root node of the file tree; renders its name verbatim."""

    def __init__(self, name: str, parent=None, children=None):
        super().__init__()
        self.name = name
        self.parent = parent
        if children:
            self.children = children
def populate_tree(
        root_name: str,
        data: list,
        collapse_blobs: bool = False
) -> "anytree.Node":
    """
    Build an anytree hierarchy from GraphQL file-entry data.

    :param root_name: str
        Name of the root node
    :param data: dict
        Entries as returned by the repository query
    :param collapse_blobs: bool
        When True, files in a directory are collapsed into a single
        "<n> Files" node carrying their combined size
    :return: anytree.node
    """
    root = FileEntryRoot(root_name)

    def _attach(entries, parent):
        file_count = 0
        total_size = 0
        for item in entries:
            if item["type"] == "blob":
                if collapse_blobs:
                    total_size += item["object"]["byteSize"]
                    file_count += 1
                else:
                    FileEntry(item["name"], item["object"]["byteSize"], parent=parent)
            else:
                child = FileEntry(item["name"], parent=parent)
                # Recurse into sub-directories that have entries.
                if item["object"]:
                    _attach(item["object"]["entries"], parent=child)
        if collapse_blobs:
            FileEntry(f"[orange1]{file_count}[/] Files", total_size, parent=parent)

    _attach(data, root)
    return root
class Reversor:
    """Sort-key wrapper that inverts the natural ordering of the wrapped value."""

    def __init__(self, obj: Any) -> None:
        self.obj = obj

    def __eq__(self, other: Any) -> bool:
        # Equal iff the wrapped values compare equal.
        return self.obj == other.obj

    def __lt__(self, other: Any) -> bool:
        # Reversed ordering: self sorts before other when other's value is
        # smaller.
        return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
    """
    Recursively sort entries: trees (directories) first, then blobs,
    each group alphabetically (case-insensitive).

    :param entries: list
        Entries
    :return: list
        Entries but sorted
    """
    ordered = sorted(
        entries,
        key=lambda entry: (
            Reversor(entry["type"]),   # "tree" before "blob" (reversed order)
            entry["name"].lower(),     # then alphabetical
        ),
    )
    for entry in ordered:
        if entry["type"] == "tree" and entry["object"]:
            entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
    return ordered
| 27.738019 | 109 | 0.584543 | from __future__ import annotations
from anytree import NodeMixin
from datetime import datetime, timezone
from dotenv import load_dotenv
from os import environ
from os.path import join, dirname
from typing import Tuple, List, Any, Dict, Optional
import re
import requests
from rich.box import Box
__all__ = [
"get_data", "get_token", "get_url_info", "human_size", "humanize_time",
"populate_tree", "ROUNDED_BORDER", "run_query", "set_token", "sort_entries"
]
ROUNDED_BORDER: Box = Box(
"""\
╭──╮
│ │
│ │
│ │
│ │
│ │
│ │
╰──╯
"""
)
def get_token() -> str:
    """Return the GitHub personal access token stored in the package .env file."""
    dotenv_path = join(dirname(__file__), '.env')
    load_dotenv(dotenv_path)
    return environ.get("GITSORT_TOKEN")
def set_token(token: str) -> None:
    """Store (or interactively update) the GitHub token in the package .env file."""
    import os
    from dotenv import load_dotenv
    from os.path import join, dirname
    dotenv_path = join(dirname(__file__), '.env')
    load_dotenv(dotenv_path)
    gitsort_token = os.environ.get("GITSORT_TOKEN")
    if not gitsort_token:
        # First run: create the .env file with the token.
        with open(dotenv_path, "w") as f:
            f.write(f"GITSORT_TOKEN={token}")
        print("Github Token set!")
    else:
        # A token already exists: ask interactively before overwriting.
        inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
        while inp not in ["y", "n"]:
            print("Invalid answer")
            inp = input("Github token already set! Do you want to update it? [y/n] ").lower()
        if inp == "y":
            with open(dotenv_path, "w") as f:
                f.write(f"GITSORT_TOKEN={token}")
            print("Github Token updated!")
def run_query(
    query: str,
    token: str,
    variables: dict | None = None,
    headers: dict | None = None
) -> Tuple[dict, str]:
    """Run a Github GraphQL query; return (json, remaining-rate-limit)."""
    if not headers:
        headers = {"Authorization": f"Bearer {token}"}
    request = requests.post(
        'https://api.github.com/graphql',
        json={'query': query, 'variables': variables},
        headers=headers
    )
    if request.status_code == 200:
        return request.json(), request.headers["X-RateLimit-Remaining"]
    else:
        raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
def get_data(
    query: str,
    token: str,
    query_variables: Dict[str, str]
) -> Tuple[bool, Any, str]:
    """Run a query; return (failed?, data-or-error-message, rate limit)."""
    data, rate_limit = run_query(query, token, query_variables)
    # NOTE(review): relies on "errors" being the response's first key;
    # raises IndexError on an empty response dict -- confirm upstream.
    if list(data.keys())[0] == "errors":
        return True, data["errors"][0]["message"], rate_limit
    try:
        return False, data["data"]["repository"], rate_limit
    except TypeError:
        # "repository" is None when the path/branch does not exist.
        return True, "Query failed. Make sure path and branch is valid.", rate_limit
def get_url_info(url: str) -> Tuple[str, str] | List[str]:
    """Extract (owner, repository) from a GitHub URL or an owner/repo path."""
    is_link = re.compile(r"^(git(hub)?|https?)")
    is_git_path = re.compile(r"^[a-zA-Z0-9\-_.]+/[a-zA-Z0-9\-_.]+")
    git_url_regex = re.compile(r"^(https|git)?(://|@)?([^/:]+)[/:](?P<owner>[^/:]+)/(?P<name>.+)(.git)?$")
    is_git_repo = re.compile(r"((.git)|/)$")
    if is_link.match(url):
        if is_git_path.match(url):
            return url.split("/")[:2]
        match = git_url_regex.match(url)
        if not match:
            raise Exception("Invalid path")
        # Strip a trailing ".git" or "/" from the repository name.
        name = match.group("name").split("/")[0]
        name = is_git_repo.sub("", name)
        owner = match.group("owner")
        return owner, name
    else:
        # Plain "owner/repo/..." path.
        if url.count("/") > 0:
            return url.split("/")[:2]
        raise Exception("Link/path must contain both user and repo")
def humanize_time(time_str: str) -> str:
    """Convert an ISO 8601 timestamp into "<n> <unit>s ago"; "null" if empty."""
    if not time_str:
        return "null"
    now = datetime.now()
    date = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ")
    date = date.replace(tzinfo=timezone.utc)
    diff = int(now.timestamp() - date.timestamp())
    # Seconds per unit, smallest to largest.
    times = [
        1, 60, 3600, 86400, 604800, 2629746, 31556925
    ]
    times_str = [
        "Second", "Minute", "Hour", "Day", "Week", "Month", "Year"
    ]
    temp = [diff // t for t in times][::-1]
    for i, t in enumerate(temp):
        if t != 0:
            return f"{t} {times_str[6-i]}{'' if t == 1 else 's'} ago"
    # NOTE(review): falls through (returns None) when diff < 1 second.
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
    """Convert a byte count to "<n> <unit>" recursing through 1024x units."""
    if units is None:
        units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
class FileEntry(NodeMixin):
    """Tree node for a file (with size) or directory in the repo listing."""
    def __init__(
        self,
        name: str,
        size: str | int = None,
        parent=None,
        children=None
    ) -> None:
        super(FileEntry, self).__init__()
        # A size marks a file; None marks a directory.
        if size != None:
            self.name = f"{name} ([green]{human_size(size)}[/])"
        else:
            self.name = f"[blue]{name}/[/]"
        self.parent = parent
        if children:
            self.children = children
class FileEntryRoot(NodeMixin):
    """Root node of the file tree; renders its name verbatim."""
    def __init__(self, name: str, parent=None, children=None):
        super(FileEntryRoot, self).__init__()
        self.name = name
        self.parent = parent
        if children:
            self.children = children
def populate_tree(
    root_name: str,
    data: list,
    collapse_blobs: bool = False
) -> "anytree.Node":
    """Build an anytree hierarchy from GraphQL file-entry data."""
    root = FileEntryRoot(root_name)
    def edges(tree: FileEntry | FileEntryRoot, parent=None):
        collapsed_count = 0
        collapsed_size = 0
        for entry in tree:
            if entry["type"] == "blob":
                if collapse_blobs:
                    collapsed_size += entry["object"]["byteSize"]
                    collapsed_count += 1
                else:
                    _ = FileEntry(entry["name"], entry["object"]["byteSize"], parent=parent)
            else:
                node = FileEntry(entry["name"], parent=parent)
                # Recurse into sub-directories that have entries.
                if entry["object"]:
                    edges(entry["object"]["entries"], parent=node)
        if collapse_blobs:
            _ = FileEntry(f"[orange1]{collapsed_count}[/] Files", collapsed_size, parent=parent)
    edges(data, root)
    return root
class Reversor:
    """Sort-key wrapper that inverts the natural ordering of the wrapped value."""
    def __init__(self, obj: Any) -> None:
        self.obj = obj
    def __eq__(self, other: Any) -> bool:
        return other.obj == self.obj
    def __lt__(self, other: Any) -> bool:
        # Reversed: self < other when the wrapped values say other < self.
        return other.obj < self.obj
def sort_entries(entries: List[Any]) -> List[Any]:
    """Recursively sort entries trees-first, then case-insensitively by name."""
    entries = sorted(
        entries, key=lambda x: (
            Reversor(x["type"]),
            x["name"].lower()
        )
    )
    for entry in entries:
        if entry["type"] == "tree" and entry["object"]:
            entry["object"]["entries"] = sort_entries(entry["object"]["entries"])
    return entries
| true | true |
f715ac6786b1c8d153bae843595f1b3b37a7b901 | 6,518 | py | Python | casinotools/fileformat/casino3/IntensityImage.py | drix00/pycasinotools | 2e33b42fb7c7629b35f007be5a404fdd1c45c771 | [
"Apache-2.0"
] | 2 | 2019-07-14T23:16:09.000Z | 2019-10-26T10:54:38.000Z | casinotools/fileformat/casino3/IntensityImage.py | drix00/pycasinotools | 2e33b42fb7c7629b35f007be5a404fdd1c45c771 | [
"Apache-2.0"
] | 5 | 2017-02-06T16:50:48.000Z | 2020-08-21T03:50:06.000Z | casinotools/fileformat/casino3/IntensityImage.py | drix00/pycasinotools | 2e33b42fb7c7629b35f007be5a404fdd1c45c771 | [
"Apache-2.0"
] | 5 | 2016-05-03T16:41:14.000Z | 2022-01-14T22:22:58.000Z | #!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Hendrix Demers"
__license__ = ""
# Standard library modules.
import logging
import os.path
# Third party modules.
from PIL import Image
# Local modules.
import casinotools.fileformat.casino3.File as File
import casinotools.fileformat.casino3.ScanPointResults as ScanPointResults
# Globals and constants variables.
# Selector values for IntensityImage: which transmitted coefficient to read.
INTENSITY_TRANSMITTED = "TransmittedIntensity"
INTENSITY_TRANSMITTED_DETECTED = "TransmittedDetectedIntensity"
class IntensityImage(object):
    """Build a 2D intensity image from the scan points of a Casino v3 results file."""

    def __init__(self, filepath, imageName="IntensityImage", intensityType=INTENSITY_TRANSMITTED_DETECTED):
        """
        :param filepath: path of the Casino results (.cas) file.
        :param imageName: base name used for the saved image files.
        :param intensityType: one of the INTENSITY_* module constants.
        """
        self._filepath = filepath
        self._imageName = imageName
        self._intensityType = intensityType
        # Target canvas size (pixels) for the scaled image variants.
        self._imageSize = (800, 600)
        self._createGetIntensityMethod()

    def _createGetIntensityMethod(self):
        # Bind the unbound accessor matching the requested intensity type;
        # it is later called as self._getIntensity(scanPointResults).
        if self._intensityType == INTENSITY_TRANSMITTED:
            self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedCoefficient
        elif self._intensityType == INTENSITY_TRANSMITTED_DETECTED:
            self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedDetectedCoefficient

    def _createImage(self):
        # Full pipeline: read the file, classify the scan grid, rasterize.
        self._extractData()
        self._analyzePositions()
        self._createRawImage2()

    def _extractData(self):
        """Read scan-point positions and intensities from the .cas file."""
        casinoFile = File.File(self._filepath)
        casinoFile.open()
        # Only single-simulation files are supported.
        assert 1 == casinoFile.getNumberSimulations()
        scanPointsResults = casinoFile.getScanPointResults()
        self._numberScanPoints = len(scanPointsResults)
        self._positions = []
        self._intensities = {}
        for scanPointResults in scanPointsResults:
            position = scanPointResults.getPosition()
            self._positions.append(position)
            self._intensities[position] = self._getIntensity(scanPointResults)

    def _analyzePositions(self):
        """Classify the scan grid (point, line, plane or volume) from the unique coordinates."""
        self._xSet = set()
        self._ySet = set()
        self._zSet = set()
        for position in self._positions:
            x, y, z = position
            self._xSet.add(x)
            self._ySet.add(y)
            self._zSet.add(z)
        numberUniqueX = len(self._xSet)
        numberUniqueY = len(self._ySet)
        numberUniqueZ = len(self._zSet)
        # Image type encodes which axes vary: "XY"/"XZ"/"YZ" planes,
        # "X"/"Y"/"Z" lines, "3D" volume, "P" single point.
        imageType = None
        if numberUniqueX > 1:
            if numberUniqueY > 1:
                if numberUniqueZ > 1:
                    imageType = "3D"
                else:
                    imageType = "XY"
            elif numberUniqueZ > 1:
                imageType = "XZ"
            else:
                imageType = "X"
        elif numberUniqueY > 1:
            if numberUniqueZ > 1:
                imageType = "YZ"
            else:
                imageType = "Y"
        elif numberUniqueZ > 1:
            imageType = "Z"
        else:
            imageType = "P"
        self._imageType = imageType
        logging.info("Number unique X: %i", len(self._xSet))
        logging.info("Number unique Y: %i", len(self._ySet))
        logging.info("Number unique Z: %i", len(self._zSet))
        logging.info("Image type: %s", imageType)

    def _createRawImage(self):
        """Legacy rasterizer using Image.putdata (kept for reference; _createRawImage2 is used)."""
        if self._imageType == "XY":
            size = len(self._xSet), len(self._ySet)
            self._imageRaw = Image.new("F", size, color="black")
            z = list(self._zSet)[0]
            data = []
            # putdata() fills row by row: iterate y (rows) over _ySet and
            # x (columns) over _xSet. Fixed: the loops previously iterated
            # y over _xSet and x over _ySet (swapped axes).
            for y in sorted(self._ySet):
                for x in sorted(self._xSet):
                    position = x, y, z
                    intensity = self._intensities[position]
                    data.append(intensity)
            self._imageRaw.putdata(data)

    def _createRawImage2(self):
        """Rasterize the XY plane into a 32-bit float image via direct pixel access."""
        if self._imageType == "XY":
            size = len(self._xSet), len(self._ySet)
            self._imageRaw = Image.new("F", size, color="black")
            # All points share the single z value in an XY scan.
            z = list(self._zSet)[0]
            pix = self._imageRaw.load()
            for indexH, x in enumerate(sorted(self._xSet)):
                for indexV, y in enumerate(sorted(self._ySet)):
                    position = (x, y, z)
                    value = self._intensities[position]
                    pix[indexH, indexV] = value

    def save(self, path):
        """Save the generated image(s) into directory *path*."""
        self._saveRawImage(path)
        # Scaled variants are currently disabled; only the raw float TIFF
        # is written.
        #self._saveImage(path)

    def _saveRawImage(self, path):
        """Write the unscaled float image as <imageName>_raw.tiff."""
        imageFilepath = os.path.join(path, self._imageName + "_raw.tiff")
        self._imageRaw.save(imageFilepath)

    def _saveImage(self, path):
        """Write zoomed variants (one per resampling filter) plus a centered canvas image."""
        size = self._imageRaw.size
        zoomFactor = self._computeZoomFactor(size)
        newSize = size[0] * zoomFactor, size[1] * zoomFactor
        filters = {"near": Image.NEAREST, "bilin": Image.BILINEAR,
                   "bicub": Image.BICUBIC, "anti": Image.ANTIALIAS}
        # Renamed loop variable: "filter" shadowed the builtin.
        for name, resampleFilter in filters.items():
            imageFilepath = os.path.join(path, self._imageName + "_" + name + ".tiff")
            image = self._imageRaw.resize(newSize, resampleFilter)
            image.save(imageFilepath)
        imageFilepath = os.path.join(path, self._imageName + ".tiff")
        tmpImage = self._imageRaw.resize(newSize, Image.BICUBIC)
        image = Image.new(tmpImage.mode, self._imageSize)
        # Integer division: PIL paste boxes must be int coordinates (the
        # original "/" produced floats under Python 3).
        topCorner = (self._imageSize[0] - tmpImage.size[0]) // 2, (self._imageSize[1] - tmpImage.size[1]) // 2
        box = topCorner[0], topCorner[1], topCorner[0] + tmpImage.size[0], topCorner[1] + tmpImage.size[1]
        image.paste(tmpImage, box)
        image.save(imageFilepath)

    def _computeZoomFactor(self, size):
        """Return the largest integer zoom keeping *size* within the target canvas."""
        # NOTE(review): returns 0 when the raw image is already larger than
        # the canvas -- confirm whether that case can occur.
        xZoom = int(self._imageSize[0] / size[0])
        yZoom = int(self._imageSize[1] / size[1])
        zoom = min(xZoom, yZoom)
        return zoom
def run():
    """Demo entry point: build and save the intensity image for a bundled test file."""
    from pkg_resources import resource_filename #@UnresolvedImport
    resultsPath = resource_filename(__name__, "../../test_data/casino3.x/createImage")
    casBinnedFilepath = os.path.join(resultsPath, "Au_C_thin_1nm_Inside_100ke_binned.cas")
    imageBinned = IntensityImage(casBinnedFilepath)
    imageBinned._createImage()
    imageBinned.save(resultsPath)
if __name__ == '__main__':
    run()
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Hendrix Demers"
__license__ = ""
import logging
import os.path
from PIL import Image
import casinotools.fileformat.casino3.File as File
import casinotools.fileformat.casino3.ScanPointResults as ScanPointResults
INTENSITY_TRANSMITTED = "TransmittedIntensity"
INTENSITY_TRANSMITTED_DETECTED = "TransmittedDetectedIntensity"
class IntensityImage(object):
def __init__(self, filepath, imageName="IntensityImage", intensityType=INTENSITY_TRANSMITTED_DETECTED):
self._filepath = filepath
self._imageName = imageName
self._intensityType = intensityType
self._imageSize = (800, 600)
self._createGetIntensityMethod()
def _createGetIntensityMethod(self):
if self._intensityType == INTENSITY_TRANSMITTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedCoefficient
elif self._intensityType == INTENSITY_TRANSMITTED_DETECTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedDetectedCoefficient
def _createImage(self):
self._extractData()
self._analyzePositions()
self._createRawImage2()
def _extractData(self):
casinoFile = File.File(self._filepath)
casinoFile.open()
assert 1 == casinoFile.getNumberSimulations()
scanPointsResults = casinoFile.getScanPointResults()
self._numberScanPoints = len(scanPointsResults)
self._positions = []
self._intensities = {}
for scanPointResults in scanPointsResults:
position = scanPointResults.getPosition()
self._positions.append(position)
self._intensities[position] = self._getIntensity(scanPointResults)
def _analyzePositions(self):
self._xSet = set()
self._ySet = set()
self._zSet = set()
for position in self._positions:
x, y, z = position
self._xSet.add(x)
self._ySet.add(y)
self._zSet.add(z)
numberUniqueX = len(self._xSet)
numberUniqueY = len(self._ySet)
numberUniqueZ = len(self._zSet)
imageType = None
if numberUniqueX > 1:
if numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "3D"
else:
imageType = "XY"
elif numberUniqueZ > 1:
imageType = "XZ"
else:
imageType = "X"
elif numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "YZ"
else:
imageType = "Y"
elif numberUniqueZ > 1:
imageType = "Z"
else:
imageType = "P"
self._imageType = imageType
logging.info("Number unique X: %i", len(self._xSet))
logging.info("Number unique Y: %i", len(self._ySet))
logging.info("Number unique Z: %i", len(self._zSet))
logging.info("Image type: %s", imageType)
def _createRawImage(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
data = []
for y in sorted(self._xSet):
for x in sorted(self._ySet):
position = x, y, z
intensity = self._intensities[position]
data.append(intensity)
self._imageRaw.putdata(data)
def _createRawImage2(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
pix = self._imageRaw.load()
for indexH, x in enumerate(sorted(self._xSet)):
for indexV, y in enumerate(sorted(self._ySet)):
position = (x, y, z)
value = self._intensities[position]
pix[indexH, indexV] = value
def save(self, path):
self._saveRawImage(path)
def _saveRawImage(self, path):
imageFilepath = os.path.join(path, self._imageName + "_raw.tiff")
self._imageRaw.save(imageFilepath)
def _saveImage(self, path):
size = self._imageRaw.size
zoomFactor = self._computeZoomFactor(size)
newSize = size[0] * zoomFactor, size[1] * zoomFactor
filters = {"near": Image.NEAREST, "bilin": Image.BILINEAR,
"bicub": Image.BICUBIC, "anti": Image.ANTIALIAS}
for name, filter in filters.items():
imageFilepath = os.path.join(path, self._imageName + "_" + name + ".tiff")
image = self._imageRaw.resize(newSize, filter)
image.save(imageFilepath)
imageFilepath = os.path.join(path, self._imageName + ".tiff")
tmpImage = self._imageRaw.resize(newSize, Image.BICUBIC)
image = Image.new(tmpImage.mode, self._imageSize)
topCorner = (self._imageSize[0] - tmpImage.size[0]) / 2, (self._imageSize[1] - tmpImage.size[1]) / 2
box = topCorner[0], topCorner[1], topCorner[0] + tmpImage.size[0], topCorner[1] + tmpImage.size[1]
image.paste(tmpImage, box)
image.save(imageFilepath)
def _computeZoomFactor(self, size):
xZoom = int(self._imageSize[0] / size[0])
yZoom = int(self._imageSize[1] / size[1])
zoom = min(xZoom, yZoom)
return zoom
def run():
    """Demo entry point: build and save the intensity image for a bundled
    CASINO v3 test result file (binned Au/C thin-film scan)."""
    from pkg_resources import resource_filename  # deferred: only the demo needs it
    # Resolve the test-data directory relative to this package.
    resultsPath = resource_filename(__name__, "../../test_data/casino3.x/createImage")
    casBinnedFilepath = os.path.join(resultsPath, "Au_C_thin_1nm_Inside_100ke_binned.cas")
    imageBinned = IntensityImage(casBinnedFilepath)
    imageBinned._createImage()
    imageBinned.save(resultsPath)
if __name__ == '__main__':
    run()
f715af0a24dd23852f403a9a2f9f37a1c461984d | 66 | py | Python | programaker_twitter_service/__init__.py | plaza-project/twitter-bridge | 0b1807fef5817b2535eecc3b795e58685ff08ff5 | [
"Apache-2.0"
] | 1 | 2020-12-19T05:04:19.000Z | 2020-12-19T05:04:19.000Z | programaker_twitter_service/__init__.py | plaza-project/twitter-bridge | 0b1807fef5817b2535eecc3b795e58685ff08ff5 | [
"Apache-2.0"
] | null | null | null | programaker_twitter_service/__init__.py | plaza-project/twitter-bridge | 0b1807fef5817b2535eecc3b795e58685ff08ff5 | [
"Apache-2.0"
] | null | null | null | from . import config, storage
from .listener import TweetListener
| 22 | 35 | 0.818182 | from . import config, storage
from .listener import TweetListener
| true | true |
f715af426554e6845a9d59b633445b811a99ff66 | 1,074 | py | Python | core/api/base.py | care2donate/care2donate | 5f99e7169653a96b6e6db44f90afee17758a4480 | [
"MIT"
] | 1 | 2021-05-14T15:21:42.000Z | 2021-05-14T15:21:42.000Z | core/api/base.py | care2donate/care2donate | 5f99e7169653a96b6e6db44f90afee17758a4480 | [
"MIT"
] | 2 | 2021-05-13T10:26:36.000Z | 2021-05-13T19:30:25.000Z | core/api/base.py | care2donate/care2donate | 5f99e7169653a96b6e6db44f90afee17758a4480 | [
"MIT"
] | null | null | null | from django.db import transaction
from rest_framework import generics, mixins
class BaseAPIView(generics.GenericAPIView,
                  mixins.CreateModelMixin,
                  mixins.ListModelMixin,
                  mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin):
    """Generic CRUD view that maps HTTP verbs onto the DRF mixin actions.

    Every handler runs inside a database transaction, so a failing request
    rolls back all of its writes.
    """
    @transaction.atomic
    def get(self, request, *args, **kwargs):
        # A 'pk' URL kwarg selects a single object; otherwise list the queryset.
        handler = self.retrieve if kwargs.get('pk') else self.list
        return handler(request, *args, **kwargs)
    @transaction.atomic
    def post(self, request, *args, **kwargs):
        # Create a new object from the request payload.
        return self.create(request, *args, **kwargs)
    @transaction.atomic
    def put(self, request, *args, **kwargs):
        # Full update of an existing object.
        return self.update(request, *args, **kwargs)
    @transaction.atomic
    def patch(self, request, *args, **kwargs):
        # Partial update: only the supplied fields change.
        return self.partial_update(request, *args, **kwargs)
    @transaction.atomic
    def delete(self, request, *args, **kwargs):
        # Remove the addressed object.
        return self.destroy(request, *args, **kwargs)
| 31.588235 | 60 | 0.620112 | from django.db import transaction
from rest_framework import generics, mixins
class BaseAPIView(generics.GenericAPIView,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin):
@transaction.atomic
def get(self, request, *args, **kwargs):
if kwargs.get('pk'):
return self.retrieve(request, *args, **kwargs)
else:
return self.list(request, *args, **kwargs)
@transaction.atomic
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
@transaction.atomic
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
@transaction.atomic
def patch(self, request, *args, **kwargs):
return self.partial_update(request, *args, **kwargs)
@transaction.atomic
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| true | true |
f715b0166e705758a0701b19f80e34986238aa34 | 4,522 | py | Python | tests/contrib/sensors/test_wasb_sensor.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 4 | 2015-11-12T10:58:54.000Z | 2017-08-05T06:41:36.000Z | tests/contrib/sensors/test_wasb_sensor.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 13 | 2018-07-11T10:45:30.000Z | 2018-08-18T00:43:30.000Z | tests/contrib/sensors/test_wasb_sensor.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 5 | 2020-05-12T13:38:14.000Z | 2022-03-17T17:17:50.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import datetime
from airflow import DAG, configuration
from airflow.contrib.sensors.wasb_sensor import WasbBlobSensor
from airflow.contrib.sensors.wasb_sensor import WasbPrefixSensor
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestWasbBlobSensor(unittest.TestCase):
    """Unit tests for WasbBlobSensor: constructor wiring and poke() delegation."""
    # Keyword arguments shared by every sensor instantiation below.
    _config = {
        'container_name': 'container',
        'blob_name': 'blob',
        'wasb_conn_id': 'conn_id',
        'timeout': 100,
    }
    def setUp(self):
        # Each test needs a DAG to attach the sensor task to.
        configuration.load_test_config()
        args = {
            'owner': 'airflow',
            'start_date': datetime.datetime(2017, 1, 1)
        }
        self.dag = DAG('test_dag_id', default_args=args)
    def test_init(self):
        """Constructor stores config values and defaults check_options to {}."""
        sensor = WasbBlobSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            **self._config
        )
        self.assertEqual(sensor.container_name, self._config['container_name'])
        self.assertEqual(sensor.blob_name, self._config['blob_name'])
        self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
        self.assertEqual(sensor.check_options, {})
        self.assertEqual(sensor.timeout, self._config['timeout'])
        # Explicit check_options must be passed through unchanged.
        sensor = WasbBlobSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            check_options={'timeout': 2},
            **self._config
        )
        self.assertEqual(sensor.check_options, {'timeout': 2})
    @mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
                autospec=True)
    def test_poke(self, mock_hook):
        """poke() forwards container/blob and check_options to the hook."""
        mock_instance = mock_hook.return_value
        sensor = WasbBlobSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            check_options={'timeout': 2},
            **self._config
        )
        sensor.poke(None)
        mock_instance.check_for_blob.assert_called_once_with(
            'container', 'blob', timeout=2
        )
class TestWasbPrefixSensor(unittest.TestCase):
    """Unit tests for WasbPrefixSensor: constructor wiring and poke() delegation."""
    # Keyword arguments shared by every sensor instantiation below.
    _config = {
        'container_name': 'container',
        'prefix': 'prefix',
        'wasb_conn_id': 'conn_id',
        'timeout': 100,
    }
    def setUp(self):
        # Each test needs a DAG to attach the sensor task to.
        configuration.load_test_config()
        args = {
            'owner': 'airflow',
            'start_date': datetime.datetime(2017, 1, 1)
        }
        self.dag = DAG('test_dag_id', default_args=args)
    def test_init(self):
        """Constructor stores config values and defaults check_options to {}."""
        sensor = WasbPrefixSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            **self._config
        )
        self.assertEqual(sensor.container_name, self._config['container_name'])
        self.assertEqual(sensor.prefix, self._config['prefix'])
        self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
        self.assertEqual(sensor.check_options, {})
        self.assertEqual(sensor.timeout, self._config['timeout'])
        # Explicit check_options must be passed through unchanged.
        sensor = WasbPrefixSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            check_options={'timeout': 2},
            **self._config
        )
        self.assertEqual(sensor.check_options, {'timeout': 2})
    @mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
                autospec=True)
    def test_poke(self, mock_hook):
        """poke() forwards container/prefix and check_options to the hook."""
        mock_instance = mock_hook.return_value
        sensor = WasbPrefixSensor(
            task_id='wasb_sensor',
            dag=self.dag,
            check_options={'timeout': 2},
            **self._config
        )
        sensor.poke(None)
        mock_instance.check_for_prefix.assert_called_once_with(
            'container', 'prefix', timeout=2
        )
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 31.402778 | 79 | 0.627377 |
import unittest
import datetime
from airflow import DAG, configuration
from airflow.contrib.sensors.wasb_sensor import WasbBlobSensor
from airflow.contrib.sensors.wasb_sensor import WasbPrefixSensor
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestWasbBlobSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.blob_name, self._config['blob_name'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_blob.assert_called_once_with(
'container', 'blob', timeout=2
)
class TestWasbPrefixSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'prefix': 'prefix',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
configuration.load_test_config()
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.prefix, self._config['prefix'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.contrib.sensors.wasb_sensor.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with(
'container', 'prefix', timeout=2
)
if __name__ == '__main__':
unittest.main()
| true | true |
f715b08addca895d652922233042ac6fcca2b312 | 2,145 | py | Python | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | [
"MIT"
] | null | null | null | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | [
"MIT"
] | null | null | null | src/data/datasets/BAIR/BAIR.py | msc5/junior-iw | d356e015fcd3a3be638097a1acc02d5dea4751aa | [
"MIT"
] | null | null | null | import os
import io
import numpy as np
from PIL import Image
import torch
from torchvision.transforms import ToTensor
class BAIR (object):
    """Data handler for the BAIR robot-pushing dataset.

    Each sample is a video clip of ``seq_len`` RGB frames loaded from a
    per-trajectory directory of numbered PNG files. In train mode clips are
    drawn at random; in test mode they are iterated in order.
    NOTE(review): ``image_size`` is stored but get_seq hard-codes 64x64
    frames -- confirm before using another resolution.
    """
    def __init__(self, data_root, train=True, seq_len=20, image_size=64):
        self.root_dir = data_root
        if train:
            self.data_dir = '%s/processed_data/train' % self.root_dir
            self.ordered = False
        else:
            self.data_dir = '%s/processed_data/test' % self.root_dir
            self.ordered = True
        # Collect every trajectory directory (two directory levels deep).
        self.dirs = []
        for d1 in os.listdir(self.data_dir):
            for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):
                self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))
        self.seq_len = seq_len
        self.image_size = image_size
        self.seed_is_set = False  # seeded lazily, once per worker (multi-threaded loading)
        self.d = 0
        self.totensor = ToTensor()
    def set_seed(self, seed):
        # Seed numpy only once so multiple loader workers stay independent.
        if not self.seed_is_set:
            self.seed_is_set = True
            np.random.seed(seed)
    def __len__(self):
        return len(self.dirs)
    def get_seq(self):
        """Return one clip as a (seq_len, 3, 64, 64) float tensor."""
        if self.ordered:
            # Test mode: walk the trajectory list sequentially, wrapping around.
            d = self.dirs[self.d]
            if self.d == len(self.dirs) - 1:
                self.d = 0
            else:
                self.d += 1
        else:
            # Train mode: pick a random trajectory.
            d = self.dirs[np.random.randint(len(self.dirs))]
        image_seq = []
        for i in range(self.seq_len):
            fname = '%s/%d.png' % (d, i)
            im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)
            image_seq.append(im)
        image_seq = torch.cat(image_seq, axis=0)
        return image_seq
    def __getitem__(self, index):
        self.set_seed(index)
        return self.get_seq()
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)
train_dataloader = DataLoader(train_dataloader, batch_size=4)
print(len(train_dataset, train_dataloader))
| 31.086957 | 73 | 0.577622 | import os
import io
import numpy as np
from PIL import Image
import torch
from torchvision.transforms import ToTensor
class BAIR (object):
def __init__(self, data_root, train=True, seq_len=20, image_size=64):
self.root_dir = data_root
if train:
self.data_dir = '%s/processed_data/train' % self.root_dir
self.ordered = False
else:
self.data_dir = '%s/processed_data/test' % self.root_dir
self.ordered = True
self.dirs = []
for d1 in os.listdir(self.data_dir):
for d2 in os.listdir('%s/%s' % (self.data_dir, d1)):
self.dirs.append('%s/%s/%s' % (self.data_dir, d1, d2))
self.seq_len = seq_len
self.image_size = image_size
self.seed_is_set = False
self.d = 0
self.totensor = ToTensor()
def set_seed(self, seed):
if not self.seed_is_set:
self.seed_is_set = True
np.random.seed(seed)
def __len__(self):
return len(self.dirs)
def get_seq(self):
if self.ordered:
d = self.dirs[self.d]
if self.d == len(self.dirs) - 1:
self.d = 0
else:
self.d += 1
else:
d = self.dirs[np.random.randint(len(self.dirs))]
image_seq = []
for i in range(self.seq_len):
fname = '%s/%d.png' % (d, i)
im = self.totensor(Image.open(fname)).reshape(1, 3, 64, 64)
image_seq.append(im)
image_seq = torch.cat(image_seq, axis=0)
return image_seq
def __getitem__(self, index):
self.set_seed(index)
return self.get_seq()
if __name__ == "__main__":
from torch.utils.data import DataLoader
train_dataset = BAIR('src/data/datasets/BAIR/raw', train=True)
train_dataloader = DataLoader(train_dataloader, batch_size=4)
print(len(train_dataset, train_dataloader))
| true | true |
f715b222c54a26cf324ab888f732b9102ea604a6 | 20,214 | py | Python | tools/nni_cmd/config_schema.py | skyser2003/nni | b946888fadacdb761e4c3a79bd869284af1da3b3 | [
"MIT"
] | 1 | 2021-03-27T10:42:42.000Z | 2021-03-27T10:42:42.000Z | tools/nni_cmd/config_schema.py | lswzjuer/nni | e9cba778257804a2a1a6002687835233a779d7af | [
"MIT"
] | null | null | null | tools/nni_cmd/config_schema.py | lswzjuer/nni | e9cba778257804a2a1a6002687835233a779d7af | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from schema import Schema, And, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
def setType(key, valueType):
    '''Schema helper: require that *key*'s value is an instance of *valueType*,
    reporting a uniform type-error message on failure.'''
    return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__))
def setChoice(key, *args):
    '''Schema helper: require that *key*'s value is one of *args*,
    reporting a uniform range-error message on failure.'''
    return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))
def setNumberRange(key, keyType, start, end):
    '''Schema helper: require that *key*'s value is a *keyType* number within
    the inclusive range [start, end]; type and range are checked separately so
    each failure gets its own error message.'''
    return And(
        And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
        And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))),
    )
def setPathCheck(key):
    '''Schema helper: require that *key*'s value is an existing filesystem path.'''
    return And(os.path.exists, error=SCHEMA_PATH_ERROR % key)
common_schema = {
'authorName': setType('authorName', str),
'experimentName': setType('experimentName', str),
Optional('description'): setType('description', str),
'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
Optional('multiPhase'): setType('multiPhase', bool),
Optional('multiThread'): setType('multiThread', bool),
Optional('nniManagerIp'): setType('nniManagerIp', str),
Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
Optional('debug'): setType('debug', bool),
Optional('versionCheck'): setType('versionCheck', bool),
Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
'useAnnotation': setType('useAnnotation', bool),
Optional('tuner'): dict,
Optional('advisor'): dict,
Optional('assessor'): dict,
Optional('localConfig'): {
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}
}
tuner_schema_dict = {
('Anneal', 'SMAC'): {
'builtinTunerName': setChoice('builtinTunerName', 'Anneal', 'SMAC'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('Evolution'): {
'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
('BatchTuner', 'GridSearch', 'Random'): {
'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'TPE': {
'builtinTunerName': 'TPE',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('parallel_optimize'): setType('parallel_optimize', bool),
Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'NetworkMorphism': {
'builtinTunerName': 'NetworkMorphism',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
Optional('input_width'): setType('input_width', int),
Optional('input_channel'): setType('input_channel', int),
Optional('n_output_node'): setType('n_output_node', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'MetisTuner': {
'builtinTunerName': 'MetisTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('no_resampling'): setType('no_resampling', bool),
Optional('no_candidates'): setType('no_candidates', bool),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
Optional('cold_start_num'): setType('cold_start_num', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'GPTuner': {
'builtinTunerName': 'GPTuner',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
Optional('kappa'): setType('kappa', float),
Optional('xi'): setType('xi', float),
Optional('nu'): setType('nu', float),
Optional('alpha'): setType('alpha', float),
Optional('cold_start_num'): setType('cold_start_num', int),
Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'PPOTuner': {
'builtinTunerName': 'PPOTuner',
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('trials_per_update'): setNumberRange('trials_per_update', int, 0, 99999),
Optional('epochs_per_update'): setNumberRange('epochs_per_update', int, 0, 99999),
Optional('minibatch_size'): setNumberRange('minibatch_size', int, 0, 99999),
Optional('ent_coef'): setType('ent_coef', float),
Optional('lr'): setType('lr', float),
Optional('vf_coef'): setType('vf_coef', float),
Optional('max_grad_norm'): setType('max_grad_norm', float),
Optional('gamma'): setType('gamma', float),
Optional('lam'): setType('lam', float),
Optional('cliprange'): setType('cliprange', float),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
advisor_schema_dict = {
'Hyperband':{
'builtinAdvisorName': Or('Hyperband'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('R'): setType('R', int),
Optional('eta'): setType('eta', int)
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'BOHB':{
'builtinAdvisorName': Or('BOHB'),
'classArgs': {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
Optional('eta'):setNumberRange('eta', int, 0, 9999),
Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
},
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
},
'customized':{
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
}
}
assessor_schema_dict = {
'Medianstop': {
'builtinAssessorName': 'Medianstop',
Optional('classArgs'): {
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
},
},
'Curvefitting': {
'builtinAssessorName': 'Curvefitting',
Optional('classArgs'): {
'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
Optional('gap'): setNumberRange('gap', int, 1, 9999),
},
},
'customized': {
'codeDir': setPathCheck('codeDir'),
'classFileName': setType('classFileName', str),
'className': setType('className', str),
Optional('classArgs'): dict,
}
}
# Trial settings shared by the local and remote training services.
common_trial_schema = {
    'trial':{
        'command': setType('command', str),
        'codeDir': setPathCheck('codeDir'),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
        Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
    }
}
pai_trial_schema = {
'trial':{
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'),
Optional('shmMB'): setType('shmMB', int),
Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('virtualCluster'): setType('virtualCluster', str),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('portList'): [{
"label": setType('label', str),
"beginAt": setType('beginAt', int),
"portNumber": setType('portNumber', int)
}]
}
}
# OpenPAI cluster credentials: either password-based or token-based
# authentication, never both in the same config.
pai_config_schema = {
    'paiConfig': Or({
        'userName': setType('userName', str),
        'passWord': setType('passWord', str),
        'host': setType('host', str)
    }, {
        'userName': setType('userName', str),
        'token': setType('token', str),
        'host': setType('host', str)
    })
}
kubeflow_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('ps'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('master'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
},
Optional('worker'):{
'replicas': setType('replicas', int),
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}
}
}
kubeflow_config_schema = {
'kubeflowConfig':Or({
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
'apiVersion': setType('apiVersion', str),
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
frameworkcontroller_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
'taskRoles': [{
'name': setType('name', str),
'taskNum': setType('taskNum', int),
'frameworkAttemptCompletionPolicy': {
'minFailedTaskCount': setType('minFailedTaskCount', int),
'minSucceededTaskCount': setType('minSucceededTaskCount', int),
},
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
'memoryMB': setType('memoryMB', int),
'image': setType('image', str),
Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
}]
}
}
frameworkcontroller_config_schema = {
'frameworkcontrollerConfig':Or({
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'nfs': {
'server': setType('server', str),
'path': setType('path', str)
}
}, {
Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
Optional('serviceAccountName'): setType('serviceAccountName', str),
'keyVault': {
'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
},
'azureStorage': {
'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
},
Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
})
}
machine_list_schema = {
Optional('machineList'):[Or({
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'passwd': setType('passwd', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
}, {
'ip': setType('ip', str),
Optional('port'): setNumberRange('port', int, 1, 65535),
'username': setType('username', str),
'sshKeyPath': setPathCheck('sshKeyPath'),
Optional('passphrase'): setType('passphrase', str),
Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
Optional('useActiveGpu'): setType('useActiveGpu', bool)
})]
}
# One complete Schema per training service platform, assembled from the
# shared pieces above.
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})
KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})
FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
| 50.283582 | 137 | 0.597754 |
import os
from schema import Schema, And, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
def setType(key, valueType):
    """Build a schema rule requiring the value of *key* to be of *valueType*."""
    message = SCHEMA_TYPE_ERROR % (key, valueType.__name__)
    return And(valueType, error=message)
def setChoice(key, *args):
    """Build a schema rule restricting the value of *key* to one of *args*."""
    allowed = args
    message = SCHEMA_RANGE_ERROR % (key, str(allowed))
    return And(lambda value: value in allowed, error=message)
def setNumberRange(key, keyType, start, end):
    """Build a schema rule requiring *key* to be a *keyType* within [start, end]."""
    type_rule = And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__))
    range_message = SCHEMA_RANGE_ERROR % (key, '(%s,%s)' % (start, end))
    range_rule = And(lambda value: start <= value <= end, error=range_message)
    return And(type_rule, range_rule)
def setPathCheck(key):
    """Build a schema rule requiring *key* to be an existing filesystem path."""
    message = SCHEMA_PATH_ERROR % key
    return And(os.path.exists, error=message)
# Sections shared by every experiment config, regardless of training platform.
common_schema = {
    'authorName': setType('authorName', str),
    'experimentName': setType('experimentName', str),
    Optional('description'): setType('description', str),
    'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
    # Duration string such as '2h' or '30m' (seconds/minutes/hours/days).
    Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[s|m|h|d]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
    Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
    'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
    Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
    Optional('multiPhase'): setType('multiPhase', bool),
    Optional('multiThread'): setType('multiThread', bool),
    Optional('nniManagerIp'): setType('nniManagerIp', str),
    Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
    Optional('debug'): setType('debug', bool),
    Optional('versionCheck'): setType('versionCheck', bool),
    Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
    Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
    'useAnnotation': setType('useAnnotation', bool),
    # tuner/advisor/assessor contents are validated against the per-algorithm
    # dicts below, so here they only need to be dicts.
    Optional('tuner'): dict,
    Optional('advisor'): dict,
    Optional('assessor'): dict,
    Optional('localConfig'): {
        # gpuIndices: a single index or a comma-separated list such as '0,1,2'.
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
        Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
        Optional('useActiveGpu'): setType('useActiveGpu', bool)
    }
}
# Per-tuner validation rules, keyed by builtin tuner name (or a tuple of names
# that share the same rule set). The 'customized' entry covers user-supplied
# tuner classes loaded from codeDir.
tuner_schema_dict = {
    ('Anneal', 'SMAC'): {
        'builtinTunerName': setChoice('builtinTunerName', 'Anneal', 'SMAC'),
        Optional('classArgs'): {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    ('Evolution'): {
        'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
        Optional('classArgs'): {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    # Tuners that take no classArgs at all.
    ('BatchTuner', 'GridSearch', 'Random'): {
        'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'TPE': {
        'builtinTunerName': 'TPE',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('parallel_optimize'): setType('parallel_optimize', bool),
            Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'NetworkMorphism': {
        'builtinTunerName': 'NetworkMorphism',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
            Optional('input_width'): setType('input_width', int),
            Optional('input_channel'): setType('input_channel', int),
            Optional('n_output_node'): setType('n_output_node', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'MetisTuner': {
        'builtinTunerName': 'MetisTuner',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('no_resampling'): setType('no_resampling', bool),
            Optional('no_candidates'): setType('no_candidates', bool),
            Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
            Optional('cold_start_num'): setType('cold_start_num', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'GPTuner': {
        'builtinTunerName': 'GPTuner',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            # Acquisition function: expected improvement, upper confidence
            # bound, or probability of improvement.
            Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
            Optional('kappa'): setType('kappa', float),
            Optional('xi'): setType('xi', float),
            Optional('nu'): setType('nu', float),
            Optional('alpha'): setType('alpha', float),
            Optional('cold_start_num'): setType('cold_start_num', int),
            Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
            Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'PPOTuner': {
        'builtinTunerName': 'PPOTuner',
        # Note: classArgs (with optimize_mode) is mandatory for PPOTuner.
        'classArgs': {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('trials_per_update'): setNumberRange('trials_per_update', int, 0, 99999),
            Optional('epochs_per_update'): setNumberRange('epochs_per_update', int, 0, 99999),
            Optional('minibatch_size'): setNumberRange('minibatch_size', int, 0, 99999),
            Optional('ent_coef'): setType('ent_coef', float),
            Optional('lr'): setType('lr', float),
            Optional('vf_coef'): setType('vf_coef', float),
            Optional('max_grad_norm'): setType('max_grad_norm', float),
            Optional('gamma'): setType('gamma', float),
            Optional('lam'): setType('lam', float),
            Optional('cliprange'): setType('cliprange', float),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    # User-supplied tuner class: located by codeDir/classFileName/className.
    'customized': {
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    }
}
# Per-advisor validation rules, keyed by builtin advisor name; 'customized'
# covers user-supplied advisor classes loaded from codeDir.
advisor_schema_dict = {
    'Hyperband':{
        'builtinAdvisorName': Or('Hyperband'),
        'classArgs': {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('R'): setType('R', int),
            Optional('eta'): setType('eta', int)
        },
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    'BOHB':{
        'builtinAdvisorName': Or('BOHB'),
        'classArgs': {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
            Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
            Optional('eta'):setNumberRange('eta', int, 0, 9999),
            Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
            Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
            Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
            Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
            Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
            Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
        },
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    },
    # User-supplied advisor class: located by codeDir/classFileName/className.
    'customized':{
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
    }
}
# Per-assessor validation rules, keyed by builtin assessor name; 'customized'
# covers user-supplied assessor classes loaded from codeDir.
assessor_schema_dict = {
    'Medianstop': {
        'builtinAssessorName': 'Medianstop',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
        },
    },
    'Curvefitting': {
        'builtinAssessorName': 'Curvefitting',
        Optional('classArgs'): {
            # epoch_num is mandatory when classArgs is given.
            'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
            Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
            Optional('gap'): setNumberRange('gap', int, 1, 9999),
        },
    },
    # User-supplied assessor class: located by codeDir/classFileName/className.
    'customized': {
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
    }
}
# Trial section for the local and remote training services: a shell command
# run inside codeDir, optionally reserving GPUs.
common_trial_schema = {
    'trial':{
        'command': setType('command', str),
        'codeDir': setPathCheck('codeDir'),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
        Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
    }
}
# Trial section for the OpenPAI training service: container resources plus
# optional HDFS data/output locations.
pai_trial_schema = {
    'trial':{
        'command': setType('command', str),
        'codeDir': setPathCheck('codeDir'),
        'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
        'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
        'memoryMB': setType('memoryMB', int),
        'image': setType('image', str),
        Optional('authFile'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'authFile'),
        Optional('shmMB'): setType('shmMB', int),
        # dataDir/outputDir must be hdfs:// URLs with an IPv4 host.
        Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
                                 error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
        Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
                                   error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
        Optional('virtualCluster'): setType('virtualCluster', str),
        Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
        # Extra container ports to expose, each with a label and a base port.
        Optional('portList'): [{
            "label": setType('label', str),
            "beginAt": setType('beginAt', int),
            "portNumber": setType('portNumber', int)
        }]
    }
}
# OpenPAI cluster credentials: either username+password or username+token.
pai_config_schema = {
    'paiConfig': Or({
        'userName': setType('userName', str),
        'passWord': setType('passWord', str),
        'host': setType('host', str)
    }, {
        'userName': setType('userName', str),
        'token': setType('token', str),
        'host': setType('host', str)
    })
}
def _kubeflow_role_schema():
    """Return the validation rules shared by the kubeflow 'ps', 'master' and
    'worker' role sections (a fresh dict per call so the role sub-schemas stay
    independent objects, exactly as the previous hand-written copies were)."""
    return {
        'replicas': setType('replicas', int),
        'command': setType('command', str),
        'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
        'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
        'memoryMB': setType('memoryMB', int),
        'image': setType('image', str),
        Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
    }

# Trial section for the kubeflow training service: a code dir plus up to three
# replica roles (parameter server, master, worker). The three role sub-schemas
# were previously triplicated verbatim; they are now built by the helper above.
kubeflow_trial_schema = {
    'trial':{
        'codeDir': setPathCheck('codeDir'),
        Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
        Optional('ps'): _kubeflow_role_schema(),
        Optional('master'): _kubeflow_role_schema(),
        Optional('worker'): _kubeflow_role_schema()
    }
}
# Kubeflow cluster config: operator/apiVersion plus one of two storage
# back-ends — an NFS share, or Azure storage referenced via a key vault.
kubeflow_config_schema = {
    'kubeflowConfig':Or({
        'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
        'apiVersion': setType('apiVersion', str),
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        'nfs': {
            'server': setType('server', str),
            'path': setType('path', str)
        }
    }, {
        'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
        'apiVersion': setType('apiVersion', str),
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        # Name constraints below mirror Azure's own naming rules.
        'keyVault': {
            'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                             error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
            'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                        error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
        },
        'azureStorage': {
            'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
                               error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
            'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
                              error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
        },
        Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
    })
}
# Trial section for the FrameworkController training service: a list of task
# roles, each with replica counts, resources and a completion policy.
frameworkcontroller_trial_schema = {
    'trial':{
        'codeDir': setPathCheck('codeDir'),
        'taskRoles': [{
            'name': setType('name', str),
            'taskNum': setType('taskNum', int),
            # How many task failures/successes end the framework attempt.
            'frameworkAttemptCompletionPolicy': {
                'minFailedTaskCount': setType('minFailedTaskCount', int),
                'minSucceededTaskCount': setType('minSucceededTaskCount', int),
            },
            'command': setType('command', str),
            'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str),
            Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
        }]
    }
}
# FrameworkController cluster config: like the kubeflow variant, storage is
# either an NFS share or Azure storage referenced via a key vault.
frameworkcontroller_config_schema = {
    'frameworkcontrollerConfig':Or({
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        Optional('serviceAccountName'): setType('serviceAccountName', str),
        'nfs': {
            'server': setType('server', str),
            'path': setType('path', str)
        }
    }, {
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        Optional('serviceAccountName'): setType('serviceAccountName', str),
        # Name constraints below mirror Azure's own naming rules.
        'keyVault': {
            'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                             error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
            'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                        error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
        },
        'azureStorage': {
            'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
                               error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
            'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
                              error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
        },
        Optional('uploadRetryCount'): setNumberRange('uploadRetryCount', int, 1, 99999)
    })
}
# Schema for the optional "machineList" section (remote training service).
# Each machine entry authenticates either with a password or with an SSH key.
machine_list_schema = {
    Optional('machineList'):[Or({
        # Password-based login.
        'ip': setType('ip', str),
        Optional('port'): setNumberRange('port', int, 1, 65535),
        'username': setType('username', str),
        'passwd': setType('passwd', str),
        # gpuIndices: a single index or a comma-separated list such as '0,1,2'.
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
        Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
        Optional('useActiveGpu'): setType('useActiveGpu', bool)
    }, {
        # SSH-key-based login (optionally protected by a passphrase).
        'ip': setType('ip', str),
        Optional('port'): setNumberRange('port', int, 1, 65535),
        'username': setType('username', str),
        'sshKeyPath': setPathCheck('sshKeyPath'),
        Optional('passphrase'): setType('passphrase', str),
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndex format error!'),
        Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
        Optional('useActiveGpu'): setType('useActiveGpu', bool)
    })]
}
# Top-level config schemas, one per training service platform. Each merges the
# shared sections with the platform-specific trial/cluster sections.
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})
REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})
PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})
KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})
FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
| true | true |
f715b265bb35aeb0434d5be280c4ded3ce5cd7ce | 4,313 | py | Python | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | src/camps/migrations/0001_initial.py | pwelzel/bornhack-website | af794e6a2fba06e09626259c7768feb30ff394be | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.db import models, migrations
import uuid
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema: creates the Camp, Day, Expense and Signup models.

    Every model shares the same base pattern: created/updated timestamps and
    a UUID primary key.
    """

    # The user model is swappable, so depend on settings.AUTH_USER_MODEL
    # rather than a hard-coded app label.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Camp: an event with unique start/end timestamps.
        migrations.CreateModel(
            name='Camp',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('name', models.CharField(max_length=255, help_text='Name of the camp, ie. Bornhack.', verbose_name='Name')),
                ('start', models.DateTimeField(help_text='When the camp starts.', unique=True, verbose_name='Start date')),
                ('end', models.DateTimeField(help_text='When the camp ends.', unique=True, verbose_name='End date')),
            ],
            options={
                'verbose_name_plural': 'Camps',
                'verbose_name': 'Camp',
            },
        ),
        # Day: a single calendar date belonging to a camp.
        migrations.CreateModel(
            name='Day',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('date', models.DateField(help_text='What date?', verbose_name='Date')),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='Which camp does this day belong to.', verbose_name='Camp')),
            ],
            options={
                'verbose_name_plural': 'Days',
                'verbose_name': 'Day',
            },
        ),
        # Expense: money spent for a camp, optionally covered by a user.
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('description', models.CharField(max_length=255, help_text='What this expense covers.', verbose_name='Description')),
                ('amount', models.DecimalField(max_digits=7, help_text='The amount of the expense.', verbose_name='Amount', decimal_places=2)),
                ('currency', models.CharField(max_length=3, choices=[('btc', 'BTC'), ('dkk', 'DKK'), ('eur', 'EUR'), ('sek', 'SEK')], help_text='What currency the amount is in.', verbose_name='Currency')),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp to which this expense relates to.', verbose_name='Camp')),
                ('covered_by', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, blank=True, help_text='Which user, if any, covered this expense.', verbose_name='Covered by', null=True)),
            ],
            options={
                'verbose_name_plural': 'Expenses',
                'verbose_name': 'Expense',
            },
        ),
        # Signup: a user's (possibly not-yet-paid) registration for a camp.
        migrations.CreateModel(
            name='Signup',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('cost', models.DecimalField(default=1500.0, decimal_places=2, help_text='What the user should/is willing to pay for this signup.', verbose_name='Cost', max_digits=7)),
                ('paid', models.BooleanField(help_text='Whether the user has paid.', verbose_name='Paid?', default=False)),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp that has been signed up for.', verbose_name='Camp')),
                ('user', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user that has signed up.', verbose_name='User')),
            ],
            options={
                'verbose_name_plural': 'Signups',
                'verbose_name': 'Signup',
            },
        ),
    ]
| 54.594937 | 210 | 0.592395 |
from django.db import models, migrations
import uuid
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema: creates the Camp, Day, Expense and Signup models.

    Every model shares the same base pattern: created/updated timestamps and
    a UUID primary key.
    """

    # The user model is swappable, so depend on settings.AUTH_USER_MODEL
    # rather than a hard-coded app label.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Camp: an event with unique start/end timestamps.
        migrations.CreateModel(
            name='Camp',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('name', models.CharField(max_length=255, help_text='Name of the camp, ie. Bornhack.', verbose_name='Name')),
                ('start', models.DateTimeField(help_text='When the camp starts.', unique=True, verbose_name='Start date')),
                ('end', models.DateTimeField(help_text='When the camp ends.', unique=True, verbose_name='End date')),
            ],
            options={
                'verbose_name_plural': 'Camps',
                'verbose_name': 'Camp',
            },
        ),
        # Day: a single calendar date belonging to a camp.
        migrations.CreateModel(
            name='Day',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('date', models.DateField(help_text='What date?', verbose_name='Date')),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='Which camp does this day belong to.', verbose_name='Camp')),
            ],
            options={
                'verbose_name_plural': 'Days',
                'verbose_name': 'Day',
            },
        ),
        # Expense: money spent for a camp, optionally covered by a user.
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('description', models.CharField(max_length=255, help_text='What this expense covers.', verbose_name='Description')),
                ('amount', models.DecimalField(max_digits=7, help_text='The amount of the expense.', verbose_name='Amount', decimal_places=2)),
                ('currency', models.CharField(max_length=3, choices=[('btc', 'BTC'), ('dkk', 'DKK'), ('eur', 'EUR'), ('sek', 'SEK')], help_text='What currency the amount is in.', verbose_name='Currency')),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp to which this expense relates to.', verbose_name='Camp')),
                ('covered_by', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, blank=True, help_text='Which user, if any, covered this expense.', verbose_name='Covered by', null=True)),
            ],
            options={
                'verbose_name_plural': 'Expenses',
                'verbose_name': 'Expense',
            },
        ),
        # Signup: a user's (possibly not-yet-paid) registration for a camp.
        migrations.CreateModel(
            name='Signup',
            fields=[
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
                ('cost', models.DecimalField(default=1500.0, decimal_places=2, help_text='What the user should/is willing to pay for this signup.', verbose_name='Cost', max_digits=7)),
                ('paid', models.BooleanField(help_text='Whether the user has paid.', verbose_name='Paid?', default=False)),
                ('camp', models.ForeignKey(on_delete=models.PROTECT, to='camps.Camp', help_text='The camp that has been signed up for.', verbose_name='Camp')),
                ('user', models.ForeignKey(on_delete=models.PROTECT, to=settings.AUTH_USER_MODEL, help_text='The user that has signed up.', verbose_name='User')),
            ],
            options={
                'verbose_name_plural': 'Signups',
                'verbose_name': 'Signup',
            },
        ),
    ]
| true | true |
f715b282dadb18b6d8c46e9e216062f47e9fa8c4 | 2,215 | py | Python | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 5 | 2016-08-23T17:52:22.000Z | 2019-05-16T08:45:30.000Z | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-11-10T05:30:21.000Z | 2019-04-05T15:03:37.000Z | tests/models/symbol/ddc_log_data_returned_test.py | NetApp/santricity-webapi-pythonsdk | 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | [
"BSD-3-Clause-Clear"
] | 7 | 2016-08-25T16:11:44.000Z | 2021-02-22T05:31:25.000Z | #!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.symbol.ddc_log_data_returned import DdcLogDataReturned
class DdcLogDataReturnedTest(unittest.TestCase):
    """Smoke test for the generated DdcLogDataReturned model.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Try instantiating the model
    def test_ddc_log_data_returned(self):
        """The model can be instantiated with no arguments."""
        ddc_log_data_returned_obj = DdcLogDataReturned()
        # assertIsNotNone performs the intended identity check; the previous
        # assertNotEqual(obj, None) deferred to the model's __eq__, which could
        # spuriously pass or fail depending on how equality is generated.
        self.assertIsNotNone(ddc_log_data_returned_obj)
| 58.289474 | 845 | 0.776975 |
import unittest
from netapp.santricity.models.symbol.ddc_log_data_returned import DdcLogDataReturned
class DdcLogDataReturnedTest(unittest.TestCase):
    """Smoke test for the generated DdcLogDataReturned model."""

    def test_ddc_log_data_returned(self):
        """The model can be instantiated with no arguments."""
        ddc_log_data_returned_obj = DdcLogDataReturned()
        # assertIsNotNone performs the intended identity check; the previous
        # assertNotEqual(obj, None) deferred to the model's __eq__.
        self.assertIsNotNone(ddc_log_data_returned_obj)
| true | true |
f715b31b59adb44a6d805b23169c7a059551b417 | 4,970 | py | Python | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | null | null | null | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 1 | 2022-03-15T06:55:48.000Z | 2022-03-15T15:38:20.000Z | ravager/bot/helpers/abort_upload_handler.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 2 | 2022-02-09T21:30:57.000Z | 2022-03-15T06:19:57.000Z | from ravager.services.google.helpers import uploader
from ravager.database.helpers.structs import OpsDataStruct
from ravager.database.tasks import Tasks
from ravager.celery_tasks.tasks import app
from ravager.services.aria.download import Download
from telegram.ext import CallbackQueryHandler
import logging
# Use the module's import path, not its file path, as the logger name so that
# records aggregate under the standard dotted-module logger hierarchy.
logger = logging.getLogger(__name__)
class AbortAndUpload:
    """Callback handlers for the upload-confirmation and abort-transfer
    inline keyboards."""

    def __init__(self):
        pass

    def callback_handler(self, update, context):
        """Dispatch an inline-keyboard callback of the form
        'method|action|src_msg_id', where method is 'upload' or 'abort' and
        action is 'yes' or 'no'."""
        callback_data = update.callback_query.data.split("|")
        method = callback_data[0]
        action = callback_data[1]
        src_msg_id = callback_data[2]
        # Look up the task by the id of the message that started the transfer.
        task = OpsDataStruct()
        task.source_msg_id = src_msg_id
        task = Tasks(task=task).get_task()
        if method == "upload":
            if action == "no":
                update.callback_query.edit_message_text(text="Uploading cancelled")
            elif action == "yes":
                upload_msg = update.callback_query.edit_message_text(text="Starting upload")
                uploader.upload_file(task, upload_msg)
        elif method == "abort":
            if action == "yes":
                abort_msg = update.callback_query.edit_message_text(text="Trying to abort transfer")
                self.abort_task(update, context, task, abort_msg.message_id)
            elif action == "no":
                update.callback_query.edit_message_text(text="Transfer allowed to process as per request")

    @staticmethod
    def abort_task(update, context, task, abort_msg_id):
        """Revoke the Celery task and stop the aria2 download for *task*,
        notifying the user about the outcome.

        Returns the updated task on success, otherwise None.
        """
        # Extract task fields BEFORE the try block: the except/finally handlers
        # below reference gid/user_id/source_msg_id, and binding them inside
        # the try meant an early failure raised NameError in the handler.
        celery_task_id = task.task_id
        user_id = task.user_id
        gid = task.gid
        source_msg_id = task.source_msg_id
        msg_sent = False
        try:
            download = Download()
            app.control.revoke(celery_task_id, terminate=True, signal="SIGKILL")
            aria_stop_download = download.remove(gid)
            logger.info(aria_stop_download)
            if aria_stop_download:
                context.bot.send_message(chat_id=user_id, text="Task aborted successfully",
                                         reply_to_message_id=source_msg_id)
                task.status = "aborted"
                Tasks(task=task).set_task()
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                msg_sent = True
                return task
            else:
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                context.bot.send_message(chat_id=user_id, text="Failed to abort task",
                                         reply_to_message_id=source_msg_id)
                msg_sent = True
                # "%s" placeholder added: passing task as an extra arg without
                # one triggered a logging formatting error.
                logger.error("Failed to abort task %s", task)
                return
        except Exception as e:
            logger.error(e)
            # aria2 raises these when the download already finished/was removed;
            # treat both as "probably aborted already".
            if str(e) in ("GID {} is not found".format(gid),
                          "No such download for GID#{}".format(gid)):
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                context.bot.send_message(chat_id=user_id,
                                         text="Task probably aborted,check if ongoing transfer msg updates",
                                         reply_to_message_id=source_msg_id)
                msg_sent = True
                logger.error("Task probably aborted %s", task)
                return
            context.bot.send_message(chat_id=user_id, text="Failed to abort task", reply_to_message_id=source_msg_id)
            msg_sent = True
            logger.error("Failed to abort task %s", task)
            return
        finally:
            # Last-resort notification if no path above managed to message the user.
            if not msg_sent:
                context.bot.send_message(chat_id=user_id, text="Failed to abort task",
                                         reply_to_message_id=source_msg_id)
                logger.error("Failed to abort task %s", task)
                return

    def upload_callback_handler(self):
        """Return a CallbackQueryHandler for 'upload|...' callbacks."""
        return CallbackQueryHandler(self.callback_handler, pattern="upload")

    def abort_callback_handler(self):
        """Return a CallbackQueryHandler for 'abort|...' callbacks."""
        return CallbackQueryHandler(self.callback_handler, pattern="abort")
| 46.886792 | 117 | 0.61328 | from ravager.services.google.helpers import uploader
from ravager.database.helpers.structs import OpsDataStruct
from ravager.database.tasks import Tasks
from ravager.celery_tasks.tasks import app
from ravager.services.aria.download import Download
from telegram.ext import CallbackQueryHandler
import logging
logger = logging.getLogger(__file__)
class AbortAndUpload:
    """Telegram callback-query handlers for aborting transfers and
    confirming uploads.

    Inline-keyboard buttons carry callback data of the form
    "<method>|<action>|<source_msg_id>" where method is "upload" or
    "abort" and action is "yes" or "no".
    """

    def __init__(self):
        pass

    def callback_handler(self, update, context):
        """Dispatch an inline-keyboard callback to the matching action.

        Looks the task up by the source message id embedded in the
        callback data, then confirms/cancels an upload or abort.
        """
        callback_data = update.callback_query.data.split("|")
        method = callback_data[0]
        action = callback_data[1]
        src_msg_id = callback_data[2]
        task = OpsDataStruct()
        task.source_msg_id = src_msg_id
        task = Tasks(task=task).get_task()
        if method == "upload" and action == "no":
            update.callback_query.edit_message_text(text="Uploading cancelled")
        if method == "upload" and action == "yes":
            upload_msg = update.callback_query.edit_message_text(text="Starting upload")
            uploader.upload_file(task, upload_msg)
        if method == "abort" and action == "yes":
            abort_msg = update.callback_query.edit_message_text(text="Trying to abort transfer")
            self.abort_task(update, context, task, abort_msg.message_id)
        if method == "abort" and action == "no":
            update.callback_query.edit_message_text(text="Transfer allowed to process as per request")

    @staticmethod
    def abort_task(update, context, task, abort_msg_id):
        """Abort a task: revoke its celery job, stop the aria2 download,
        update its DB status and notify the user.

        :param task: task record with task_id, user_id, gid and
            source_msg_id attributes
        :param abort_msg_id: id of the "Trying to abort" status message,
            deleted once a final result message has been sent
        :returns: the updated task on success, otherwise None
        """
        msg_sent = False
        # Read the task fields up-front so the except/finally paths never
        # reference an unbound local when the abort fails early.
        celery_task_id = task.task_id
        user_id = task.user_id
        gid = task.gid
        source_msg_id = task.source_msg_id
        try:
            download = Download()
            app.control.revoke(celery_task_id, terminate=True, signal="SIGKILL")
            aria_stop_download = download.remove(gid)
            logger.info(aria_stop_download)
            if aria_stop_download:
                context.bot.send_message(chat_id=user_id, text="Task aborted successfully",
                                         reply_to_message_id=source_msg_id)
                task.status = "aborted"
                Tasks(task=task).set_task()
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                msg_sent = True
                return task
            else:
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                context.bot.send_message(chat_id=user_id, text="Failed to abort task",
                                         reply_to_message_id=source_msg_id)
                msg_sent = True
                logger.error("Failed to abort task: %s", task)
                return
        except Exception as e:
            logger.error(e)
            # aria2 raises these messages when the download has already
            # finished or was already removed.
            if str(e) in ("GID {} is not found".format(gid),
                          "No such download for GID#{}".format(gid)):
                context.bot.delete_message(chat_id=user_id, message_id=abort_msg_id)
                context.bot.send_message(chat_id=user_id,
                                         text="Task probably aborted,check if ongoing transfer msg updates",
                                         reply_to_message_id=source_msg_id)
                msg_sent = True
                logger.error("Task probably aborted: %s", task)
                return
            context.bot.send_message(chat_id=user_id, text="Failed to abort task", reply_to_message_id=source_msg_id)
            msg_sent = True
            logger.error("Failed to abort task: %s", task)
            return
        finally:
            # Last-resort notification if an exception escaped before any
            # user-visible message was sent.  NOTE: the return from finally
            # also deliberately stops that exception from propagating to the
            # telegram dispatcher.
            if not msg_sent:
                context.bot.send_message(chat_id=user_id, text="Failed to abort task",
                                         reply_to_message_id=source_msg_id)
                logger.error("Failed to abort task: %s", task)
                return

    def upload_callback_handler(self):
        """Return a CallbackQueryHandler for "upload" callbacks."""
        return CallbackQueryHandler(self.callback_handler, pattern="upload")

    def abort_callback_handler(self):
        """Return a CallbackQueryHandler for "abort" callbacks."""
        return CallbackQueryHandler(self.callback_handler, pattern="abort")
| true | true |
f715b360e88e246929f30fa6b56a22448fc5ee17 | 228,126 | py | Python | cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | [
"Apache-2.0"
] | null | null | null | cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | [
"Apache-2.0"
] | 28 | 2017-08-17T14:46:05.000Z | 2022-03-29T12:42:12.000Z | cinder/volume/manager.py | sapcc/cinder | 9444ae7d2c7cfe2c277ff661ec9ef27a4f013f91 | [
"Apache-2.0"
] | 3 | 2017-04-27T16:11:40.000Z | 2020-02-12T21:27:00.000Z | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from castellan import key_manager
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_migration
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
# Volume statuses from which a volume may be removed from a generic group.
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
# Volume statuses from which a volume may be added to a generic group.
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
# Source object statuses accepted when creating a group from a snapshot or
# from another group.
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
# Short alias for the attachment list object used throughout this module.
VA_LIST = objects.VolumeAttachmentList
# Service-level options for the volume manager (registered globally below).
volume_manager_opts = [
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
    cfg.StrOpt('zoning_mode',
               help="FC Zoning mode configured, only 'fabric' is "
                    "supported now."),
    cfg.IntOpt('reinit_driver_count',
               default=3,
               help='Maximum times to reintialize the driver '
                    'if volume initialization fails. The interval of retry is '
                    'exponentially backoff, and will be 1s, 2s, 4s etc.'),
    cfg.IntOpt('init_host_max_objects_retrieval',
               default=0,
               help='Max number of volumes and snapshots to be retrieved '
                    'per batch during volume manager host initialization. '
                    'Query results will be obtained in batches from the '
                    'database and not in one shot to avoid extreme memory '
                    'usage. Set 0 to turn off this functionality.'),
    cfg.IntOpt('backend_stats_polling_interval',
               default=60,
               min=3,
               help='Time in seconds between requests for usage statistics '
                    'from the backend. Be aware that generating usage '
                    'statistics is expensive for some backends, so setting '
                    'this value too low may adversely affect performance.'),
]
# Per-backend options (registered under the shared backend_defaults group).
volume_backend_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMVolumeDriver',
               help='Driver to use for volume creation'),
    cfg.StrOpt('extra_capabilities',
               default='{}',
               help='User defined capabilities, a JSON formatted string '
                    'specifying key/value pairs. The key/value pairs can '
                    'be used by the CapabilitiesFilter to select between '
                    'backends when requests specify volume types. For '
                    'example, specifying a service level or the geographical '
                    'location of a backend, then creating a volume type to '
                    'allow the user to select by these different '
                    'properties.'),
    cfg.BoolOpt('suppress_requests_ssl_warnings',
                default=False,
                help='Suppress requests library SSL certificate warnings.'),
    cfg.IntOpt('backend_native_threads_pool_size',
               default=20,
               min=20,
               help='Size of the native threads pool for the backend. '
                    'Increase for backends that heavily rely on this, like '
                    'the RBD driver.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP)
# MAPPING is used for driver renames to keep backwards compatibility. When a
# driver is renamed, add a mapping here from the old name (the dict key) to the
# new name (the dict value) for at least a cycle to allow time for deployments
# to transition.
MAPPING = {
    'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver':
    'cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver',
    'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver':
    'cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver',
    'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver':
    'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver',
    'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver':
    'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.'
    'FJDXISCSIDriver',
    'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver':
    'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver',
}
class VolumeManager(manager.CleanableManager,
                    manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""
    # RPC interface version this manager implements.
    RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
    # Sentinel backend id meaning "fail back to the default backend".
    FAILBACK_SENTINEL = 'default'
    target = messaging.Target(version=RPC_API_VERSION)
    # On cloning a volume, we shouldn't copy volume_type, consistencygroup
    # and volume_attachment, because the db sets that according to [field]_id,
    # which we do copy. We also skip some other values that are set during
    # creation of Volume object.
    _VOLUME_CLONE_SKIP_PROPERTIES = {
        'id', '_name_id', 'name_id', 'name', 'status',
        'attach_status', 'migration_status', 'volume_type',
        'consistencygroup', 'volume_attachment', 'group'}
def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
host = host or self.host
ctxt = context.get_admin_context()
svc_host = volume_utils.extract_host(host, 'backend')
return objects.Service.get_by_args(ctxt, svc_host, binary)
    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the driver from the one specified in args, or from flags.

        :param volume_driver: import path of the driver class; when not
            given, the ``volume_driver`` option of the backend's config
            section is used.
        :param service_name: name of the backend config section; ``None``
            means unit tests and falls back to ``backend_defaults``.
        """
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        # NOTE(dulek): service_name=None means we're running in unit tests.
        service_name = service_name or 'backend_defaults'
        self.configuration = config.Configuration(volume_backend_opts,
                                                  config_group=service_name)
        self._set_tpool_size(
            self.configuration.backend_native_threads_pool_size)
        self.stats = {}
        self.service_uuid = None
        if not volume_driver:
            # Get from configuration, which will get the default
            # if its not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            # Transparently translate deprecated driver paths (see MAPPING).
            LOG.warning("Driver path %s is deprecated, update your "
                        "configuration to the new path.", volume_driver)
            volume_driver = MAPPING[volume_driver]
        vol_db_empty = self._set_voldb_empty_at_startup_indicator(
            context.get_admin_context())
        LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
        # We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
        curr_active_backend_id = None
        try:
            service = self._get_service()
        except exception.ServiceNotFound:
            # NOTE(jdg): This is to solve problems with unit tests
            LOG.info("Service not found for updating "
                     "active_backend_id, assuming default "
                     "for driver init.")
        else:
            curr_active_backend_id = service.active_backend_id
            self.service_uuid = service.uuid
        if self.configuration.suppress_requests_ssl_warnings:
            LOG.warning("Suppressing requests library SSL Warnings")
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecureRequestWarning)
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecurePlatformWarning)
        self.key_manager = key_manager.API(CONF)
        # A driver can feed additional RPC endpoints into this list
        driver_additional_endpoints = []
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db,
            host=self.host,
            cluster_name=self.cluster,
            is_vol_db_empty=vol_db_empty,
            active_backend_id=curr_active_backend_id,
            additional_endpoints=driver_additional_endpoints)
        self.additional_endpoints.extend(driver_additional_endpoints)
        if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
            # Clustered (active/active) deployment requires explicit driver
            # support; refuse to start otherwise.
            msg = _('Active-Active configuration is not currently supported '
                    'by driver %s.') % volume_driver
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        self.message_api = message_api.API()
        if CONF.profiler.enabled and profiler is not None:
            self.driver = profiler.trace_cls("driver")(self.driver)
        try:
            self.extra_capabilities = jsonutils.loads(
                self.driver.configuration.extra_capabilities)
        except AttributeError:
            # Driver has no configuration object (e.g. some fakes in tests).
            self.extra_capabilities = {}
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Invalid JSON: %s",
                          self.driver.configuration.extra_capabilities)
        # Check if a per-backend AZ has been specified
        backend_zone = self.driver.configuration.safe_get(
            'backend_availability_zone')
        if backend_zone:
            self.availability_zone = backend_zone
        if self.driver.configuration.safe_get(
                'image_volume_cache_enabled'):
            max_cache_size = self.driver.configuration.safe_get(
                'image_volume_cache_max_size_gb')
            max_cache_entries = self.driver.configuration.safe_get(
                'image_volume_cache_max_count')
            self.image_volume_cache = image_cache.ImageVolumeCache(
                self.db,
                cinder_volume.API(),
                max_cache_size,
                max_cache_entries
            )
            LOG.info('Image-volume cache enabled for host %(host)s.',
                     {'host': self.host})
        else:
            LOG.info('Image-volume cache disabled for host %(host)s.',
                     {'host': self.host})
            self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = volume_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception('Fetch volume pool name failed.',
resource=volume)
return
if pool:
new_host = volume_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info("Determined volume DB was empty at startup.")
return True
else:
LOG.info("Determined volume DB was not empty at startup.")
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
# NOTE(JDG): Make sure returned item is in this hosts volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
if snapshot_updates:
for snap in snapshots:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info('Including all resources from host %(host)s in cluster '
'%(cluster)s.',
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_gs = objects.GroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'%(num_gs)s generic groups and %(num_cache)s image '
'volume caches from host %(host)s have been included in '
'cluster %(cluster)s.',
{'num_vols': num_vols, 'num_cgs': num_cgs, 'num_gs': num_gs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf",
resource={'type': 'driver',
'id': self.__class__.__name__})
return
self._init_host(added_to_cluster, **kwargs)
if not self.driver.initialized:
reinit_count = 0
while reinit_count < CONF.reinit_driver_count:
time.sleep(2 ** reinit_count)
self._init_host(added_to_cluster, **kwargs)
if self.driver.initialized:
return
reinit_count += 1
    def _init_host(self, added_to_cluster=None, **kwargs):
        """Set up the driver and resume state for this backend's resources.

        Runs driver do_setup/check_for_setup_error, rebuilds allocated
        capacity stats, re-exports in-use volumes, queues fixed-key
        migration and publishes capabilities.  On driver setup failure it
        returns early without marking the driver initialized (the caller,
        init_host, retries).
        """
        ctxt = context.get_admin_context()
        # If we have just added this host to a cluster we have to include all
        # our resources in that cluster.
        if added_to_cluster:
            self._include_resources_in_cluster(ctxt)
        LOG.info("Starting volume driver %(driver_name)s (%(version)s)",
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception:
            LOG.exception("Failed to initialize driver.",
                          resource={'type': 'driver',
                                    'id': self.__class__.__name__})
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return
        # Initialize backend capabilities list
        self.driver.init_capabilities()
        # Zero stats
        self.stats['pools'] = {}
        self.stats.update({'allocated_capacity_gb': 0})
        # Batch retrieval volumes and snapshots
        num_vols, num_snaps, max_objs_num, req_range = None, None, None, [0]
        req_limit = CONF.init_host_max_objects_retrieval
        use_batch_objects_retrieval = req_limit > 0
        if use_batch_objects_retrieval:
            # Get total number of volumes
            num_vols, __, __ = self._get_my_volumes_summary(ctxt)
            # Get total number of snapshots
            num_snaps, __ = self._get_my_snapshots_summary(ctxt)
            # Calculate highest number of the objects (volumes or snapshots)
            max_objs_num = max(num_vols, num_snaps)
            # Make batch request loop counter
            req_range = range(0, max_objs_num, req_limit)
        volumes_to_migrate = volume_migration.VolumeMigrationList()
        for req_offset in req_range:
            # Retrieve 'req_limit' number of objects starting from
            # 'req_offset' position
            volumes, snapshots = None, None
            if use_batch_objects_retrieval:
                if req_offset < num_vols:
                    volumes = self._get_my_volumes(ctxt,
                                                   limit=req_limit,
                                                   offset=req_offset)
                else:
                    volumes = objects.VolumeList()
                if req_offset < num_snaps:
                    snapshots = self._get_my_snapshots(ctxt,
                                                      limit=req_limit,
                                                      offset=req_offset)
                else:
                    snapshots = objects.SnapshotList()
            # or retrieve all volumes and snapshots per single request
            else:
                volumes = self._get_my_volumes(ctxt)
                snapshots = self._get_my_snapshots(ctxt)
            self._sync_provider_info(ctxt, volumes, snapshots)
            # FIXME volume count for exporting is wrong
            try:
                for volume in volumes:
                    # available volume should also be counted into allocated
                    if volume['status'] in ['in-use', 'available']:
                        # calculate allocated capacity for driver
                        self._count_allocated_capacity(ctxt, volume)
                        try:
                            if volume['status'] in ['in-use']:
                                self.driver.ensure_export(ctxt, volume)
                        except Exception:
                            LOG.exception("Failed to re-export volume, "
                                          "setting to ERROR.",
                                          resource=volume)
                            volume.conditional_update({'status': 'error'},
                                                      {'status': 'in-use'})
            # All other cleanups are processed by parent class -
            # CleanableManager
            except Exception:
                LOG.exception("Error during re-export on driver init.",
                              resource=volume)
                return
            if len(volumes):
                volumes_to_migrate.append(volumes, ctxt)
            # Free each batch before fetching the next to bound memory use.
            del volumes
            del snapshots
        self.driver.set_throttle()
        # at this point the driver is considered initialized.
        # NOTE(jdg): Careful though because that doesn't mean
        # that an entry exists in the service table
        self.driver.set_initialized()
        # Keep the image tmp file clean when init host.
        backend_name = volume_utils.extract_host(self.service_topic_queue)
        image_utils.cleanup_temporary_file(backend_name)
        # Migrate any ConfKeyManager keys based on fixed_key to the currently
        # configured key manager.
        self._add_to_threadpool(key_migration.migrate_fixed_key,
                                volumes=volumes_to_migrate)
        # collect and publish service capabilities
        self.publish_service_capabilities(ctxt)
        LOG.info("Driver initialization completed successfully.",
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})
        # Make sure to call CleanableManager to do the cleanup
        super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
                                             **kwargs)
    def init_host_with_rpc(self):
        """Finish initialization steps that require RPC to be available.

        Syncs the service's (and, when present, its cluster's)
        replication_status with the driver-reported stats, unless the
        service is currently failed over.
        """
        LOG.info("Initializing RPC dependent components of volume "
                 "driver %(driver_name)s (%(version)s)",
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            # Make sure the driver is initialized first
            utils.log_unsupported_driver_warning(self.driver)
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            LOG.error("Cannot complete RPC initialization because "
                      "driver isn't initialized properly.",
                      resource={'type': 'driver',
                                'id': self.driver.__class__.__name__})
            return
        stats = self.driver.get_volume_stats(refresh=True)
        try:
            service = self._get_service()
        except exception.ServiceNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Service not found for updating replication_status.")
        # Never overwrite a FAILED_OVER status from the stats report.
        if service.replication_status != fields.ReplicationStatus.FAILED_OVER:
            if stats and stats.get('replication_enabled', False):
                replication_status = fields.ReplicationStatus.ENABLED
            else:
                replication_status = fields.ReplicationStatus.DISABLED
            if replication_status != service.replication_status:
                service.replication_status = replication_status
                service.save()
        # Update the cluster replication status if necessary
        cluster = service.cluster
        if (cluster and
                cluster.replication_status != service.replication_status):
            cluster.replication_status = service.replication_status
            cluster.save()
        LOG.info("Driver post RPC initialization completed successfully.",
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})
    def _do_cleanup(self, ctxt, vo_resource):
        """Clean up one volume/snapshot left in a transient state.

        :param vo_resource: versioned object (Volume or Snapshot) to clean
        :returns: True when this method handled the worker cleanup itself
            (volume delete path); otherwise None, so the caller removes the
            worker entry.
        """
        if isinstance(vo_resource, objects.Volume):
            if vo_resource.status == 'downloading':
                self.driver.clear_download(ctxt, vo_resource)
            elif vo_resource.status == 'uploading':
                # Set volume status to available or in-use.
                self.db.volume_update_status_based_on_attachment(
                    ctxt, vo_resource.id)
            elif vo_resource.status == 'deleting':
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            vo_resource, cascade=True)
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, vo_resource, cascade=True)
                # We signal that we take care of cleaning the worker ourselves
                # (with set_workers decorator in delete_volume method) so
                # do_cleanup method doesn't need to remove it.
                return True
        # For Volume creating and downloading and for Snapshot downloading
        # statuses we have to set status to error
        if vo_resource.status in ('creating', 'downloading'):
            vo_resource.status = 'error'
            vo_resource.save()
    def is_working(self):
        """Return if Manager is ready to accept requests.

        This is to inform Service class that in case of volume driver
        initialization failure the manager is actually down and not ready to
        accept any requests.
        """
        # Mirrors the driver's initialized flag set at the end of _init_host.
        return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not volume_utils.hosts_are_equivalent(resource.host,
self.host)):
pool = volume_utils.extract_host(resource.host, 'pool')
resource.host = volume_utils.append_host(self.host, pool)
resource.save()
    @objects.Volume.set_workers
    def create_volume(self, context, volume, request_spec=None,
                      filter_properties=None, allow_reschedule=True):
        """Creates the volume.

        Builds and runs the create-volume taskflow, serialized against
        deletion of the source snapshot/volume when one is involved.

        :param volume: Volume object to create on this backend
        :param request_spec: scheduler request spec; a fresh RequestSpec is
            used when None
        :param allow_reschedule: passed to the flow; presumably controls
            whether failures may be rescheduled to another backend (see
            OnFailureRescheduleTask handling below)
        :returns: the created volume's id
        """
        # Log about unsupported drivers
        utils.log_unsupported_driver_warning(self.driver)
        # Make sure the host in the DB matches our own when clustered
        self._set_resource_host(volume)
        # Update our allocated capacity counter early to minimize race
        # conditions with the scheduler.
        self._update_allocated_capacity(volume)
        # We lose the host value if we reschedule, so keep it here
        original_host = volume.host
        context_elevated = context.elevated()
        if filter_properties is None:
            filter_properties = {}
        if request_spec is None:
            request_spec = objects.RequestSpec()
        try:
            # NOTE(flaper87): Driver initialization is
            # verified by the task itself.
            flow_engine = create_volume.get_flow(
                context_elevated,
                self,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume,
                allow_reschedule,
                context,
                request_spec,
                filter_properties,
                image_volume_cache=self.image_volume_cache,
            )
        except Exception:
            msg = _("Create manager volume flow failed.")
            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
            raise exception.CinderException(msg)
        snapshot_id = request_spec.get('snapshot_id')
        source_volid = request_spec.get('source_volid')
        if snapshot_id is not None:
            # Make sure the snapshot is not deleted until we are done with it.
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            # Make sure the volume is not deleted until we are done with it.
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        else:
            locked_action = None
        def _run_flow():
            # This code executes create volume flow. If something goes wrong,
            # flow reverts all job that was done and reraises an exception.
            # Otherwise, all data that was generated by flow becomes available
            # in flow engine's storage.
            with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
                flow_engine.run()
        # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
        # decide if allocated_capacity should be incremented.
        rescheduled = False
        try:
            if locked_action is None:
                _run_flow()
            else:
                # Hold the coordination lock so the source snapshot/volume
                # cannot be deleted while this flow still reads from it.
                with coordination.COORDINATOR.get_lock(locked_action):
                    _run_flow()
        finally:
            try:
                flow_engine.storage.fetch('refreshed')
            except tfe.NotFound:
                # If there's no vol_ref, then flow is reverted. Lets check out
                # if rescheduling occurred.
                try:
                    rescheduled = flow_engine.storage.get_revert_result(
                        create_volume.OnFailureRescheduleTask.make_name(
                            [create_volume.ACTION]))
                except tfe.NotFound:
                    pass
            if rescheduled:
                # NOTE(geguileo): Volume was rescheduled so we need to update
                # volume stats because the volume wasn't created here.
                # Volume.host is None now, so we pass the original host value.
                self._update_allocated_capacity(volume, decrement=True,
                                                host=original_host)
        # Shared targets is only relevant for iSCSI connections.
        # We default to True to be on the safe side.
        volume.shared_targets = (
            self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
            self.driver.capabilities.get('shared_targets', True))
        # TODO(geguileo): service_uuid won't be enough on Active/Active
        # deployments. There can be 2 services handling volumes from the same
        # backend.
        volume.service_uuid = self.service_uuid
        volume.save()
        LOG.info("Created volume successfully.", resource=volume)
        return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = volume_utils.extract_host(
resource.service_topic_queue)
backend = volume_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
                  cascade=False):
    """Deletes and unexports volume.

    1. Delete a volume(normal case)
       Delete a volume and update quotas.

    2. Delete a migration volume
       If deleting the volume in a migration, we want to skip
       quotas but we need database updates for the volume.

    3. Delete a temp volume for backup
       If deleting the temp volume for backup, we want to skip
       quotas but we need database updates for the volume.

    :param context: security context; elevated internally for DB access.
    :param volume: Volume object to delete.
    :param unmanage_only: remove the volume from Cinder's management
                          without deleting the backend storage.
    :param cascade: delete all of the volume's snapshots first.
    :raises exception.VolumeAttached: if the volume is still attached.
    :raises exception.Invalid: for unsupported option combinations or a
                               volume that is not local to this backend.
    """
    context = context.elevated()

    try:
        volume.refresh()
    except exception.VolumeNotFound:
        # NOTE(thingee): It could be possible for a volume to
        # be deleted when resuming deletes from init_host().
        LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
        return

    # Quota operations must be charged against the volume's owner, which
    # may differ from the caller's project (e.g. admin-initiated delete).
    if context.project_id != volume.project_id:
        project_id = volume.project_id
    else:
        project_id = context.project_id

    if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
        # Volume is still attached, need to detach first
        raise exception.VolumeAttached(volume_id=volume.id)
    self._check_is_our_resource(volume)

    if unmanage_only and volume.encryption_key_id is not None:
        raise exception.Invalid(
            reason=_("Unmanaging encrypted volumes is not "
                     "supported."))

    if unmanage_only and cascade:
        # This could be done, but is ruled out for now just
        # for simplicity.
        raise exception.Invalid(
            reason=_("Unmanage and cascade delete options "
                     "are mutually exclusive."))

    # To backup a snapshot or a 'in-use' volume, create a temp volume
    # from the snapshot or in-use volume, and back it up.
    # Get admin_metadata (needs admin context) to detect temporary volume.
    is_temp_vol = False
    with volume.obj_as_admin():
        if volume.admin_metadata.get('temporary', 'False') == 'True':
            is_temp_vol = True
            LOG.info("Trying to delete temp volume: %s", volume.id)

    # The status 'deleting' is not included, because it only applies to
    # the source volume to be deleted after a migration. No quota
    # needs to be handled for it.
    is_migrating = volume.migration_status not in (None, 'error',
                                                   'success')
    is_migrating_dest = (is_migrating and
                         volume.migration_status.startswith(
                             'target:'))
    notification = "delete.start"
    if unmanage_only:
        notification = "unmanage.start"
    if not is_temp_vol:
        self._notify_about_volume_usage(context, volume, notification)
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        self.driver.remove_export(context, volume)
        if unmanage_only:
            self.driver.unmanage(volume)
        elif cascade:
            LOG.debug('Performing cascade delete.')
            snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                volume.id)
            for s in snapshots:
                # The API layer is expected to have put every snapshot
                # into 'deleting' before dispatching a cascade delete.
                if s.status != fields.SnapshotStatus.DELETING:
                    self._clear_db(context, is_migrating_dest, volume,
                                   'error_deleting')

                    msg = (_("Snapshot %(id)s was found in state "
                             "%(state)s rather than 'deleting' during "
                             "cascade delete.") % {'id': s.id,
                                                   'state': s.status})
                    raise exception.InvalidSnapshot(reason=msg)

                self.delete_snapshot(context, s)
            LOG.debug('Snapshots deleted, issuing volume delete')
            self.driver.delete_volume(volume)
        else:
            self.driver.delete_volume(volume)
    except exception.VolumeIsBusy:
        LOG.error("Unable to delete busy volume.",
                  resource=volume)
        # If this is a destination volume, we have to clear the database
        # record to avoid user confusion.
        self._clear_db(context, is_migrating_dest, volume,
                       'available')
        return
    except Exception:
        with excutils.save_and_reraise_exception():
            # If this is a destination volume, we have to clear the
            # database record to avoid user confusion.
            new_status = 'error_deleting'
            if unmanage_only is True:
                new_status = 'error_unmanaging'

            self._clear_db(context, is_migrating_dest, volume,
                           new_status)

    # If deleting source/destination volume in a migration or a temp
    # volume for backup, we should skip quotas.
    skip_quota = is_migrating or is_temp_vol
    if not skip_quota:
        # Get reservations
        try:
            reservations = None
            # 'error_managing_deleting' volumes never counted towards
            # quota, so there is nothing to give back for them.
            if volume.status != 'error_managing_deleting':
                reserve_opts = {'volumes': -1,
                                'gigabytes': -volume.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            # Best effort: a quota bookkeeping failure must not block
            # the actual delete.
            LOG.exception("Failed to update usages deleting volume.",
                          resource=volume)

    volume.destroy()

    # If deleting source/destination volume in a migration or a temp
    # volume for backup, we should skip quotas.
    if not skip_quota:
        notification = "delete.end"
        if unmanage_only:
            notification = "unmanage.end"
        self._notify_about_volume_usage(context, volume, notification)

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)

    self._update_allocated_capacity(volume, decrement=True)
    self.publish_service_capabilities(context)

    msg = "Deleted volume successfully."
    if unmanage_only:
        msg = "Unmanaged volume successfully."
    LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted).", resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def _revert_to_snapshot_generic(self, ctxt, volume, snapshot):
    """Generic way to revert volume to a snapshot.

    the framework will use the generic way to implement the revert
    to snapshot feature:
    1. create a temporary volume from snapshot
    2. mount two volumes to host
    3. copy data from temporary volume to original volume
    4. detach and destroy temporary volume

    :param ctxt: request context; replaced by the internal-tenant
                 context when one is configured.
    :param volume: the Volume object being reverted.
    :param snapshot: the Snapshot object to revert to.
    """
    temp_vol = None

    try:
        v_options = {'display_name': '[revert] temporary volume created '
                                     'from snapshot %s' % snapshot.id}
        ctxt = context.get_internal_tenant_context() or ctxt
        temp_vol = self.driver._create_temp_volume_from_snapshot(
            ctxt, volume, snapshot, volume_options=v_options)
        self._copy_volume_data(ctxt, temp_vol, volume)
        self.driver.delete_volume(temp_vol)
        temp_vol.destroy()
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(
                "Failed to use snapshot %(snapshot)s to create "
                "a temporary volume and copy data to volume "
                " %(volume)s.",
                {'snapshot': snapshot.id,
                 'volume': volume.id})
            # Only clean up the temp volume when it was fully created;
            # any other status is left for manual inspection.
            if temp_vol and temp_vol.status == 'available':
                self.driver.delete_volume(temp_vol)
                temp_vol.destroy()
def _revert_to_snapshot(self, context, volume, snapshot):
    """Use driver or generic method to rollback volume.

    Prefers the driver's native revert_to_snapshot(); falls back to the
    generic copy-from-temporary-volume implementation when the driver
    does not provide one (NotImplementedError/AttributeError).
    """
    try:
        self.driver.revert_to_snapshot(context, volume, snapshot)
    except (NotImplementedError, AttributeError):
        LOG.info("Driver's 'revert_to_snapshot' is not found. "
                 "Try to use copy-snapshot-to-volume method.")
        self._revert_to_snapshot_generic(context, volume, snapshot)
def _create_backup_snapshot(self, context, volume):
    """Create and store a safety snapshot of *volume* before a revert.

    The snapshot lets the user restore their data manually should the
    revert process fail part-way through.

    :returns: the created Snapshot object.
    """
    snapshot = objects.Snapshot(
        context=context,
        volume_id=volume.id,
        user_id=context.user_id,
        project_id=context.project_id,
        status=fields.SnapshotStatus.CREATING,
        progress='0%',
        volume_size=volume.size,
        display_name='[revert] volume %s backup snapshot' % volume.id,
        display_description='This is only used for backup when '
                            'reverting. If the reverting process '
                            'failed, you can restore you data by '
                            'creating new volume with this snapshot.',
        volume_type_id=volume.volume_type_id,
        encryption_key_id=volume.encryption_key_id,
        metadata={})
    snapshot.create()
    self.create_snapshot(context, snapshot)
    return snapshot
def revert_to_snapshot(self, context, volume, snapshot):
    """Revert a volume to a snapshot.

    The process of reverting to snapshot consists of several steps:
    1. create a snapshot for backup (in case of data loss)
    2.1. use driver's specific logic to revert volume
    2.2. try the generic way to revert volume if driver's method is missing
    3. delete the backup snapshot

    :param context: request context.
    :param volume: Volume object, expected in 'reverting' status.
    :param snapshot: Snapshot object, expected in 'restoring' status.
    :raises exception.BadResetResourceStatus: when the revert succeeded
        but the volume/snapshot status could not be reset afterwards.
    """
    backup_snapshot = None
    try:
        LOG.info("Start to perform revert to snapshot process.")

        self._notify_about_volume_usage(context, volume,
                                        "revert.start")
        self._notify_about_snapshot_usage(context, snapshot,
                                          "revert.start")

        # Create a snapshot which can be used to restore the volume
        # data by hand if revert process failed.
        if self.driver.snapshot_revert_use_temp_snapshot():
            backup_snapshot = self._create_backup_snapshot(context,
                                                           volume)
        self._revert_to_snapshot(context, volume, snapshot)
    except Exception as error:
        with excutils.save_and_reraise_exception():
            self._notify_about_volume_usage(context, volume,
                                            "revert.end")
            self._notify_about_snapshot_usage(context, snapshot,
                                              "revert.end")
            msg = ('Volume %(v_id)s revert to '
                   'snapshot %(s_id)s failed with %(error)s.')
            msg_args = {'v_id': volume.id,
                        's_id': snapshot.id,
                        'error': six.text_type(error)}
            # Best-effort status resets; failures are appended to the
            # log message rather than raised, since we are already
            # re-raising the original error.
            v_res = volume.update_single_status_where(
                'error',
                'reverting')
            if not v_res:
                msg_args = {"id": volume.id,
                            "status": 'error'}
                msg += ("Failed to reset volume %(id)s "
                        "status to %(status)s.") % msg_args

            s_res = snapshot.update_single_status_where(
                fields.SnapshotStatus.AVAILABLE,
                fields.SnapshotStatus.RESTORING)
            if not s_res:
                msg_args = {"id": snapshot.id,
                            "status":
                                fields.SnapshotStatus.AVAILABLE}
                msg += ("Failed to reset snapshot %(id)s "
                        "status to %(status)s." % msg_args)
            LOG.exception(msg, msg_args)

    # Success path: conditionally flip statuses back; a failed flip here
    # is surfaced to the caller so the operator can fix it manually.
    v_res = volume.update_single_status_where(
        'available', 'reverting')
    if not v_res:
        msg_args = {"id": volume.id,
                    "status": 'available'}
        msg = _("Revert finished, but failed to reset "
                "volume %(id)s status to %(status)s, "
                "please manually reset it.") % msg_args
        raise exception.BadResetResourceStatus(reason=msg)

    s_res = snapshot.update_single_status_where(
        fields.SnapshotStatus.AVAILABLE,
        fields.SnapshotStatus.RESTORING)
    if not s_res:
        msg_args = {"id": snapshot.id,
                    "status":
                        fields.SnapshotStatus.AVAILABLE}
        msg = _("Revert finished, but failed to reset "
                "snapshot %(id)s status to %(status)s, "
                "please manually reset it.") % msg_args
        raise exception.BadResetResourceStatus(reason=msg)

    if backup_snapshot:
        self.delete_snapshot(context,
                             backup_snapshot, handle_quota=False)
    msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
           'successfully.')
    msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
    LOG.info(msg, msg_args)
    self._notify_about_volume_usage(context, volume, "revert.end")
    self._notify_about_snapshot_usage(context, snapshot, "revert.end")
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
    """Creates and exports the snapshot.

    :param context: request context; elevated internally.
    :param snapshot: Snapshot object to create on the backend.
    :returns: the snapshot id.
    :raises exception.MetadataCopyFailure: if copying the source
        volume's Glance metadata to the snapshot fails.
    """
    context = context.elevated()

    self._notify_about_snapshot_usage(
        context, snapshot, "create.start")

    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the snapshot status updated.
        utils.require_driver_initialized(self.driver)

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot.context = context

        model_update = self.driver.create_snapshot(snapshot)
        if model_update:
            snapshot.update(model_update)
            snapshot.save()
    except Exception as create_error:
        with excutils.save_and_reraise_exception():
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()
            self.message_api.create(
                context,
                action=message_field.Action.SNAPSHOT_CREATE,
                resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                resource_uuid=snapshot['id'],
                exception=create_error,
                detail=message_field.Detail.SNAPSHOT_CREATE_ERROR)

    vol_ref = self.db.volume_get(context, snapshot.volume_id)
    if vol_ref.bootable:
        try:
            self.db.volume_glance_metadata_copy_to_snapshot(
                context, snapshot.id, snapshot.volume_id)
        except exception.GlanceMetadataNotFound:
            # If volume is not created from image, No glance metadata
            # would be available for that volume in
            # volume glance metadata table
            pass
        except exception.CinderException as ex:
            LOG.exception("Failed updating snapshot"
                          " metadata using the provided volumes"
                          " %(volume_id)s metadata",
                          {'volume_id': snapshot.volume_id},
                          resource=snapshot)
            snapshot.status = fields.SnapshotStatus.ERROR
            snapshot.save()
            self.message_api.create(
                context,
                action=message_field.Action.SNAPSHOT_CREATE,
                resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                resource_uuid=snapshot['id'],
                exception=ex,
                detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED
            )
            raise exception.MetadataCopyFailure(reason=six.text_type(ex))

    snapshot.status = fields.SnapshotStatus.AVAILABLE
    snapshot.progress = '100%'
    # Resync with the volume's DB value. This addresses the case where
    # the snapshot creation was in flight just prior to when the volume's
    # fixed_key encryption key ID was migrated to Barbican.
    snapshot.encryption_key_id = vol_ref.encryption_key_id
    snapshot.save()

    self._notify_about_snapshot_usage(context, snapshot, "create.end")
    LOG.info("Create snapshot completed successfully",
             resource=snapshot)
    return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot,
                    unmanage_only=False, handle_quota=True):
    """Deletes and unexports snapshot.

    :param context: request context; elevated internally.
    :param snapshot: Snapshot object to delete.
    :param unmanage_only: remove from Cinder's management without
                          deleting the backend snapshot.
    :param handle_quota: when False (e.g. internal backup snapshots),
                         skip quota bookkeeping entirely.
    """
    context = context.elevated()
    snapshot._context = context
    project_id = snapshot.project_id

    self._notify_about_snapshot_usage(
        context, snapshot, "delete.start")
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the snapshot status updated.
        utils.require_driver_initialized(self.driver)

        # Pass context so that drivers that want to use it, can,
        # but it is not a requirement for all drivers.
        snapshot.context = context
        snapshot.save()

        if unmanage_only:
            self.driver.unmanage_snapshot(snapshot)
        else:
            self.driver.delete_snapshot(snapshot)
    except exception.SnapshotIsBusy as busy_error:
        # Busy snapshots are put back to 'available' and the delete is
        # abandoned rather than re-raised.
        LOG.error("Delete snapshot failed, due to snapshot busy.",
                  resource=snapshot)
        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.save()
        self.message_api.create(
            context,
            action=message_field.Action.SNAPSHOT_DELETE,
            resource_type=message_field.Resource.VOLUME_SNAPSHOT,
            resource_uuid=snapshot['id'],
            exception=busy_error)
        return
    except Exception as delete_error:
        with excutils.save_and_reraise_exception():
            snapshot.status = fields.SnapshotStatus.ERROR_DELETING
            snapshot.save()
            self.message_api.create(
                context,
                action=message_field.Action.SNAPSHOT_DELETE,
                resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                resource_uuid=snapshot['id'],
                exception=delete_error,
                detail=message_field.Detail.SNAPSHOT_DELETE_ERROR)

    # Get reservations
    reservations = None
    try:
        if handle_quota:
            if CONF.no_snapshot_gb_quota:
                reserve_opts = {'snapshots': -1}
            else:
                reserve_opts = {
                    'snapshots': -1,
                    'gigabytes': -snapshot.volume_size,
                }
            volume_ref = self.db.volume_get(context, snapshot.volume_id)
            QUOTAS.add_volume_type_opts(context,
                                        reserve_opts,
                                        volume_ref.get('volume_type_id'))
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          **reserve_opts)
    except Exception:
        # Best effort: quota failures do not block the delete itself.
        reservations = None
        LOG.exception("Update snapshot usages failed.",
                      resource=snapshot)
    self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
    snapshot.destroy()
    self._notify_about_snapshot_usage(context, snapshot, "delete.end")

    # Commit the reservations
    if reservations:
        QUOTAS.commit(context, reservations, project_id=project_id)

    msg = "Delete snapshot completed successfully."
    if unmanage_only:
        msg = "Unmanage snapshot completed successfully."
    LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
                  mountpoint, mode, volume=None):
    """Updates db to show volume is attached.

    :param context: request context.
    :param volume_id: ID of the volume being attached.
    :param instance_uuid: UUID of the attaching instance, or None for a
                          host attach.
    :param host_name: host for a host attach; sanitized before use.
    :param mountpoint: mountpoint on the attaching instance/host.
    :param mode: 'rw' or 'ro'.
    :param volume: Volume object; looked up from volume_id when absent
                   (older RPC clients).
    :returns: the VolumeAttachment record.
    :raises exception.InvalidVolume: on mode conflicts or disallowed
                                     multiple attachment.
    :raises exception.InvalidUUID: when instance_uuid is malformed.
    """
    # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
    if volume is None:
        # For older clients, mimic the old behavior and look
        # up the volume by its volume_id.
        volume = objects.Volume.get_by_id(context, volume_id)

    # Get admin_metadata. This needs admin context.
    with volume.obj_as_admin():
        volume_metadata = volume.admin_metadata
    # check the volume status before attaching
    if volume.status == 'attaching':
        if (volume_metadata.get('attached_mode') and
                volume_metadata.get('attached_mode') != mode):
            raise exception.InvalidVolume(
                reason=_("being attached by different mode"))

    host_name_sanitized = volume_utils.sanitize_hostname(
        host_name) if host_name else None
    if instance_uuid:
        attachments = (
            VA_LIST.get_all_by_instance_uuid(
                context, instance_uuid))
    else:
        attachments = (
            VA_LIST.get_all_by_host(
                context, host_name_sanitized))
    if attachments:
        # check if volume<->instance mapping is already tracked in DB
        for attachment in attachments:
            if attachment['volume_id'] == volume_id:
                # Already attached to this instance/host: just make the
                # status consistent and return the existing record.
                volume.status = 'in-use'
                volume.save()
                return attachment

    if (volume.status == 'in-use' and not volume.multiattach
            and not volume.migration_status):
        raise exception.InvalidVolume(
            reason=_("volume is already attached and multiple attachments "
                     "are not enabled"))

    self._notify_about_volume_usage(context, volume,
                                    "attach.start")

    attachment = volume.begin_attach(mode)

    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        attachment.attach_status = (
            fields.VolumeAttachStatus.ERROR_ATTACHING)
        attachment.save()
        raise exception.InvalidUUID(uuid=instance_uuid)

    try:
        if volume_metadata.get('readonly') == 'True' and mode != 'ro':
            raise exception.InvalidVolumeAttachMode(mode=mode,
                                                    volume_id=volume.id)
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        LOG.info('Attaching volume %(volume_id)s to instance '
                 '%(instance)s at mountpoint %(mount)s on host '
                 '%(host)s.',
                 {'volume_id': volume_id, 'instance': instance_uuid,
                  'mount': mountpoint, 'host': host_name_sanitized},
                 resource=volume)
        self.driver.attach_volume(context,
                                  volume,
                                  instance_uuid,
                                  host_name_sanitized,
                                  mountpoint)
    except Exception as excep:
        with excutils.save_and_reraise_exception():
            self.message_api.create(
                context,
                message_field.Action.ATTACH_VOLUME,
                resource_uuid=volume_id,
                exception=excep)
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()

    volume = attachment.finish_attach(
        instance_uuid,
        host_name_sanitized,
        mountpoint,
        mode)

    self._notify_about_volume_usage(context, volume, "attach.end")
    LOG.info("Attach volume completed successfully.",
             resource=volume)
    return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
                  volume=None):
    """Updates db to show volume is detached.

    :param context: request context.
    :param volume_id: ID of the volume being detached.
    :param attachment_id: specific attachment to detach; may be omitted
                          when the volume has at most one attachment.
    :param volume: Volume object; looked up from volume_id when absent
                   (older RPC clients).
    :raises exception.InvalidVolume: when multiple attachments exist
                                     and no attachment_id was given.
    :raises exception.RemoveExportException: if removing the export
                                             fails after detach.
    """
    # TODO(vish): refactor this into a more general "unreserve"
    # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
    if volume is None:
        # For older clients, mimic the old behavior and look up the volume
        # by its volume_id.
        volume = objects.Volume.get_by_id(context, volume_id)

    if attachment_id:
        try:
            attachment = objects.VolumeAttachment.get_by_id(context,
                                                            attachment_id)
        except exception.VolumeAttachmentNotFound:
            LOG.info("Volume detach called, but volume not attached.",
                     resource=volume)
            # We need to make sure the volume status is set to the correct
            # status. It could be in detaching status now, and we don't
            # want to leave it there.
            volume.finish_detach(attachment_id)
            return
    else:
        # We can try and degrade gracefully here by trying to detach
        # a volume without the attachment_id here if the volume only has
        # one attachment. This is for backwards compatibility.
        attachments = volume.volume_attachment
        if len(attachments) > 1:
            # There are more than 1 attachments for this volume
            # we have to have an attachment id.
            msg = _("Detach volume failed: More than one attachment, "
                    "but no attachment_id provided.")
            LOG.error(msg, resource=volume)
            raise exception.InvalidVolume(reason=msg)
        elif len(attachments) == 1:
            attachment = attachments[0]
        else:
            # there aren't any attachments for this volume.
            # so set the status to available and move on.
            LOG.info("Volume detach called, but volume not attached.",
                     resource=volume)
            volume.status = 'available'
            volume.attach_status = fields.VolumeAttachStatus.DETACHED
            volume.save()
            return

    self._notify_about_volume_usage(context, volume, "detach.start")
    try:
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        LOG.info('Detaching volume %(volume_id)s from instance '
                 '%(instance)s.',
                 {'volume_id': volume_id,
                  'instance': attachment.get('instance_uuid')},
                 resource=volume)
        self.driver.detach_volume(context, volume, attachment)
    except Exception:
        with excutils.save_and_reraise_exception():
            self.db.volume_attachment_update(
                context, attachment.get('id'), {
                    'attach_status':
                        fields.VolumeAttachStatus.ERROR_DETACHING})

    # NOTE(jdg): We used to do an ensure export here to
    # catch upgrades while volumes were attached (E->F)
    # this was necessary to convert in-use volumes from
    # int ID's to UUID's.  Don't need this any longer

    # We're going to remove the export here
    # (delete the iscsi target)
    try:
        utils.require_driver_initialized(self.driver)
        self.driver.remove_export(context.elevated(), volume)
    except exception.DriverNotInitialized:
        with excutils.save_and_reraise_exception():
            LOG.exception("Detach volume failed, due to "
                          "uninitialized driver.",
                          resource=volume)
    except Exception as ex:
        LOG.exception("Detach volume failed, due to "
                      "remove-export failure.",
                      resource=volume)
        raise exception.RemoveExportException(volume=volume_id,
                                              reason=six.text_type(ex))

    volume.finish_detach(attachment.id)
    self._notify_about_volume_usage(context, volume, "detach.end")
    LOG.info("Detach volume completed successfully.", resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
                                     image_id, image_meta):
    """Create a new image-volume and cache entry for it.

    This assumes that the image has already been downloaded and stored
    in the volume described by the volume_ref.

    Best effort: if space cannot be ensured, the clone fails, or any
    CinderException occurs, the cache entry is simply not created.
    """
    cache_entry = self.image_volume_cache.get_entry(ctx,
                                                    volume_ref,
                                                    image_id,
                                                    image_meta)
    if cache_entry:
        LOG.debug('Cache entry already exists with image ID %'
                  '(image_id)s',
                  {'image_id': image_id})
        return

    image_volume = None
    try:
        if not self.image_volume_cache.ensure_space(ctx, volume_ref):
            LOG.warning('Unable to ensure space for image-volume in'
                        ' cache. Will skip creating entry for image'
                        ' %(image)s on %(service)s.',
                        {'image': image_id,
                         'service': volume_ref.service_topic_queue})
            return

        image_volume = self._clone_image_volume(ctx,
                                                volume_ref,
                                                image_meta)
        if not image_volume:
            LOG.warning('Unable to clone image_volume for image '
                        '%(image_id)s will not create cache entry.',
                        {'image_id': image_id})
            return

        self.image_volume_cache.create_cache_entry(
            ctx,
            image_volume,
            image_id,
            image_meta
        )
    except exception.CinderException as e:
        LOG.warning('Failed to create new image-volume cache entry.'
                    ' Error: %(exception)s', {'exception': e})
        # Don't leak the clone if the cache entry could not be recorded.
        if image_volume:
            self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
    """Clone *volume* into a new read-only image-backing volume.

    Reserves and commits quota for the clone, creates the DB record,
    then runs the normal create_volume flow with rescheduling disabled.

    :returns: the new Volume object, or None on any failure (quota is
              rolled back / the partial volume deleted best-effort).
    """
    volume_type_id = volume.get('volume_type_id')
    reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
    QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
    reservations = QUOTAS.reserve(ctx, **reserve_opts)
    # NOTE(yikun): Skip 'snapshot_id', 'source_volid' keys to avoid
    # creating tmp img vol from wrong snapshot or wrong source vol.
    skip = {'snapshot_id', 'source_volid'}
    skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)

    try:
        new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
        new_vol_values['volume_type_id'] = volume_type_id
        new_vol_values['attach_status'] = (
            fields.VolumeAttachStatus.DETACHED)
        new_vol_values['status'] = 'creating'
        new_vol_values['project_id'] = ctx.project_id
        new_vol_values['display_name'] = 'image-%s' % image_meta['id']
        new_vol_values['source_volid'] = volume.id

        LOG.debug('Creating image volume entry: %s.', new_vol_values)
        image_volume = objects.Volume(context=ctx, **new_vol_values)
        image_volume.create()
    except Exception as ex:
        LOG.exception('Create clone_image_volume: %(volume_id)s '
                      'for image %(image_id)s, '
                      'failed (Exception: %(except)s)',
                      {'volume_id': volume.id,
                       'image_id': image_meta['id'],
                       'except': ex})
        QUOTAS.rollback(ctx, reservations)
        return

    QUOTAS.commit(ctx, reservations,
                  project_id=new_vol_values['project_id'])

    try:
        self.create_volume(ctx, image_volume, allow_reschedule=False)
        image_volume.refresh()
        if image_volume.status != 'available':
            raise exception.InvalidVolume(_('Volume is not available.'))

        # Image-backing volumes must never be written to.
        self.db.volume_admin_metadata_update(ctx.elevated(),
                                             image_volume.id,
                                             {'readonly': 'True'},
                                             False)
        return image_volume
    except exception.CinderException:
        LOG.exception('Failed to clone volume %(volume_id)s for '
                      'image %(image_id)s.',
                      {'volume_id': volume.id,
                       'image_id': image_meta['id']})
        try:
            self.delete_volume(ctx, image_volume)
        except exception.CinderException:
            LOG.exception('Could not delete the image volume %(id)s.',
                          {'id': volume.id})
        return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
                                         image_meta):
    """Create a cloned volume and register its location to the image.

    Only applies to raw/bare images. On registration failure the clone
    is deleted again (best effort).

    :returns: True when the clone was created and its cinder:// URI was
              registered with Glance, False otherwise.
    """
    if (image_meta['disk_format'] != 'raw' or
            image_meta['container_format'] != 'bare'):
        return False

    image_volume_context = ctx
    if self.driver.configuration.image_upload_use_internal_tenant:
        internal_ctx = context.get_internal_tenant_context()
        if internal_ctx:
            image_volume_context = internal_ctx

    image_volume = self._clone_image_volume(image_volume_context,
                                            volume,
                                            image_meta)
    if not image_volume:
        return False

    # The image_owner metadata should be set before uri is added to
    # the image so glance cinder store can check its owner.
    image_volume_meta = {'image_owner': ctx.project_id}
    self.db.volume_metadata_update(image_volume_context,
                                   image_volume.id,
                                   image_volume_meta,
                                   False)

    uri = 'cinder://%s' % image_volume.id
    image_registered = None
    try:
        image_registered = image_service.add_location(
            ctx, image_meta['id'], uri, {})
    except (exception.NotAuthorized, exception.Invalid,
            exception.NotFound):
        LOG.exception('Failed to register image volume location '
                      '%(uri)s.', {'uri': uri})

    if not image_registered:
        LOG.warning('Registration of image volume URI %(uri)s '
                    'to image %(image_id)s failed.',
                    {'uri': uri, 'image_id': image_meta['id']})
        try:
            self.delete_volume(image_volume_context, image_volume)
        except exception.CinderException:
            LOG.exception('Could not delete failed image volume '
                          '%(id)s.', {'id': image_volume.id})
        return False

    image_volume_meta['glance_image_id'] = image_meta['id']
    self.db.volume_metadata_update(image_volume_context,
                                   image_volume.id,
                                   image_volume_meta,
                                   False)
    return True
def copy_volume_to_image(self, context, volume_id, image_meta):
    """Uploads the specified volume to Glance.

    image_meta is a dictionary containing the following keys:
    'id', 'container_format', 'disk_format'

    When the backend supports it and configuration allows, the upload
    is satisfied by cloning the volume and registering its location
    with Glance instead of streaming the data.
    """
    payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
    image_service = None
    try:
        volume = objects.Volume.get_by_id(context, volume_id)

        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        image_service, image_id = \
            glance.get_remote_image_service(context, image_meta['id'])
        if (self.driver.configuration.image_upload_use_cinder_backend
                and self._clone_image_volume_and_add_location(
                    context, volume, image_service, image_meta)):
            LOG.debug("Registered image volume location to glance "
                      "image-id: %(image_id)s.",
                      {'image_id': image_meta['id']},
                      resource=volume)
        else:
            self.driver.copy_volume_to_image(context, volume,
                                             image_service, image_meta)
            LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
                      {'image_id': image_meta['id']},
                      resource=volume)
    except Exception as error:
        # NOTE(review): if Volume.get_by_id raised, 'volume' is unbound
        # here and this logging would itself fail — presumably the
        # lookup is expected to succeed; verify against callers.
        LOG.error("Upload volume to image encountered an error "
                  "(image-id: %(image_id)s).",
                  {'image_id': image_meta['id']},
                  resource=volume)
        self.message_api.create(
            context,
            message_field.Action.COPY_VOLUME_TO_IMAGE,
            resource_uuid=volume_id,
            exception=error,
            detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
        if image_service is not None:
            # Deletes the image if it is in queued or saving state
            self._delete_image(context, image_meta['id'], image_service)

        with excutils.save_and_reraise_exception():
            payload['message'] = six.text_type(error)
    finally:
        self.db.volume_update_status_based_on_attachment(context,
                                                         volume_id)
    LOG.info("Copy volume to image completed successfully.",
             resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning("Deleting image in unexpected status: "
"%(image_status)s.",
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning("Image delete encountered an error.",
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
    """Augment driver connection info with Cinder-level options.

    Adds front-end QoS specs (scaling any *_per_gb values by volume
    size), access mode, encrypted flag and discard flag to
    conn_info['data'].

    :param context: request context (currently unused here).
    :param volume: the Volume being connected.
    :param conn_info: dict from the driver's initialize_connection().
    :returns: the mutated conn_info dict.
    """
    # Add qos_specs to connection info
    typeid = volume.volume_type_id
    specs = None
    if typeid:
        res = volume_types.get_volume_type_qos_specs(typeid)
        qos = res['qos_specs']
        # only pass qos_specs that is designated to be consumed by
        # front-end, or both front-end and back-end.
        if qos and qos.get('consumer') in ['front-end', 'both']:
            specs = qos.get('specs')

            # NOTE(mnaser): The following configures for per-GB QoS
            if specs is not None:
                volume_size = int(volume.size)
                tune_opts = ('read_iops_sec', 'read_bytes_sec',
                             'write_iops_sec', 'write_bytes_sec',
                             'total_iops_sec', 'total_bytes_sec')

                for option in tune_opts:
                    option_per_gb = '%s_per_gb' % option
                    option_per_gb_min = '%s_per_gb_min' % option
                    option_max = '%s_max' % option
                    if option_per_gb in specs:
                        # Effective value = clamp(size * per_gb,
                        # per_gb_min, max); the helper keys are popped
                        # so only the final option reaches the client.
                        minimum_value = int(specs.pop(option_per_gb_min,
                                                      0))
                        value = int(specs[option_per_gb]) * volume_size
                        per_gb_value = max(minimum_value, value)
                        max_value = int(specs.pop(option_max,
                                                  per_gb_value))
                        specs[option] = min(per_gb_value, max_value)
                        specs.pop(option_per_gb)

    qos_spec = dict(qos_specs=specs)
    conn_info['data'].update(qos_spec)

    # Add access_mode to connection info
    volume_metadata = volume.admin_metadata
    access_mode = volume_metadata.get('attached_mode')
    if access_mode is None:
        # NOTE(zhiyan): client didn't call 'os-attach' before
        access_mode = ('ro'
                       if volume_metadata.get('readonly') == 'True'
                       else 'rw')
    conn_info['data']['access_mode'] = access_mode

    # Add encrypted flag to connection_info if not set in the driver.
    if conn_info['data'].get('encrypted') is None:
        encrypted = bool(volume.encryption_key_id)
        conn_info['data']['encrypted'] = encrypted

    # Add discard flag to connection_info if not set in the driver and
    # configured to be reported.
    if conn_info['data'].get('discard') is None:
        discard_supported = (self.driver.configuration
                             .safe_get('report_discard_supported'))
        if discard_supported:
            conn_info['data']['discard'] = True

    return conn_info
    def initialize_connection(self, context, volume, connector):
        """Prepare volume for connection from host represented by connector.

        This method calls the driver initialize_connection and returns
        it to the caller.  The connector parameter is a dictionary with
        information about the host that will connect to the volume in the
        following format:

        .. code:: json

            {
                "ip": "<ip>",
                "initiator": "<initiator>"
            }

        ip:
            the ip address of the connecting machine

        initiator:
            the iscsi initiator name of the connecting machine. This can be
            None if the connecting machine does not support iscsi connections.

        driver is responsible for doing any necessary security setup and
        returning a connection_info dictionary in the following format:

        .. code:: json

            {
                "driver_volume_type": "<driver_volume_type>",
                "data": "<data>"
            }

        driver_volume_type:
            a string to identify the type of volume. This can be used by the
            calling code to determine the strategy for connecting to the
            volume. This could be 'iscsi', 'rbd', 'sheepdog', etc.

        data:
            this is the data that the calling code will use to connect to the
            volume. Keep in mind that this will be serialized to json in
            various places, so it should not contain any non-json data types.

        :raises InvalidInput: if the connector is rejected as invalid.
        :raises ExportFailure: if persisting the export's model update fails.
        :raises VolumeBackendAPIException: on any other driver failure.
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        # TODO(jdg): Add deprecation warning
        utils.require_driver_initialized(self.driver)

        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)

        try:
            model_update = self.driver.create_export(context.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            msg = _("Create export of volume failed (%s)") % ex.msg
            LOG.exception(msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            if model_update:
                volume.update(model_update)
                volume.save()
        except Exception as ex:
            LOG.exception("Model update failed.", resource=volume)
            # Undo the export created above before failing, best-effort.
            try:
                self.driver.remove_export(context.elevated(), volume)
            except Exception:
                LOG.exception('Could not remove export after DB model failed.')
            raise exception.ExportFailure(reason=six.text_type(ex))

        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except exception.ConnectorRejected:
            with excutils.save_and_reraise_exception():
                LOG.info("The connector was rejected by the volume driver.")
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            # Clean up the export since the connection never materialized.
            self.driver.remove_export(context.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)

        conn_info = self._parse_connection_options(context, volume, conn_info)
        LOG.info("Initialize volume connection completed successfully.",
                 resource=volume)
        return conn_info
    def initialize_connection_snapshot(self, ctxt, snapshot_id, connector):
        """Prepare a snapshot for connection from the host in *connector*.

        Snapshot counterpart of :meth:`initialize_connection`: validates
        the connector, creates (and persists) the snapshot export, then
        asks the driver for the connection information.

        :param snapshot_id: ID of the snapshot to connect to.
        :param connector: dict describing the connecting host (same
            format as for :meth:`initialize_connection`).
        :returns: the driver's connection info dict.
        :raises InvalidInput: if the connector is rejected as invalid.
        :raises ExportFailure: if persisting the export's model update fails.
        :raises VolumeBackendAPIException: on any other driver failure.
        """
        utils.require_driver_initialized(self.driver)
        snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)

        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate snapshot connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=snapshot)
            raise exception.VolumeBackendAPIException(data=err_msg)

        model_update = None
        try:
            LOG.debug("Snapshot %s: creating export.", snapshot.id)
            model_update = self.driver.create_export_snapshot(
                ctxt.elevated(), snapshot, connector)
            if model_update:
                snapshot.provider_location = model_update.get(
                    'provider_location', None)
                snapshot.provider_auth = model_update.get(
                    'provider_auth', None)
                snapshot.save()
        except exception.CinderException as ex:
            msg = _("Create export of snapshot failed (%s)") % ex.msg
            LOG.exception(msg, resource=snapshot)
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            if model_update:
                snapshot.update(model_update)
                snapshot.save()
        except exception.CinderException as ex:
            LOG.exception("Model update failed.", resource=snapshot)
            raise exception.ExportFailure(reason=six.text_type(ex))

        try:
            conn = self.driver.initialize_connection_snapshot(snapshot,
                                                              connector)
        except Exception as err:
            # Best-effort cleanup of the export before re-raising; a
            # failure during cleanup is reported instead.
            try:
                err_msg = (_('Unable to fetch connection information from '
                             'backend: %(err)s') %
                           {'err': six.text_type(err)})
                LOG.error(err_msg)
                LOG.debug("Cleaning up failed connect initialization.")
                self.driver.remove_export_snapshot(ctxt.elevated(), snapshot)
            except Exception as ex:
                ex_msg = (_('Error encountered during cleanup '
                            'of a failed attach: %(ex)s') %
                          {'ex': six.text_type(ex)})
                LOG.error(ex_msg)
                raise exception.VolumeBackendAPIException(data=ex_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)

        LOG.info("Initialize snapshot connection completed successfully.",
                 resource=snapshot)
        return conn
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume_ref)
def terminate_connection_snapshot(self, ctxt, snapshot_id,
connector, force=False):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.terminate_connection_snapshot(snapshot, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate snapshot connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate snapshot connection completed successfully.",
resource=snapshot)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove volume export completed successfully.",
resource=volume_ref)
def remove_export_snapshot(self, ctxt, snapshot_id):
"""Removes an export for a snapshot."""
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.remove_export_snapshot(ctxt, snapshot)
except Exception:
msg = _("Remove snapshot export failed.")
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove snapshot export completed successfully.",
resource=snapshot)
    def accept_transfer(self, context, volume_id, new_user, new_project,
                        no_snapshots=False):
        """Hand a volume over to a new user/project on this backend.

        Calls the driver's ``accept_transfer`` (which may return updated
        model info, e.g. provider/CHAP data tied to the tenant) and
        persists that update to the DB, marking the volume as 'error' if
        the DB update fails.

        :param no_snapshots: accepted for interface compatibility; not
            referenced in this method body.
        :returns: the driver's model update dict (possibly None).
        """
        # NOTE(flaper87): Verify the driver is enabled
        # before going forward. The exception will be caught
        # and the volume status updated.
        utils.require_driver_initialized(self.driver)

        # NOTE(jdg): need elevated context as we haven't "given" the vol
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)

        # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
        # for those that do allow them to return updated model info
        model_update = self.driver.accept_transfer(context,
                                                   volume_ref,
                                                   new_user,
                                                   new_project)

        if model_update:
            try:
                self.db.volume_update(context.elevated(),
                                      volume_id,
                                      model_update)
            except exception.CinderException:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Update volume model for "
                                  "transfer operation failed.",
                                  resource=volume_ref)
                    self.db.volume_update(context.elevated(),
                                          volume_id,
                                          {'status': 'error'})

        LOG.info("Transfer volume completed successfully.",
                 resource=volume_ref)
        return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
    def _attach_volume(self, ctxt, volume, properties, remote=False,
                       attach_encryptor=False):
        """Initialize a connection to *volume* and attach it locally.

        :param properties: brick connector properties for this host.
        :param remote: when True, initialize the connection over RPC on
            the volume's own backend; otherwise do it locally.
        :param attach_encryptor: when True and the volume type is
            encrypted, also attach the volume encryptor.
        :returns: the attach-info dict from :meth:`_connect_device`.
        """
        status = volume['status']

        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            try:
                conn = rpcapi.initialize_connection(ctxt, volume, properties)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to attach volume %(vol)s.",
                              {'vol': volume['id']})
                    # Restore the pre-attach status before re-raising.
                    self.db.volume_update(ctxt, volume['id'],
                                          {'status': status})
        else:
            conn = self.initialize_connection(ctxt, volume, properties)

        attach_info = self._connect_device(conn)
        try:
            if attach_encryptor and (
                    volume_types.is_encrypted(ctxt,
                                              volume.volume_type_id)):
                encryption = self.db.volume_encryption_metadata_get(
                    ctxt.elevated(), volume.id)
                if encryption:
                    utils.brick_attach_volume_encryptor(ctxt,
                                                        attach_info,
                                                        encryption)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to attach volume encryptor"
                          " %(vol)s.", {'vol': volume['id']})
                # Roll back the device attach performed just above.
                self._detach_volume(ctxt, attach_info, volume, properties,
                                    force=True)
        return attach_info
    def _detach_volume(self, ctxt, attach_info, volume, properties,
                       force=False, remote=False,
                       attach_encryptor=False):
        """Undo :meth:`_attach_volume`: disconnect the device and clean up.

        :param attach_info: the dict previously returned by
            :meth:`_attach_volume` / :meth:`_connect_device`.
        :param remote: when True, terminate the connection and remove
            the export over RPC on the volume's backend; otherwise
            locally.
        :param attach_encryptor: when True and the volume type is
            encrypted, detach the volume encryptor first.
        """
        connector = attach_info['connector']
        if attach_encryptor and (
                volume_types.is_encrypted(ctxt,
                                          volume.volume_type_id)):
            encryption = self.db.volume_encryption_metadata_get(
                ctxt.elevated(), volume.id)
            if encryption:
                utils.brick_detach_volume_encryptor(attach_info, encryption)
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'], force=force)
        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(ctxt, volume, properties, force=force)
            rpcapi.remove_export(ctxt, volume)
        else:
            try:
                self.terminate_connection(ctxt, volume['id'], properties,
                                          force=force)
                self.remove_export(ctxt, volume['id'])
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.error('Unable to terminate volume connection: '
                              '%(err)s.', {'err': err})
    def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
        """Copy data from src_vol to dest_vol.

        Attaches both volumes on this host (via RPC for the sides named
        in *remote*), dd-copies the data, and always detaches both in a
        ``finally`` block.

        :param remote: None, 'src', 'dest' or 'both' — which side(s)
            live on a remote backend and must be attached over RPC.
        """
        LOG.debug('_copy_volume_data %(src)s -> %(dest)s.',
                  {'src': src_vol['name'], 'dest': dest_vol['name']})
        attach_encryptor = False
        # If the encryption method or key is changed, we have to
        # copy data through dm-crypt.
        if volume_types.volume_types_encryption_changed(
                ctxt,
                src_vol.volume_type_id,
                dest_vol.volume_type_id):
            attach_encryptor = True
        use_multipath = self.configuration.use_multipath_for_image_xfer
        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
        properties = utils.brick_get_connector_properties(use_multipath,
                                                          enforce_multipath)

        dest_remote = remote in ['dest', 'both']
        dest_attach_info = self._attach_volume(
            ctxt, dest_vol, properties,
            remote=dest_remote,
            attach_encryptor=attach_encryptor)

        try:
            src_remote = remote in ['src', 'both']
            src_attach_info = self._attach_volume(
                ctxt, src_vol, properties,
                remote=src_remote,
                attach_encryptor=attach_encryptor)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to attach source volume for copy.")
                # Source attach failed: undo the destination attach.
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, remote=dest_remote,
                                    attach_encryptor=attach_encryptor,
                                    force=True)

        # Check the backend capabilities of migration destination host.
        rpcapi = volume_rpcapi.VolumeAPI()
        capabilities = rpcapi.get_capabilities(ctxt,
                                               dest_vol.service_topic_queue,
                                               False)
        sparse_copy_volume = bool(capabilities and
                                  capabilities.get('sparse_copy_volume',
                                                   False))

        try:
            size_in_mb = int(src_vol['size']) * units.Ki    # vol size is in GB
            volume_utils.copy_volume(src_attach_info['device']['path'],
                                     dest_attach_info['device']['path'],
                                     size_in_mb,
                                     self.configuration.volume_dd_blocksize,
                                     sparse=sparse_copy_volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to copy volume %(src)s to %(dest)s.",
                          {'src': src_vol['id'], 'dest': dest_vol['id']})
        finally:
            # Detach both sides regardless of copy outcome; the nested
            # finally guarantees the source detach even if the
            # destination detach raises.
            try:
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, force=True,
                                    remote=dest_remote,
                                    attach_encryptor=attach_encryptor)
            finally:
                self._detach_volume(ctxt, src_attach_info, src_vol,
                                    properties, force=True,
                                    remote=src_remote,
                                    attach_encryptor=attach_encryptor)
    def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
        """Host-assisted migration: create a new volume on *backend* and
        copy the data across.

        Creates the destination volume via RPC, waits (with quadratic
        back-off) for it to become available, then either copies the
        data directly (unattached/reserved volumes, completing the
        migration synchronously) or asks Nova to swap the attached
        volume (Nova calls migrate_volume_completion when done).

        :param new_type_id: optional volume type for the destination
            (retype-with-migration case).
        :raises VolumeMigrationFailed: if the destination volume errors
            out or is not ready before the configured timeout.
        """
        rpcapi = volume_rpcapi.VolumeAPI()

        # Create new volume on remote host
        tmp_skip = {'snapshot_id', 'source_volid'}
        skip = {'host', 'cluster_name', 'availability_zone'}
        skip.update(tmp_skip)
        skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)

        new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
            if volume_types.volume_types_encryption_changed(
                    ctxt, volume.volume_type_id, new_type_id):
                encryption_key_id = volume_utils.create_encryption_key(
                    ctxt, self.key_manager, new_type_id)
                new_vol_values['encryption_key_id'] = encryption_key_id

        dst_service = self._get_service(backend['host'])
        new_volume = objects.Volume(
            context=ctxt,
            host=backend['host'],
            availability_zone=dst_service.availability_zone,
            cluster_name=backend.get('cluster_name'),
            status='creating',
            attach_status=fields.VolumeAttachStatus.DETACHED,
            migration_status='target:%s' % volume['id'],
            **new_vol_values
        )
        new_volume.create()
        rpcapi.create_volume(ctxt, new_volume, None, None,
                             allow_reschedule=False)

        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        new_volume.refresh()
        tries = 0
        while new_volume.status != 'available':
            tries += 1
            now = time.time()
            if new_volume.status == 'error':
                msg = _("failed to create new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                # Quadratic back-off between status polls.
                time.sleep(tries ** 2)
            new_volume.refresh()

        # Set skipped value to avoid calling
        # function except for _create_raw_volume
        tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
        if tmp_skipped_values:
            new_volume.update(tmp_skipped_values)
            new_volume.save()

        # Copy the source volume to the destination volume
        try:
            attachments = volume.volume_attachment
            # A volume might have attachments created, but if it is reserved
            # it means it's being migrated prior to the attachment completion.
            if not attachments or volume.status == 'reserved':
                # Pre- and post-copy driver-specific actions
                self.driver.before_volume_copy(ctxt, volume, new_volume,
                                               remote='dest')
                self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
                self.driver.after_volume_copy(ctxt, volume, new_volume,
                                              remote='dest')

                # The above call is synchronous so we complete the migration
                self.migrate_volume_completion(ctxt, volume, new_volume,
                                               error=False)
            else:
                nova_api = compute.API()
                # This is an async call to Nova, which will call the completion
                # when it's done
                for attachment in attachments:
                    instance_uuid = attachment['instance_uuid']
                    nova_api.update_server_volume(ctxt, instance_uuid,
                                                  volume.id,
                                                  new_volume.id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    "Failed to copy volume %(vol1)s to %(vol2)s", {
                        'vol1': volume.id, 'vol2': new_volume.id})
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume)
    def _clean_temporary_volume(self, ctxt, volume, new_volume,
                                clean_db_only=False):
        """Clean up the destination volume after a failed migration.

        During the 'migrating' phase the destination is deleted (DB-only
        when the backing volume was never created); during the
        'completing' phase only its migration_status is cleared, since
        the source may already be gone.

        :param clean_db_only: True when only the DB record exists and no
            backend volume needs deleting.
        """
        # If we're in the migrating phase, we need to cleanup
        # destination volume because source volume is remaining
        if volume.migration_status == 'migrating':
            try:
                if clean_db_only:
                    # The temporary volume is not created, only DB data
                    # is created
                    new_volume.destroy()
                else:
                    # The temporary volume is already created
                    rpcapi = volume_rpcapi.VolumeAPI()
                    rpcapi.delete_volume(ctxt, new_volume)
            except exception.VolumeNotFound:
                LOG.info("Couldn't find the temporary volume "
                         "%(vol)s in the database. There is no need "
                         "to clean up this volume.",
                         {'vol': new_volume.id})
        else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in database should
            # be cleared to handle volume after migration failure
            try:
                new_volume.migration_status = None
                new_volume.save()
            except exception.VolumeNotFound:
                LOG.info("Couldn't find destination volume "
                         "%(vol)s in the database. The entry might be "
                         "successfully deleted during migration "
                         "completion phase.",
                         {'vol': new_volume.id})

            LOG.warning("Failed to migrate volume. The destination "
                        "volume %(vol)s is not deleted since the "
                        "source volume may have been deleted.",
                        {'vol': new_volume.id})
    def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
        """Finish a host-assisted migration by swapping source and
        destination volume records.

        On *error* the destination volume is deleted and the source is
        restored to its previous status.  On success the DB records are
        swapped (so callers keep the original volume id), attachments
        are restored (old attach flow) or re-pointed (new attach flow),
        and the now-obsolete backing volume is deleted asynchronously.

        :returns: the (original) volume id.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()

        # NOTE(jdg):  Things get a little hairy in here and we do a lot of
        # things based on volume previous-status and current-status.  At some
        # point this should all be reworked but for now we need to maintain
        # backward compatibility and NOT change the API so we're going to try
        # and make this work best we can

        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()

        orig_volume_status = volume.previous_status

        if error:
            LOG.info("migrate_volume_completion is cleaning up an error "
                     "for volume %(vol1)s (temporary volume %(vol2)s",
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id

        volume.migration_status = 'completing'
        volume.save()

        volume_attachments = []

        # NOTE(jdg): With new attach flow, we deleted the attachment, so the
        # original volume should now be listed as available, we still need to
        # do the magic swappy thing of name.id etc but we're done with the
        # original attachment record

        # In the "old flow" at this point the orig_volume_status will be in-use
        # and the current status will be retyping.  This is sort of a
        # misleading deal, because Nova has already called terminate
        # connection

        # New Attach Flow, Nova has gone ahead and deleted the attachemnt, this
        # is the source/original volume, we've already migrated the data, we're
        # basically done with it at this point.  We don't need to issue the
        # detach to toggle the status
        if orig_volume_status == 'in-use' and volume.status != 'available':
            for attachment in volume.volume_attachment:
                # Save the attachments the volume currently have
                volume_attachments.append(attachment)
                try:
                    self.detach_volume(ctxt, volume.id, attachment.id)
                except Exception as ex:
                    LOG.error("Detach migration source volume "
                              "%(volume.id)s from attachment "
                              "%(attachment.id)s failed: %(err)s",
                              {'err': ex,
                               'volume.id': volume.id,
                               'attachment.id': attachment.id},
                              resource=volume)

        # Give driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through rpc to the host of the new volume
        # the current host and driver object is for the "existing" volume.
        rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                      orig_volume_status)
        volume.refresh()
        new_volume.refresh()

        # Swap src and dest DB records so we can continue using the src id and
        # asynchronously delete the destination id
        updated_new = volume.finish_volume_migration(new_volume)
        updates = {'status': orig_volume_status,
                   'previous_status': volume.status,
                   'migration_status': 'success'}

        # NOTE(jdg): With new attachment API's nova will delete the
        # attachment for the source volume for us before calling the
        # migration-completion, now we just need to do the swapping on the
        # volume record, but don't jack with the attachments other than
        # updating volume_id

        # In the old flow at this point the volumes are in attaching and
        # deleting status (dest/new is deleting, but we've done our magic
        # swappy thing so it's a bit confusing, but it does unwind properly
        # when you step through it)

        # In the new flow we simlified this and we don't need it, instead of
        # doing a bunch of swapping we just do attachment-create/delete on the
        # nova side, and then here we just do the ID swaps that are necessary
        # to maintain the old beahvior

        # Restore the attachments for old flow use-case
        if orig_volume_status == 'in-use' and volume.status in ['available',
                                                                'reserved',
                                                                'attaching']:
            for attachment in volume_attachments:
                LOG.debug('Re-attaching: %s', attachment)
                # This is just a db state toggle, the volume is actually
                # already attach and in-use, new attachment flow won't allow
                # this
                rpcapi.attach_volume(ctxt, volume,
                                     attachment.instance_uuid,
                                     attachment.attached_host,
                                     attachment.mountpoint,
                                     attachment.attach_mode or 'rw')

        # At this point we now have done almost all of our swapping and
        # state-changes.  The target volume is now marked back to
        # "in-use" the destination/worker volume is now in deleting
        # state and the next steps will finish the deletion steps
        volume.update(updates)
        volume.save()

        # Asynchronous deletion of the source volume in the back-end (now
        # pointed by the target volume id)
        try:
            rpcapi.delete_volume(ctxt, updated_new)
        except Exception as ex:
            LOG.error('Failed to request async delete of migration source '
                      'vol %(vol)s: %(err)s',
                      {'vol': volume.id, 'err': ex})

        # For the new flow this is really the key part.  We just use the
        # attachments to the worker/destination volumes that we created and
        # used for the libvirt migration and we'll just swap their volume_id
        # entries to coorespond with the volume.id swap we did
        for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id):
            attachment.volume_id = volume.id
            attachment.save()

        # Phewww.. that was easy!  Once we get to a point where the old attach
        # flow can go away we really should rewrite all of this.
        LOG.info("Complete-Migrate volume completed successfully.",
                 resource=volume)
        return volume.id
    def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
                       new_type_id=None):
        """Migrate the volume to the specified host (called on source host).

        First tries the driver's own (optimized) migration unless a host
        copy is forced or a retype is involved; falls back to the
        generic host-assisted copy.  The migration_status is set to
        'error' (and any prior status restored) on failure.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the migration status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()

        model_update = None
        moved = False

        status_update = None
        if volume.status in ('retyping', 'maintenance'):
            status_update = {'status': volume.previous_status}

        volume.migration_status = 'migrating'
        volume.save()
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug("Issue driver.migrate_volume.", resource=volume)
                moved, model_update = self.driver.migrate_volume(ctxt,
                                                                 volume,
                                                                 host)
                if moved:
                    dst_service = self._get_service(host['host'])
                    updates = {
                        'host': host['host'],
                        'cluster_name': host.get('cluster_name'),
                        'migration_status': 'success',
                        'availability_zone': dst_service.availability_zone,
                        'previous_status': volume.status,
                    }
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume.update(updates)
                    volume.save()
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        if not moved:
            # Driver could not (or was not asked to) move it; fall back
            # to the generic create-and-copy migration.
            try:
                self._migrate_volume_generic(ctxt, volume, host, new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        LOG.info("Migrate volume completed successfully.",
                 resource=volume)
    def _report_driver_status(self, context):
        """Collect driver stats and queue them for the schedulers.

        Also lazily fills in ``self.service_uuid``, merges extra
        capabilities into the stats, and — when the driver reports a
        replication error — pushes the driver's group/volume model
        updates back to the DB.
        """
        # It's possible during live db migration that the self.service_uuid
        # value isn't set (we didn't restart services), so we'll go ahead
        # and make this a part of the service periodic
        if not self.service_uuid:
            # We hack this with a try/except for unit tests temporarily
            try:
                service = self._get_service()
                self.service_uuid = service.uuid
            except exception.ServiceNotFound:
                LOG.warning("Attempt to update service_uuid "
                            "resulted in a Service NotFound "
                            "exception, service_uuid field on "
                            "volumes will be NULL.")

        if not self.driver.initialized:
            if self.driver.configuration.config_group is None:
                config_group = ''
            else:
                config_group = ('(config name %s)' %
                                self.driver.configuration.config_group)

            LOG.warning("Update driver status failed: %(config_group)s "
                        "is uninitialized.",
                        {'config_group': config_group},
                        resource={'type': 'driver',
                                  'id': self.driver.__class__.__name__})
        else:
            volume_stats = self.driver.get_volume_stats(refresh=True)
            if self.extra_capabilities:
                volume_stats.update(self.extra_capabilities)
                if "pools" in volume_stats:
                    for pool in volume_stats["pools"]:
                        pool.update(self.extra_capabilities)
                else:
                    volume_stats.update(self.extra_capabilities)
            if volume_stats:

                # NOTE(xyang): If driver reports replication_status to be
                # 'error' in volume_stats, get model updates from driver
                # and update db
                if volume_stats.get('replication_status') == (
                        fields.ReplicationStatus.ERROR):
                    filters = self._get_cluster_or_host_filters()
                    groups = objects.GroupList.get_all_replicated(
                        context, filters=filters)
                    group_model_updates, volume_model_updates = (
                        self.driver.get_replication_error_status(context,
                                                                 groups))
                    for grp_update in group_model_updates:
                        try:
                            grp_obj = objects.Group.get_by_id(
                                context, grp_update['group_id'])
                            grp_obj.update(grp_update)
                            grp_obj.save()
                        except exception.GroupNotFound:
                            # Group may be deleted already. Log a warning
                            # and continue.
                            LOG.warning("Group %(grp)s not found while "
                                        "updating driver status.",
                                        {'grp': grp_update['group_id']},
                                        resource={
                                            'type': 'group',
                                            'id': grp_update['group_id']})
                    for vol_update in volume_model_updates:
                        try:
                            vol_obj = objects.Volume.get_by_id(
                                context, vol_update['volume_id'])
                            vol_obj.update(vol_update)
                            vol_obj.save()
                        except exception.VolumeNotFound:
                            # Volume may be deleted already. Log a warning
                            # and continue.
                            LOG.warning("Volume %(vol)s not found while "
                                        "updating driver status.",
                                        {'vol': vol_update['volume_id']},
                                        resource={
                                            'type': 'volume',
                                            'id': vol_update['volume_id']})

                # Append volume stats with 'allocated_capacity_gb'
                self._append_volume_stats(volume_stats)

                # Append filter and goodness function if needed
                volume_stats = (
                    self._append_filter_goodness_functions(volume_stats))

                # queue it to be sent to the Schedulers.
                self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools:
if isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
else:
raise exception.ProgrammingError(
reason='Pools stats reported by the driver are not '
'reported in a list')
# For drivers that are not reporting their stats by pool we will use
# the data from the special fixed pool created by
# _count_allocated_capacity.
elif self.stats.get('pools'):
vol_stats.update(next(iter(self.stats['pools'].values())))
# This is a special subcase of the above no pool case that happens when
# we don't have any volumes yet.
else:
vol_stats.update(self.stats)
vol_stats.pop('pools', None)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
    @periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval)
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish.

        Runs periodically at ``backend_stats_polling_interval``.
        """
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
volume_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
volume_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
    def extend_volume(self, context, volume, new_size, reservations):
        """Extend *volume* to *new_size* GiB.

        Commits the quota *reservations* on success and rolls them back on
        driver failure.  For in-use volumes, Nova is asked to refresh the
        size on all attached instances.  Pool capacity stats are updated
        by the size increase.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.status = 'error_extending'
                volume.save()

        project_id = volume.project_id
        size_increase = (int(new_size)) - volume.size
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            self.driver.extend_volume(volume, new_size)
        except exception.TargetUpdateFailed:
            # We just want to log this but continue on with quota commit
            LOG.warning('Volume extended but failed to update target.')
        except Exception:
            LOG.exception("Extend volume failed.",
                          resource=volume)
            self.message_api.create(
                context,
                message_field.Action.EXTEND_VOLUME,
                resource_uuid=volume.id,
                detail=message_field.Detail.DRIVER_FAILED_EXTEND)
            try:
                self.db.volume_update(context, volume.id,
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                  "to extend volume") %
                                                volume.id)
            finally:
                # NOTE(review): the bare ``return`` in this finally clause
                # deliberately swallows the CinderException raised above —
                # the rollback happens and the method aborts without
                # propagating. Confirm this is intentional before touching.
                QUOTAS.rollback(context, reservations, project_id=project_id)
                return

        QUOTAS.commit(context, reservations, project_id=project_id)

        # Report the volume as in-use if anything is still attached.
        attachments = volume.volume_attachment
        if not attachments:
            orig_volume_status = 'available'
        else:
            orig_volume_status = 'in-use'
        volume.update({'size': int(new_size), 'status': orig_volume_status})
        volume.save()
        if orig_volume_status == 'in-use':
            # Tell Nova so the attached instances see the new size.
            nova_api = compute.API()
            instance_uuids = [attachment.instance_uuid
                              for attachment in attachments]
            nova_api.extend_volume(context, instance_uuids, volume.id)
        pool = volume_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or volume_utils.extract_host(
                    volume.host, 'pool', True)
        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=size_increase)
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
        LOG.info("Extend volume completed successfully.",
                 resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
volume_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
volume_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
    def retype(self, context, volume, new_type_id, host,
               migration_policy='never', reservations=None,
               old_reservations=None):
        """Change *volume* to the volume type *new_type_id*.

        First asks the driver to retype in place (only when the destination
        backend is this one and encryption is unchanged); otherwise falls
        back to a volume migration governed by *migration_policy*.  Quota
        reservations for the old and new types are committed on success and
        rolled back on failure.
        """

        def _retype_error(context, volume, old_reservations,
                          new_reservations, status_update):
            # Restore the volume's previous status and give back both
            # quota reservations.
            try:
                volume.update(status_update)
                volume.save()
            finally:
                if old_reservations:
                    QUOTAS.rollback(context, old_reservations)
                if new_reservations:
                    QUOTAS.rollback(context, new_reservations)

        previous_status = (
            volume.previous_status or volume.status)
        status_update = {'status': previous_status}
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id

        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                # NOTE(flaper87): Other exceptions in this method don't
                # set the volume status to error. Should that be done
                # here? Setting the volume back to it's original status
                # for now.
                volume.update(status_update)
                volume.save()

        # We already got the new reservations
        new_reservations = reservations

        # If volume types have the same contents, no need to do anything.
        # Use the admin contex to be able to access volume extra_specs
        retyped = False
        diff, all_equal = volume_types.volume_types_diff(
            context.elevated(), volume.volume_type_id, new_type_id)
        if all_equal:
            retyped = True

        # Call driver to try and change the type
        retype_model_update = None

        # NOTE(jdg): Check to see if the destination host or cluster (depending
        # if it's the volume is in a clustered backend or not) is the same as
        # the current. If it's not don't call the driver.retype method,
        # otherwise drivers that implement retype may report success, but it's
        # invalid in the case of a migrate.

        # We assume that those that support pools do this internally
        # so we strip off the pools designation
        if (not retyped and
                not diff.get('encryption') and
                self._is_our_backend(host['host'], host.get('cluster_name'))):
            try:
                new_type = volume_types.get_volume_type(context.elevated(),
                                                        new_type_id)
                with volume.obj_as_admin():
                    ret = self.driver.retype(context,
                                             volume,
                                             new_type,
                                             diff,
                                             host)
                # Check if the driver retype provided a model update or
                # just a retype indication
                # NOTE(review): isinstance(ret, tuple) would be the more
                # idiomatic check here.
                if type(ret) == tuple:
                    retyped, retype_model_update = ret
                else:
                    retyped = ret

                if retyped:
                    LOG.info("Volume %s: retyped successfully.", volume.id)
            except Exception:
                # Any driver failure falls through to the migration path.
                retyped = False
                LOG.exception("Volume %s: driver error when trying to "
                              "retype, falling back to generic "
                              "mechanism.", volume.id)

        # We could not change the type, so we need to migrate the volume, where
        # the destination volume will be of the new type
        if not retyped:
            if migration_policy == 'never':
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Retype requires migration but is not allowed.")
                raise exception.VolumeMigrationFailed(reason=msg)

            # Migration cannot carry snapshots along.
            snaps = objects.SnapshotList.get_all_for_volume(context,
                                                            volume.id)
            if snaps:
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not have snapshots.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            # Don't allow volume with replicas to be migrated
            rep_status = volume.replication_status
            if(rep_status is not None and rep_status not in
               [fields.ReplicationStatus.DISABLED,
                fields.ReplicationStatus.NOT_CAPABLE]):
                _retype_error(context, volume, old_reservations,
                              new_reservations, status_update)
                msg = _("Volume must not be replicated.")
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

            volume.migration_status = 'starting'
            volume.save()

            try:
                self.migrate_volume(context, volume, host,
                                    new_type_id=new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    _retype_error(context, volume, old_reservations,
                                  new_reservations, status_update)
        else:
            # In-place retype succeeded: record the new type/host and
            # commit both quota reservations.
            model_update = {'volume_type_id': new_type_id,
                            'host': host['host'],
                            'cluster_name': host.get('cluster_name'),
                            'status': status_update['status']}
            if retype_model_update:
                model_update.update(retype_model_update)
            self._set_replication_status(diff, model_update)
            volume.update(model_update)
            volume.save()

            if old_reservations:
                QUOTAS.commit(context, old_reservations, project_id=project_id)
            if new_reservations:
                QUOTAS.commit(context, new_reservations, project_id=project_id)
        self._notify_about_volume_usage(
            context, volume, "retype",
            extra_usage_info={'volume_type': new_type_id})
        self.publish_service_capabilities(context)
        LOG.info("Retype volume completed successfully.",
                 resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = volume_utils.is_boolean_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info("Manage existing volume completed successfully.",
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = volume_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_cluster_or_host_filters(self):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return filters
def _get_my_volumes_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.VolumeList.get_volume_summary(ctxt, False, filters)
def _get_my_snapshots_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.SnapshotList.get_snapshot_summary(ctxt, False, filters)
def _get_my_resources(self, ctxt, ovo_class_list, limit=None, offset=None):
filters = self._get_cluster_or_host_filters()
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters,
limit=limit,
offset=offset)
def _get_my_volumes(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.VolumeList,
limit, offset)
def _get_my_snapshots(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.SnapshotList,
limit, offset)
    def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
                               sort_dirs, want_objects=False):
        """List backend volumes that could be brought under management.

        Passes the volumes Cinder already knows about to the driver so it
        can flag them, and optionally converts the result into
        ManageableVolumeList objects.

        :returns: driver entries, or [] when the driver does not implement
                  ``get_manageable_volumes``.
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable volumes failed, due "
                              "to uninitialized driver.")

        cinder_volumes = self._get_my_volumes(ctxt)
        try:
            driver_entries = self.driver.get_manageable_volumes(
                cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
            if want_objects:
                driver_entries = (objects.ManageableVolumeList.
                                  from_primitives(ctxt, driver_entries))
        except AttributeError:
            # NOTE(review): an AttributeError raised *inside* the driver's
            # implementation would also land here and be misreported as
            # "not supported" — worth confirming this is acceptable.
            LOG.debug('Driver does not support listing manageable volumes.')
            return []
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable volumes failed, due "
                              "to driver error.")
        return driver_entries
    def create_group(self, context, group):
        """Creates the group.

        Tries the driver's ``create_group`` first; when that is not
        implemented, falls back to the generic implementation or — for the
        default cgsnapshot group type — the legacy consistencygroup driver
        API.  On any failure the group is left in ERROR state.
        """
        context = context.elevated()

        # Make sure the host in the DB matches our own when clustered
        self._set_resource_host(group)

        status = fields.GroupStatus.AVAILABLE
        model_update = None

        self._notify_about_group_usage(context, group, "create.start")

        try:
            utils.require_driver_initialized(self.driver)

            LOG.info("Group %s: creating", group.name)

            try:
                model_update = self.driver.create_group(context, group)
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update = self._create_group_generic(context, group)
                else:
                    # Legacy drivers only understand consistencygroups.
                    cg, __ = self._convert_group_to_cg(group, [])
                    model_update = self.driver.create_consistencygroup(
                        context, cg)

            if model_update:
                if (model_update['status'] ==
                        fields.GroupStatus.ERROR):
                    msg = (_('Create group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error("Group %s: create failed",
                          group.name)

        group.status = status
        group.created_at = timeutils.utcnow()
        group.save()
        LOG.info("Group %s: created successfully", group.name)

        self._notify_about_group_usage(context, group, "create.end")

        LOG.info("Create group completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def create_group_from_src(self, context, group,
                              group_snapshot=None, source_group=None):
        """Creates the group from source.

        The source can be a group snapshot or a source group.  Validates
        the source's members, asks the driver to build the new group
        (falling back to the generic or legacy consistencygroup paths),
        then marks the group and its volumes available.  On failure the
        group and its volumes are set to error.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                                  group.id)
            if group_snapshot:
                try:
                    # Check if group_snapshot still exists
                    group_snapshot.refresh()
                except exception.GroupSnapshotNotFound:
                    LOG.error("Create group from snapshot-%(snap)s failed: "
                              "SnapshotNotFound.",
                              {'snap': group_snapshot.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("snapshot-%s") % group_snapshot.id
                snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                    context, group_snapshot.id)
                # Every source snapshot must be in a usable state.
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_GROUP_SRC_SNAP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
                        raise exception.InvalidGroup(reason=msg)

            if source_group:
                try:
                    source_group.refresh()
                except exception.GroupNotFound:
                    LOG.error("Create group "
                              "from source group-%(group)s failed: "
                              "GroupNotFound.",
                              {'group': source_group.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("group-%s") % source_group.id
                source_vols = objects.VolumeList.get_all_by_generic_group(
                    context, source_group.id)
                # Every source volume must be in a usable state.
                for source_vol in source_vols:
                    if (source_vol.status not in
                            VALID_CREATE_GROUP_SRC_GROUP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol.id,
                                'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
                        raise exception.InvalidGroup(reason=msg)

            # Sort source snapshots so that they are in the same order as their
            # corresponding target volumes.
            sorted_snapshots = None
            if group_snapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)

            # Sort source volumes so that they are in the same order as their
            # corresponding target volumes.
            sorted_source_vols = None
            if source_group and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)

            self._notify_about_group_usage(
                context, group, "create.start")

            utils.require_driver_initialized(self.driver)

            try:
                model_update, volumes_model_update = (
                    self.driver.create_group_from_src(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update, volumes_model_update = (
                        self._create_group_from_src_generic(
                            context, group, volumes, group_snapshot,
                            sorted_snapshots, source_group,
                            sorted_source_vols))
                else:
                    # Legacy consistencygroup path: convert the OVOs,
                    # call the old driver API, then strip the temporary
                    # consistencygroup linkage again.
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    cgsnapshot, sorted_snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, sorted_snapshots, context))
                    source_cg, sorted_source_vols = (
                        self._convert_group_to_cg(source_group,
                                                  sorted_source_vols))
                    model_update, volumes_model_update = (
                        self.driver.create_consistencygroup_from_src(
                            context, cg, volumes, cgsnapshot,
                            sorted_snapshots, source_cg, sorted_source_vols))
                    self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    self._remove_consistencygroup_id_from_volumes(
                        sorted_source_vols)

            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)

            if model_update:
                group.update(model_update)
                group.save()

        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error("Create group "
                          "from source %(source)s failed.",
                          {'source': source_name},
                          resource={'type': 'group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                self._remove_consistencygroup_id_from_volumes(volumes)
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()

        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)

        group.status = status
        group.created_at = now
        group.save()

        self._notify_about_group_usage(
            context, group, "create.end")
        LOG.info("Create group "
                 "from source-%(source)s completed successfully.",
                 {'source': source_name},
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def _create_group_from_src_generic(self, context, group, volumes,
                                       group_snapshot=None, snapshots=None,
                                       source_group=None, source_vols=None):
        """Creates a group from source.

        :param context: the context of the caller.
        :param group: the Group object to be created.
        :param volumes: a list of volume objects in the group.
        :param group_snapshot: the GroupSnapshot object as source.
        :param snapshots: a list of snapshot objects in group_snapshot.
        :param source_group: the Group object as source.
        :param source_vols: a list of volume objects in the source_group.
        :returns: model_update, volumes_model_update
        """
        model_update = {'status': 'available'}
        volumes_model_update = []
        # For each target volume, find its matching source (snapshot or
        # source volume) and clone it with the driver.  A single per-volume
        # failure marks both that volume and the whole group as 'error',
        # but the loop keeps going so every volume gets a model update.
        for vol in volumes:
            if snapshots:
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        vol_model_update = {'id': vol.id}
                        try:
                            driver_update = (
                                self.driver.create_volume_from_snapshot(
                                    vol, snapshot))
                            if driver_update:
                                # The driver must not override the id.
                                driver_update.pop('id', None)
                                vol_model_update.update(driver_update)
                            if 'status' not in vol_model_update:
                                vol_model_update['status'] = 'available'
                        except Exception:
                            vol_model_update['status'] = 'error'
                            model_update['status'] = 'error'
                        volumes_model_update.append(vol_model_update)
                        # Each volume has at most one source snapshot.
                        break
            elif source_vols:
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        vol_model_update = {'id': vol.id}
                        try:
                            driver_update = self.driver.create_cloned_volume(
                                vol, source_vol)
                            if driver_update:
                                driver_update.pop('id', None)
                                vol_model_update.update(driver_update)
                            if 'status' not in vol_model_update:
                                vol_model_update['status'] = 'available'
                        except Exception:
                            vol_model_update['status'] = 'error'
                            model_update['status'] = 'error'
                        volumes_model_update.append(vol_model_update)
                        # Each volume has at most one source volume.
                        break
        return model_update, volumes_model_update
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error("Source snapshot cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error("Source volumes cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
    def _update_volume_from_src(self, context, vol, update, group=None):
        """Copy source metadata onto *vol* and apply *update* in the DB.

        Propagates bootable/multiattach flags and glance metadata from the
        source snapshot or source volume.  On failure the volume (and the
        containing *group*, when given) is marked error and the exception
        re-raised.
        """
        try:
            snapshot_id = vol.get('snapshot_id')
            source_volid = vol.get('source_volid')
            if snapshot_id:
                snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
                orig_vref = self.db.volume_get(context,
                                               snapshot.volume_id)
                if orig_vref.bootable:
                    update['bootable'] = True
                    self.db.volume_glance_metadata_copy_to_volume(
                        context, vol['id'], snapshot_id)
            if source_volid:
                source_vol = objects.Volume.get_by_id(context, source_volid)
                if source_vol.bootable:
                    update['bootable'] = True
                    self.db.volume_glance_metadata_copy_from_volume_to_volume(
                        context, source_volid, vol['id'])
                if source_vol.multiattach:
                    update['multiattach'] = True

        except exception.SnapshotNotFound:
            LOG.error("Source snapshot %(snapshot_id)s cannot be found.",
                      {'snapshot_id': vol['snapshot_id']})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise
        except exception.VolumeNotFound:
            # NOTE(review): ``snapshot`` is only bound when snapshot_id was
            # set; if VolumeNotFound came from the source_volid branch this
            # log line itself raises — confirm intended.
            LOG.error("The source volume %(volume_id)s "
                      "cannot be found.",
                      {'volume_id': snapshot.volume_id})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise
        except exception.CinderException as ex:
            # NOTE(review): this message assumes the snapshot path even when
            # the failure came from the source-volume path.
            LOG.error("Failed to update %(volume_id)s"
                      " metadata using the provided snapshot"
                      " %(snapshot_id)s metadata.",
                      {'volume_id': vol['id'],
                       'snapshot_id': vol['snapshot_id']})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise exception.MetadataCopyFailure(reason=six.text_type(ex))

        self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol, decrement=False, host=None):
# Update allocated capacity in volume stats
host = host or vol['host']
pool = volume_utils.extract_host(host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(host,
'pool',
True)
vol_size = -vol['size'] if decrement else vol['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += vol_size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=max(vol_size, 0))
    def delete_group(self, context, group):
        """Deletes group and the volumes in the group.

        Refuses if any member volume is still attached or belongs to
        another backend.  Asks the driver to delete the group (falling
        back to the generic or legacy consistencygroup paths), then
        destroys each volume and gives back the corresponding quota.
        """
        context = context.elevated()
        # NOTE(review): this first assignment is immediately overwritten by
        # the if/else below — redundant but harmless.
        project_id = group.project_id

        if context.project_id != group.project_id:
            project_id = group.project_id
        else:
            project_id = context.project_id

        volumes = objects.VolumeList.get_all_by_generic_group(
            context, group.id)

        for vol_obj in volumes:
            if vol_obj.attach_status == "attached":
                # Volume is still attached, need to detach first
                raise exception.VolumeAttached(volume_id=vol_obj.id)
            self._check_is_our_resource(vol_obj)

        self._notify_about_group_usage(
            context, group, "delete.start")

        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            try:
                model_update, volumes_model_update = (
                    self.driver.delete_group(context, group, volumes))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update, volumes_model_update = (
                        self._delete_group_generic(context, group, volumes))
                else:
                    # Legacy drivers only understand consistencygroups.
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    model_update, volumes_model_update = (
                        self.driver.delete_consistencygroup(context, cg,
                                                            volumes))
                    self._remove_consistencygroup_id_from_volumes(volumes)

            if volumes_model_update:
                for update in volumes_model_update:
                    # If we failed to delete a volume, make sure the
                    # status for the group is set to error as well
                    if (update['status'] in ['error_deleting', 'error']
                            and model_update['status'] not in
                            ['error_deleting', 'error']):
                        model_update['status'] = update['status']
                self.db.volumes_update(context, volumes_model_update)

            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Delete group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()

        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                # Update volume status to 'error' if driver returns
                # None for volumes_model_update.
                if not volumes_model_update:
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    for vol_obj in volumes:
                        vol_obj.status = 'error'
                        vol_obj.save()

        # Get reservations for group
        try:
            reserve_opts = {'groups': -1}
            grpreservations = GROUP_QUOTAS.reserve(context,
                                                   project_id=project_id,
                                                   **reserve_opts)
        except Exception:
            # Best effort: quota sync will fix the usage later.
            grpreservations = None
            LOG.exception("Delete group "
                          "failed to update usages.",
                          resource={'type': 'group',
                                    'id': group.id})

        for vol in volumes:
            # Get reservations for volume
            try:
                reserve_opts = {'volumes': -1,
                                'gigabytes': -vol.size}
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            vol.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception("Delete group "
                              "failed to update usages.",
                              resource={'type': 'group',
                                        'id': group.id})

            vol.destroy()

            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

            # NOTE(review): this decrements the backend-level counter, not
            # the per-pool one used elsewhere — confirm that asymmetry is
            # intentional.
            self.stats['allocated_capacity_gb'] -= vol.size

        if grpreservations:
            GROUP_QUOTAS.commit(context, grpreservations,
                                project_id=project_id)

        group.destroy()
        self._notify_about_group_usage(
            context, group, "delete.end")
        self.publish_service_capabilities(context)
        LOG.info("Delete group "
                 "completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
# A group entry is already created in db. Just returns a status here.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
    def _collect_volumes_for_group(self, context, group, volumes, add=True):
        """Resolve and validate the volumes named in *volumes*.

        :param volumes: comma-separated string of volume ids (or falsy).
        :param add: True for volumes being added to the group (checked to
                    belong to this backend), False for removals.
        :returns: list of Volume objects, each verified to be in a state
                  valid for the requested operation.
        :raises InvalidVolume: when a volume's status disallows the
                               operation.
        :raises VolumeNotFound: when an id does not resolve.
        """
        if add:
            valid_status = VALID_ADD_VOL_TO_GROUP_STATUS
        else:
            valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS

        volumes_ref = []
        if not volumes:
            return volumes_ref
        for add_vol in volumes.split(','):
            try:
                add_vol_ref = objects.Volume.get_by_id(context, add_vol)
            except exception.VolumeNotFound:
                LOG.error("Update group "
                          "failed to %(op)s volume-%(volume_id)s: "
                          "VolumeNotFound.",
                          {'volume_id': add_vol,
                           'op': 'add' if add else 'remove'},
                          resource={'type': 'group',
                                    'id': group.id})
                raise
            if add_vol_ref.status not in valid_status:
                msg = (_("Can not %(op)s volume %(volume_id)s to "
                         "group %(group_id)s because volume is in an invalid "
                         "state: %(status)s. Valid states are: %(valid)s.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id,
                        'status': add_vol_ref.status,
                        'valid': valid_status,
                        'op': 'add' if add else 'remove'})
                raise exception.InvalidVolume(reason=msg)
            if add:
                # Only additions must live on this backend; removals may
                # reference volumes wherever they are.
                self._check_is_our_resource(add_vol_ref)
            volumes_ref.append(add_vol_ref)

        return volumes_ref
    def update_group(self, context, group,
                     add_volumes=None, remove_volumes=None):
        """Updates group.

        Update group by adding volumes to the group,
        or removing volumes from the group.

        *add_volumes*/*remove_volumes* are comma-separated volume-id
        strings.  The driver is asked first; otherwise the generic or
        legacy consistencygroup path runs.  On failure the group and the
        affected volumes are marked error.
        """
        add_volumes_ref = self._collect_volumes_for_group(context,
                                                          group,
                                                          add_volumes,
                                                          add=True)
        remove_volumes_ref = self._collect_volumes_for_group(context,
                                                             group,
                                                             remove_volumes,
                                                             add=False)
        self._notify_about_group_usage(
            context, group, "update.start")

        try:
            utils.require_driver_initialized(self.driver)

            try:
                model_update, add_volumes_update, remove_volumes_update = (
                    self.driver.update_group(
                        context, group,
                        add_volumes=add_volumes_ref,
                        remove_volumes=remove_volumes_ref))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update, add_volumes_update, remove_volumes_update = (
                        self._update_group_generic(
                            context, group,
                            add_volumes=add_volumes_ref,
                            remove_volumes=remove_volumes_ref))
                else:
                    # Legacy drivers only understand consistencygroups.
                    cg, remove_volumes_ref = self._convert_group_to_cg(
                        group, remove_volumes_ref)
                    model_update, add_volumes_update, remove_volumes_update = (
                        self.driver.update_consistencygroup(
                            context, cg,
                            add_volumes=add_volumes_ref,
                            remove_volumes=remove_volumes_ref))
                    self._remove_consistencygroup_id_from_volumes(
                        remove_volumes_ref)

            volumes_to_update = []
            if add_volumes_update:
                volumes_to_update.extend(add_volumes_update)
            if remove_volumes_update:
                volumes_to_update.extend(remove_volumes_update)
            self.db.volumes_update(context, volumes_to_update)

            if model_update:
                if model_update['status'] in (
                        [fields.GroupStatus.ERROR]):
                    msg = (_('Error occurred when updating group '
                             '%s.') % group.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                group.update(model_update)
                group.save()

        except Exception as e:
            with excutils.save_and_reraise_exception():
                if isinstance(e, exception.VolumeDriverException):
                    LOG.error("Error occurred in the volume driver when "
                              "updating group %(group_id)s.",
                              {'group_id': group.id})
                else:
                    LOG.error("Failed to update group %(group_id)s.",
                              {'group_id': group.id})
                group.status = fields.GroupStatus.ERROR
                group.save()
                for add_vol in add_volumes_ref:
                    add_vol.status = 'error'
                    add_vol.save()
                for rem_vol in remove_volumes_ref:
                    # Only driver errors need the legacy CG linkage cleared;
                    # other paths never set it.
                    if isinstance(e, exception.VolumeDriverException):
                        rem_vol.consistencygroup_id = None
                        rem_vol.consistencygroup = None
                    rem_vol.status = 'error'
                    rem_vol.save()

        # Persist the new membership on each volume.
        for add_vol in add_volumes_ref:
            add_vol.group_id = group.id
            add_vol.save()
        for rem_vol in remove_volumes_ref:
            rem_vol.group_id = None
            rem_vol.save()
        group.status = fields.GroupStatus.AVAILABLE
        group.save()

        self._notify_about_group_usage(
            context, group, "update.end")
        LOG.info("Update group completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
    def create_group_snapshot(self, context, group_snapshot):
        """Creates the group_snapshot.

        Notifies usage, hands the group snapshot to the driver (falling back
        to per-snapshot creation, or to the legacy consistency-group path for
        the default cgsnapshot type, when the driver raises
        NotImplementedError), persists the model updates the driver returned,
        then flips the group_snapshot and its member snapshots to available.

        :param context: security context; elevated internally, while the
            original caller context is passed through to the driver
        :param group_snapshot: GroupSnapshot object to create
        :returns: the created group_snapshot
        :raises VolumeDriverException: if the driver reports an error status
        """
        caller_context = context
        context = context.elevated()

        LOG.info("GroupSnapshot %s: creating.", group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.start")

        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
                      {'grp_snap_id': group_snapshot.id})

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context

            try:
                model_update, snapshots_model_update = (
                    self.driver.create_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                # Driver has no native group-snapshot support: use the
                # generic one-by-one fallback, or the old cgsnapshot API for
                # the default cgsnapshot group type.
                if not group_types.is_default_cgsnapshot_type(
                        group_snapshot.group_type_id):
                    model_update, snapshots_model_update = (
                        self._create_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.create_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)

            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Update db for snapshot.
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap_id = snap_model.pop('id')
                    snap_obj = objects.Snapshot.get_by_id(context, snap_id)
                    snap_obj.update(snap_model)
                    snap_obj.save()
                    # Propagate a member snapshot error up to the group
                    # status unless the group is already in an error state.
                    if (snap_model['status'] in [
                        fields.SnapshotStatus.ERROR_DELETING,
                        fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            [fields.GroupSnapshotStatus.ERROR_DELETING,
                             fields.GroupSnapshotStatus.ERROR]):
                        model_update['status'] = snap_model['status']

            if model_update:
                if model_update['status'] == fields.GroupSnapshotStatus.ERROR:
                    msg = (_('Error occurred when creating group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)

                group_snapshot.update(model_update)
                group_snapshot.save()

        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = fields.GroupSnapshotStatus.ERROR
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()

        for snapshot in snapshots:
            volume_id = snapshot.volume_id
            snapshot_id = snapshot.id
            vol_obj = objects.Volume.get_by_id(context, volume_id)
            if vol_obj.bootable:
                # Bootable volumes carry Glance image metadata that must be
                # copied onto the snapshot so it can be re-imaged later.
                try:
                    self.db.volume_glance_metadata_copy_to_snapshot(
                        context, snapshot_id, volume_id)
                except exception.GlanceMetadataNotFound:
                    # If volume is not created from image, No glance metadata
                    # would be available for that volume in
                    # volume glance metadata table
                    pass
                except exception.CinderException as ex:
                    LOG.error("Failed updating %(snapshot_id)s"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata.",
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
                    raise exception.MetadataCopyFailure(
                        reason=six.text_type(ex))

            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.progress = '100%'
            snapshot.save()

        group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE
        group_snapshot.save()

        LOG.info("group_snapshot %s: created successfully",
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.end")
        return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
driver_update = self.driver.create_snapshot(snapshot)
if driver_update:
driver_update.pop('id', None)
snapshot_model_update.update(driver_update)
if 'status' not in snapshot_model_update:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = (
fields.SnapshotStatus.DELETED)
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
    def delete_group_snapshot(self, context, group_snapshot):
        """Deletes group_snapshot.

        Mirrors :meth:`create_group_snapshot`: drives the driver delete
        (with generic / legacy cgsnapshot fallbacks), applies per-snapshot
        model updates, releases snapshot quota per member, destroys the
        member snapshot rows and finally the group_snapshot itself.

        :param context: security context (elevated internally)
        :param group_snapshot: GroupSnapshot object to delete
        :raises VolumeDriverException: if the driver reports an error status
        """
        caller_context = context
        context = context.elevated()
        project_id = group_snapshot.project_id

        LOG.info("group_snapshot %s: deleting", group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "delete.start")

        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
                      {'grp_snap_id': group_snapshot.id})

            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context

            try:
                model_update, snapshots_model_update = (
                    self.driver.delete_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                # Driver has no native group-snapshot support: use the
                # generic one-by-one fallback, or the old cgsnapshot API for
                # the default cgsnapshot group type.
                if not group_types.is_default_cgsnapshot_type(
                        group_snapshot.group_type_id):
                    model_update, snapshots_model_update = (
                        self._delete_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.delete_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)

            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # NOTE(xyang): snapshots is a list of snapshot objects.
                    # snapshots_model_update should be a list of dicts.
                    snap = next((item for item in snapshots if
                                 item.id == snap_model['id']), None)
                    if snap:
                        snap_model.pop('id')
                        snap.update(snap_model)
                        snap.save()

                        # Propagate a member snapshot error up to the group
                        # status unless it is already in an error state.
                        if (snap_model['status'] in
                                [fields.SnapshotStatus.ERROR_DELETING,
                                 fields.SnapshotStatus.ERROR] and
                                model_update['status'] not in
                                ['error_deleting', 'error']):
                            model_update['status'] = snap_model['status']

            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Error occurred when deleting group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group_snapshot.update(model_update)
                    group_snapshot.save()

        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = fields.GroupSnapshotStatus.ERROR
                group_snapshot.save()
                # Update snapshot status to 'error' if driver returns
                # None for snapshots_model_update.
                if not snapshots_model_update:
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()

        for snapshot in snapshots:
            # Get reservations
            try:
                reserve_opts = {'snapshots': -1}
                if not CONF.no_snapshot_gb_quota:
                    reserve_opts['gigabytes'] = -snapshot.volume_size
                volume_ref = objects.Volume.get_by_id(context,
                                                      snapshot.volume_id)
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)

            except Exception:
                # Best-effort: the snapshot is still removed even if quota
                # usage could not be adjusted.
                reservations = None
                LOG.exception("Failed to update usages deleting snapshot")

            self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                              snapshot.id)
            snapshot.destroy()

            # Commit the reservations
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)

        group_snapshot.destroy()
        LOG.info("group_snapshot %s: deleted successfully",
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(context, group_snapshot,
                                                "delete.end",
                                                snapshots)
    def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
        """Finalize migration process on backend device.

        Asks the driver for model updates for the migrated volume; if the
        driver does not implement update_migrated_volume, falls back to
        pointing the source volume at the new volume's name_id and
        provider_location. Keys the driver changed on the source are
        mirrored back onto the temporary (new) volume's DB record so no
        information is lost when records are swapped.

        :param ctxt: security context
        :param volume: the original (source) Volume object
        :param new_volume: the destination Volume object of the migration
        :param volume_status: original volume status passed to the driver
        """
        model_update = None
        model_update_default = {'_name_id': new_volume.name_id,
                                'provider_location':
                                new_volume.provider_location}
        try:
            model_update = self.driver.update_migrated_volume(ctxt,
                                                              volume,
                                                              new_volume,
                                                              volume_status)
        except NotImplementedError:
            # If update_migrated_volume is not implemented for the driver,
            # _name_id and provider_location will be set with the values
            # from new_volume.
            model_update = model_update_default
        if model_update:
            model_update_default.update(model_update)
            # Swap keys that were changed in the source so we keep their values
            # in the temporary volume's DB record.
            # Need to convert 'metadata' and 'admin_metadata' since
            # they are not keys of volume, their corresponding keys are
            # 'volume_metadata' and 'volume_admin_metadata'.
            model_update_new = dict()
            for key in model_update:
                if key == 'metadata':
                    if volume.get('volume_metadata'):
                        model_update_new[key] = {
                            metadata['key']: metadata['value']
                            for metadata in volume.volume_metadata}
                elif key == 'admin_metadata':
                    model_update_new[key] = {
                        metadata['key']: metadata['value']
                        for metadata in volume.volume_admin_metadata}
                else:
                    model_update_new[key] = volume[key]
            with new_volume.obj_as_admin():
                new_volume.update(model_update_new)
                new_volume.save()
        with volume.obj_as_admin():
            volume.update(model_update_default)
            volume.save()
    # Replication V2.1 and a/a method
    def failover(self, context, secondary_backend_id=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.

        :param context: security context
        :param secondary_backend_id: Specifies backend_id to fail over to
        """
        updates = {}
        repl_status = fields.ReplicationStatus

        service = self._get_service()

        # TODO(geguileo): We should optimize these updates by doing them
        # directly on the DB with just 3 queries, one to change the volumes
        # another to change all the snapshots, and another to get replicated
        # volumes.

        # Change non replicated volumes and their snapshots to error if we are
        # failing over, leave them as they are for failback
        volumes = self._get_my_volumes(context)

        replicated_vols = []
        for volume in volumes:
            if volume.replication_status not in (repl_status.DISABLED,
                                                 repl_status.NOT_CAPABLE):
                replicated_vols.append(volume)
            elif secondary_backend_id != self.FAILBACK_SENTINEL:
                volume.previous_status = volume.status
                volume.status = 'error'
                volume.replication_status = repl_status.NOT_CAPABLE
                volume.save()

                for snapshot in volume.snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()

        volume_update_list = None
        group_update_list = None
        try:
            # For non clustered we can call v2.1 failover_host, but for
            # clustered we call a/a failover method. We know a/a method
            # exists because BaseVD class wouldn't have started if it didn't.
            failover = getattr(self.driver,
                               'failover' if service.is_clustered
                               else 'failover_host')
            # expected form of volume_update_list:
            # [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
            #  {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
            # It includes volumes in replication groups and those not in them
            # expected form of group_update_list:
            # [{group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}},
            #  {group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}}]
            filters = self._get_cluster_or_host_filters()
            groups = objects.GroupList.get_all_replicated(context,
                                                          filters=filters)
            active_backend_id, volume_update_list, group_update_list = (
                failover(context,
                         replicated_vols,
                         secondary_id=secondary_backend_id,
                         groups=groups))
            try:
                update_data = {u['volume_id']: u['updates']
                               for u in volume_update_list}
            except KeyError:
                msg = "Update list, doesn't include volume_id"
                raise exception.ProgrammingError(reason=msg)
            try:
                update_group_data = {g['group_id']: g['updates']
                                     for g in group_update_list}
            except KeyError:
                msg = "Update list, doesn't include group_id"
                raise exception.ProgrammingError(reason=msg)
        except Exception as exc:
            # NOTE(jdg): Drivers need to be aware if they fail during
            # a failover sequence, we're expecting them to cleanup
            # and make sure the driver state is such that the original
            # backend is still set as primary as per driver memory

            # We don't want to log the exception trace invalid replication
            # target
            if isinstance(exc, exception.InvalidReplicationTarget):
                log_method = LOG.error
                # Preserve the replication_status: Status should be failed over
                # if we were failing back or if we were failing over from one
                # secondary to another secondary. In both cases
                # active_backend_id will be set.
                if service.active_backend_id:
                    updates['replication_status'] = repl_status.FAILED_OVER
                else:
                    updates['replication_status'] = repl_status.ENABLED
            else:
                log_method = LOG.exception
                updates.update(disabled=True,
                               replication_status=repl_status.FAILOVER_ERROR)

            log_method("Error encountered during failover on host: %(host)s "
                       "to %(backend_id)s: %(error)s",
                       {'host': self.host, 'backend_id': secondary_backend_id,
                        'error': exc})
            # We dump the update list for manual recovery
            LOG.error('Failed update_list is: %s', volume_update_list)
            self.finish_failover(context, service, updates)
            return

        if secondary_backend_id == "default":
            # Failback: re-enable the service unless it is frozen.
            updates['replication_status'] = repl_status.ENABLED
            updates['active_backend_id'] = ''
            updates['disabled'] = service.frozen
            updates['disabled_reason'] = 'frozen' if service.frozen else ''
        else:
            updates['replication_status'] = repl_status.FAILED_OVER
            updates['active_backend_id'] = active_backend_id
            updates['disabled'] = True
            updates['disabled_reason'] = 'failed-over'

        self.finish_failover(context, service, updates)

        for volume in replicated_vols:
            update = update_data.get(volume.id, {})
            if update.get('status', '') == 'error':
                update['replication_status'] = repl_status.FAILOVER_ERROR
            elif update.get('replication_status') in (None,
                                                      repl_status.FAILED_OVER):
                update['replication_status'] = updates['replication_status']

            if update['replication_status'] == repl_status.FAILOVER_ERROR:
                update.setdefault('status', 'error')
                # Set all volume snapshots to error
                for snapshot in volume.snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
            if 'status' in update:
                update['previous_status'] = volume.status
            volume.update(update)
            volume.save()

        for grp in groups:
            update = update_group_data.get(grp.id, {})
            if update.get('status', '') == 'error':
                update['replication_status'] = repl_status.FAILOVER_ERROR
            elif update.get('replication_status') in (None,
                                                      repl_status.FAILED_OVER):
                update['replication_status'] = updates['replication_status']

            if update['replication_status'] == repl_status.FAILOVER_ERROR:
                update.setdefault('status', 'error')
            grp.update(update)
            grp.save()

        LOG.info("Failed over to replication target successfully.")

    # TODO(geguileo): In P - remove this
    failover_host = failover
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
# races with services that may be starting..
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
This method takes care of the last part and is called from the service
doing the failover of the volumes after finished processing the
volumes.
"""
service = self._get_service()
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.')
service = self._get_service()
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info("Set backend status to frozen successfully.")
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.')
return False
service = self._get_service()
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info("Thawed backend successfully.")
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
LOG.exception("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.",
{'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to uninitialized driver.")
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except AttributeError:
LOG.debug('Driver does not support listing manageable snapshots.')
return []
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Listing manageable snapshots failed, due "
"to driver error.")
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
@utils.trace
def get_backup_device(self, ctxt, backup, want_objects=False,
async_call=False):
try:
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
except Exception as ex:
if async_call:
LOG.exception("Failed to get backup device. "
"Calling backup continue_backup to cleanup")
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device=None)
return
else:
while excutils.save_and_reraise_exception():
LOG.exception("Failed to get backup device.")
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
backup_device = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict, ctxt)
if want_objects else backup_device_dict)
if async_call:
# we have to use an rpc call back to the backup manager to
# continue the backup
LOG.info("Calling backup continue_backup for: {}".format(backup))
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device)
else:
# The rpc api version doesn't support the async callback
# so we fallback to returning the value itself.
return backup_device
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
    def _connection_create(self, ctxt, volume, attachment, connector):
        """Create an export and return the connection info for an attachment.

        Validates *connector*, creates the backend export for *volume*,
        initializes the driver connection and records the 'attaching' state
        on the attachment row. If initialize_connection fails, the export
        just created is removed again before re-raising.

        :returns: flattened connection_info dict (the driver's nested 'data'
            merged into the top level) with 'attachment_id' injected.
        """
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.error(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)

        try:
            model_update = self.driver.create_export(ctxt.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            err_msg = (_("Create export for volume failed (%s).") % ex.msg)
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)

        try:
            if model_update:
                volume.update(model_update)
                volume.save()
        except exception.CinderException as ex:
            LOG.exception("Model update failed.", resource=volume)
            raise exception.ExportFailure(reason=six.text_type(ex))

        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except exception.ConnectorRejected:
            with excutils.save_and_reraise_exception():
                LOG.info("The connector was rejected by the volume driver.")
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            # Undo the export created above so we don't leak it.
            self.driver.remove_export(ctxt.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        conn_info = self._parse_connection_options(ctxt, volume, conn_info)

        # NOTE(jdg): Get rid of the nested dict (data key)
        conn_data = conn_info.pop('data', {})
        connection_info = conn_data.copy()
        connection_info.update(conn_info)
        values = {'volume_id': volume.id,
                  'attach_status': 'attaching',
                  'connector': jsonutils.dumps(connector)}

        # TODO(mriedem): Use VolumeAttachment.save() here.
        self.db.volume_attachment_update(ctxt, attachment.id, values)

        connection_info['attachment_id'] = attachment.id
        return connection_info
    def attachment_update(self,
                          context,
                          vref,
                          connector,
                          attachment_id):
        """Update/Finalize an attachment.

        This call updates a valid attachment record to associate with a volume
        and provide the caller with the proper connection info. Note that
        this call requires an `attachment_ref`. It's expected that prior to
        this call that the volume and an attachment UUID has been reserved.

        param: vref: Volume object to create attachment for
        param: connector: Connector object to use for attachment creation
        param: attachment_ref: ID of the attachment record to update
        """
        mode = connector.get('mode', 'rw')
        self._notify_about_volume_usage(context, vref, 'attach.start')
        attachment_ref = objects.VolumeAttachment.get_by_id(context,
                                                            attachment_id)

        # Check to see if a mode parameter was set during attachment-create;
        # this seems kinda wonky, but it's how we're keeping back compatibility
        # with the use of connector.mode for now. In other words, we're
        # making sure we still honor ro settings from the connector but
        # we override that if a value was specified in attachment-create
        if attachment_ref.attach_mode != 'null':
            mode = attachment_ref.attach_mode
            connector['mode'] = mode

        connection_info = self._connection_create(context,
                                                  vref,
                                                  attachment_ref,
                                                  connector)
        try:
            utils.require_driver_initialized(self.driver)
            self.driver.attach_volume(context,
                                      vref,
                                      attachment_ref.instance_uuid,
                                      connector.get('host', ''),
                                      connector.get('mountpoint', 'na'))
        except Exception as err:
            # Surface the failure as a user message, flag the attachment as
            # error_attaching and re-raise.
            self.message_api.create(
                context, message_field.Action.UPDATE_ATTACHMENT,
                resource_uuid=vref.id,
                exception=err)
            with excutils.save_and_reraise_exception():
                self.db.volume_attachment_update(
                    context, attachment_ref.id,
                    {'attach_status':
                     fields.VolumeAttachStatus.ERROR_ATTACHING})

        self.db.volume_attached(context.elevated(),
                                attachment_ref.id,
                                attachment_ref.instance_uuid,
                                connector.get('host', ''),
                                connector.get('mountpoint', 'na'),
                                mode,
                                False)
        vref.refresh()
        attachment_ref.refresh()
        LOG.info("attachment_update completed successfully.",
                 resource=vref)
        return connection_info
    def _connection_terminate(self, context, volume,
                              attachment, force=False):
        """Remove a volume connection, but leave attachment.

        Exits early if the attachment does not have a connector and returns
        None to indicate shared connections are irrelevant.

        :returns: None when no backend connection existed, otherwise the
            driver's boolean for whether other attachments still share this
            connection (non-boolean driver returns are coerced to False).
        """
        utils.require_driver_initialized(self.driver)
        connector = attachment.connector
        if not connector and not force:
            # It's possible to attach a volume to a shelved offloaded server
            # in nova, and a shelved offloaded server is not on a compute host,
            # which means the attachment was made without a host connector,
            # so if we don't have a connector we can't terminate a connection
            # that was never actually made to the storage backend, so just
            # log a message and exit.
            LOG.debug('No connector for attachment %s; skipping storage '
                      'backend terminate_connection call.', attachment.id)
            # None indicates we don't know and don't care.
            return None
        try:
            shared_connections = self.driver.terminate_connection(volume,
                                                                  connector,
                                                                  force=force)
            if not isinstance(shared_connections, bool):
                shared_connections = False
        except Exception as err:
            err_msg = (_('Terminate volume connection failed: %(err)s')
                       % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        LOG.info("Terminate volume connection completed successfully.",
                 resource=volume)
        # NOTE(jdg): Return True/False if there are other outstanding
        # attachments that share this connection. If True should signify
        # caller to preserve the actual host connection (work should be
        # done in the brick connector as it has the knowledge of what's
        # going on here.
        return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
param: vref: Volume object associated with the attachment
param: attachment: Attachment reference object to remove
NOTE if the attachment reference is None, we remove all existing
attachments for the specified volume object.
"""
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
self._do_attachment_delete(context, vref, attachment)
else:
self._do_attachment_delete(context, vref, attachment_ref)
    def _do_attachment_delete(self, context, vref, attachment):
        """Terminate the connection and detach a single attachment.

        Emits detach usage notifications, tears down the backend connection,
        detaches the volume and removes the export when no other attachment
        shares the connection. A driver failure flags the attachment as
        error_detaching instead of raising.
        """
        utils.require_driver_initialized(self.driver)
        self._notify_about_volume_usage(context, vref, "detach.start")
        # None means "no connection existed"; False means "connection was
        # exclusive", so the export below is only kept when explicitly shared.
        has_shared_connection = self._connection_terminate(context,
                                                           vref,
                                                           attachment)
        try:
            LOG.debug('Deleting attachment %(attachment_id)s.',
                      {'attachment_id': attachment.id},
                      resource=vref)
            self.driver.detach_volume(context, vref, attachment)
            if has_shared_connection is not None and not has_shared_connection:
                self.driver.remove_export(context.elevated(), vref)
        except Exception:
            # FIXME(jdg): Obviously our volume object is going to need some
            # changes to deal with multi-attach and figuring out how to
            # represent a single failed attach out of multiple attachments

            # TODO(jdg): object method here
            self.db.volume_attachment_update(
                context, attachment.get('id'),
                {'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
        else:
            self.db.volume_detached(context.elevated(), vref.id,
                                    attachment.get('id'))
            self.db.volume_admin_metadata_delete(context.elevated(),
                                                 vref.id,
                                                 'attached_mode')
        self._notify_about_volume_usage(context, vref, "detach.end")
    # Replication group API (Tiramisu)
    def enable_replication(self, ctxt, group):
        """Enable replication.

        Verifies the group and all of its volumes are in the 'enabling'
        replication state, then asks the driver to enable replication and
        persists the returned model updates. On success everything is moved
        to 'enabled'; on failure both the group and (when the driver gave no
        per-volume updates) its volumes are flagged as errored.

        :param ctxt: security context
        :param group: Group object to enable replication on
        :raises InvalidGroup/InvalidVolume: on wrong starting state
        :raises VolumeDriverException: if the driver reports an error
        :raises ReplicationGroupError: wrapping any other driver failure
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.ENABLING:
            msg = _("Replication status in group %s is not "
                    "enabling. Cannot enable replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)

        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if vol.replication_status != fields.ReplicationStatus.ENABLING:
                msg = _("Replication status in volume %s is not "
                        "enabling. Cannot enable replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)

        self._notify_about_group_usage(
            ctxt, group, "enable_replication.start")

        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)

            model_update, volumes_model_update = (
                self.driver.enable_replication(ctxt, group, volumes))

            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    # If we failed to enable a volume, make sure the status
                    # for the group is set to error as well
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')

            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Enable replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()

        except exception.CinderException as ex:
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Enable replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)

        for vol in volumes:
            vol.replication_status = fields.ReplicationStatus.ENABLED
            vol.save()
        group.replication_status = fields.ReplicationStatus.ENABLED
        group.save()

        self._notify_about_group_usage(
            ctxt, group, "enable_replication.end", volumes)
        LOG.info("Enable replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
# Replication group API (Tiramisu)
    def disable_replication(self, ctxt, group):
        """Disable replication for a group and all of its volumes.

        The API layer must already have set the group and every member
        volume to the DISABLING replication status; anything else is
        rejected.  On success the group and its volumes end up DISABLED;
        on driver failure they are moved to ERROR and a
        ReplicationGroupError is raised.

        :param ctxt: request context
        :param group: Group object to disable replication on
        :raises InvalidGroup: if the group is not in DISABLING status
        :raises InvalidVolume: if any member volume is not in DISABLING
        :raises VolumeDriverException: if the driver reports ERROR status
        :raises ReplicationGroupError: if the driver raises CinderException
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.DISABLING:
            msg = _("Replication status in group %s is not "
                    "disabling. Cannot disable replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if (vol.replication_status !=
                    fields.ReplicationStatus.DISABLING):
                msg = _("Replication status in volume %s is not "
                        "disabling. Cannot disable replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
        self._notify_about_group_usage(
            ctxt, group, "disable_replication.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            # Driver returns group-level and per-volume model updates.
            model_update, volumes_model_update = (
                self.driver.disable_replication(ctxt, group, volumes))
            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    # If we failed to disable replication on a volume, make
                    # sure the status for the group is set to error as well.
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')
            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Disable replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except exception.CinderException as ex:
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Disable replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)
        # Success: mark every volume and the group itself as DISABLED.
        for vol in volumes:
            vol.replication_status = fields.ReplicationStatus.DISABLED
            vol.save()
        group.replication_status = fields.ReplicationStatus.DISABLED
        group.save()
        self._notify_about_group_usage(
            ctxt, group, "disable_replication.end", volumes)
        LOG.info("Disable replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
# Replication group API (Tiramisu)
    def failover_replication(self, ctxt, group, allow_attached_volume=False,
                             secondary_backend_id=None):
        """Fail over replication for a group and all of its volumes.

        The API layer must already have set the group and every member
        volume to the FAILING_OVER replication status.  Passing
        secondary_backend_id="default" means fail *back*, so the final
        status becomes ENABLED instead of FAILED_OVER.

        :param ctxt: request context
        :param group: Group object to fail over
        :param allow_attached_volume: permit failover of 'in-use' volumes
        :param secondary_backend_id: target backend id, or "default" to
            fail back to the primary backend
        :raises InvalidGroup: if the group is not in FAILING_OVER status
        :raises InvalidVolume: if a volume is attached (and not allowed)
            or is not in FAILING_OVER status
        :raises VolumeDriverException: if the driver reports ERROR status
        :raises ReplicationGroupError: if the driver raises CinderException
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.FAILING_OVER:
            msg = _("Replication status in group %s is not "
                    "failing-over. Cannot failover replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if vol.status == 'in-use' and not allow_attached_volume:
                msg = _("Volume %s is attached but allow_attached_volume flag "
                        "is False. Cannot failover replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            if (vol.replication_status !=
                    fields.ReplicationStatus.FAILING_OVER):
                msg = _("Replication status in volume %s is not "
                        "failing-over. Cannot failover replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
        self._notify_about_group_usage(
            ctxt, group, "failover_replication.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            # Driver returns group-level and per-volume model updates.
            model_update, volumes_model_update = (
                self.driver.failover_replication(
                    ctxt, group, volumes, secondary_backend_id))
            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    # If we failed to fail over a volume, make sure the
                    # status for the group is set to error as well.
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')
            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Failover replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except exception.CinderException as ex:
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            # Update volume status to 'error' if driver returns
            # None for volumes_model_update.
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Failover replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)
        # "default" means fail back, so replication is ENABLED again;
        # any other target leaves the resources FAILED_OVER.
        for vol in volumes:
            if secondary_backend_id == "default":
                vol.replication_status = fields.ReplicationStatus.ENABLED
            else:
                vol.replication_status = (
                    fields.ReplicationStatus.FAILED_OVER)
            vol.save()
        if secondary_backend_id == "default":
            group.replication_status = fields.ReplicationStatus.ENABLED
        else:
            group.replication_status = fields.ReplicationStatus.FAILED_OVER
        group.save()
        self._notify_about_group_usage(
            ctxt, group, "failover_replication.end", volumes)
        LOG.info("Failover replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
def list_replication_targets(self, ctxt, group):
"""Provide a means to obtain replication targets for a group.
This method is used to find the replication_device config
info. 'backend_id' is a required key in 'replication_device'.
Response Example for admin:
.. code:: json
{
"replication_targets": [
{
"backend_id": "vendor-id-1",
"unique_key": "val1"
},
{
"backend_id": "vendor-id-2",
"unique_key": "val2"
}
]
}
Response example for non-admin:
.. code:: json
{
"replication_targets": [
{
"backend_id": "vendor-id-1"
},
{
"backend_id": "vendor-id-2"
}
]
}
"""
replication_targets = []
try:
group.refresh()
if self.configuration.replication_device:
if ctxt.is_admin:
for rep_dev in self.configuration.replication_device:
keys = rep_dev.keys()
dev = {}
for k in keys:
dev[k] = rep_dev[k]
replication_targets.append(dev)
else:
for rep_dev in self.configuration.replication_device:
dev = rep_dev.get('backend_id')
if dev:
replication_targets.append({'backend_id': dev})
except exception.GroupNotFound:
err_msg = (_("Get replication targets failed. Group %s not "
"found.") % group.id)
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return {'replication_targets': replication_targets}
# (stray dataset-metadata residue removed: "45.209275 | 79 | 0.554365")
import requests
import time
from castellan import key_manager
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.backup import rpcapi as backup_rpcapi
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder.keymgr import migration as key_migration
from cinder import manager
from cinder.message import api as message_api
from cinder.message import message_field
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import volume_migration
from cinder.volume import volume_types
from cinder.volume import volume_utils
# Module-level logger for this service.
LOG = logging.getLogger(__name__)
# Quota engines for per-volume and per-group resources.
QUOTAS = quota.QUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
# Volume statuses that still allow removing a volume from a group.
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
    'available',
    'in-use',
    'error',
    'error_deleting')
# Volume statuses that allow adding a volume to a group.
VALID_ADD_VOL_TO_GROUP_STATUS = (
    'available',
    'in-use')
# Source snapshot/group statuses accepted when creating a group from them.
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
# Short alias used throughout this module.
VA_LIST = objects.VolumeAttachmentList
# Options for the volume manager itself; registered in the default
# config group below (see CONF.register_opts).
volume_manager_opts = [
    cfg.IntOpt('migration_create_volume_timeout_secs',
               default=300,
               help='Timeout for creating the volume to migrate to '
                    'when performing volume migration (seconds)'),
    cfg.BoolOpt('volume_service_inithost_offload',
                default=False,
                help='Offload pending volume delete during '
                     'volume service startup'),
    cfg.StrOpt('zoning_mode',
               help="FC Zoning mode configured, only 'fabric' is "
                    "supported now."),
    cfg.IntOpt('reinit_driver_count',
               default=3,
               help='Maximum times to reintialize the driver '
                    'if volume initialization fails. The interval of retry is '
                    'exponentially backoff, and will be 1s, 2s, 4s etc.'),
    cfg.IntOpt('init_host_max_objects_retrieval',
               default=0,
               help='Max number of volumes and snapshots to be retrieved '
                    'per batch during volume manager host initialization. '
                    'Query results will be obtained in batches from the '
                    'database and not in one shot to avoid extreme memory '
                    'usage. Set 0 to turn off this functionality.'),
    cfg.IntOpt('backend_stats_polling_interval',
               default=60,
               min=3,
               help='Time in seconds between requests for usage statistics '
                    'from the backend. Be aware that generating usage '
                    'statistics is expensive for some backends, so setting '
                    'this value too low may adversely affect performance.'),
]
# Per-backend options; registered in the shared backend-defaults group
# below so each enabled backend section can override them.
volume_backend_opts = [
    cfg.StrOpt('volume_driver',
               default='cinder.volume.drivers.lvm.LVMVolumeDriver',
               help='Driver to use for volume creation'),
    cfg.StrOpt('extra_capabilities',
               default='{}',
               help='User defined capabilities, a JSON formatted string '
                    'specifying key/value pairs. The key/value pairs can '
                    'be used by the CapabilitiesFilter to select between '
                    'backends when requests specify volume types. For '
                    'example, specifying a service level or the geographical '
                    'location of a backend, then creating a volume type to '
                    'allow the user to select by these different '
                    'properties.'),
    cfg.BoolOpt('suppress_requests_ssl_warnings',
                default=False,
                help='Suppress requests library SSL certificate warnings.'),
    cfg.IntOpt('backend_native_threads_pool_size',
               default=20,
               min=20,
               help='Size of the native threads pool for the backend. '
                    'Increase for backends that heavily rely on this, like '
                    'the RBD driver.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
CONF.register_opts(volume_backend_opts, group=config.SHARED_CONF_GROUP)
# Deprecated driver import path -> current replacement path.  Configurations
# still using an old (left-hand) path are rewritten at manager start-up,
# with a deprecation warning (see VolumeManager.__init__).
MAPPING = {
    'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver':
    'cinder.volume.drivers.dell_emc.powermax.iscsi.PowerMaxISCSIDriver',
    'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver':
    'cinder.volume.drivers.dell_emc.powermax.fc.PowerMaxFCDriver',
    'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver':
    'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_fc.FJDXFCDriver',
    'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver':
    'cinder.volume.drivers.fujitsu.eternus_dx.eternus_dx_iscsi.'
    'FJDXISCSIDriver',
    'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver':
    'cinder.volume.drivers.dell_emc.vxflexos.driver.VxFlexOSDriver',
}
class VolumeManager(manager.CleanableManager,
                    manager.SchedulerDependentManager):
    """Manages volumes on a backend through a configurable volume driver."""
    # RPC interface version this manager implements.
    RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
    # Sentinel backend id meaning "fail back to the default/primary backend"
    # (see failover handling below).
    FAILBACK_SENTINEL = 'default'
    target = messaging.Target(version=RPC_API_VERSION)
    # Properties skipped when cloning a Volume: fields like volume_type
    # and volume_attachment, because the db sets that according to
    # [field]_id, which we do copy.  We also skip some other values that
    # are set during creation of the Volume object.
    _VOLUME_CLONE_SKIP_PROPERTIES = {
        'id', '_name_id', 'name_id', 'name', 'status',
        'attach_status', 'migration_status', 'volume_type',
        'consistencygroup', 'volume_attachment', 'group'}
def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
host = host or self.host
ctxt = context.get_admin_context()
svc_host = volume_utils.extract_host(host, 'backend')
return objects.Service.get_by_args(ctxt, svc_host, binary)
    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load and initialize the configured volume driver.

        :param volume_driver: import path of the driver to load; falls
            back to the backend configuration's ``volume_driver`` option
        :param service_name: backend config section name; None means unit
            tests, in which case 'backend_defaults' is used
        :raises VolumeDriverException: when clustered but the driver does
            not support Active-Active
        """
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        # NOTE(dulek): service_name=None means we're running in unit tests.
        service_name = service_name or 'backend_defaults'
        self.configuration = config.Configuration(volume_backend_opts,
                                                  config_group=service_name)
        self._set_tpool_size(
            self.configuration.backend_native_threads_pool_size)
        self.stats = {}
        self.service_uuid = None
        if not volume_driver:
            # Fall back to the backend configuration's driver option.
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            # Rewrite deprecated driver paths to their replacements.
            LOG.warning("Driver path %s is deprecated, update your "
                        "configuration to the new path.", volume_driver)
            volume_driver = MAPPING[volume_driver]
        vol_db_empty = self._set_voldb_empty_at_startup_indicator(
            context.get_admin_context())
        LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
        # Pass the current active_backend_id to the driver so a restarted
        # service resumes in the correct (possibly failed-over) state.
        curr_active_backend_id = None
        try:
            service = self._get_service()
        except exception.ServiceNotFound:
            # First start (or unit tests): no service row yet, use defaults.
            LOG.info("Service not found for updating "
                     "active_backend_id, assuming default "
                     "for driver init.")
        else:
            curr_active_backend_id = service.active_backend_id
            self.service_uuid = service.uuid
        if self.configuration.suppress_requests_ssl_warnings:
            LOG.warning("Suppressing requests library SSL Warnings")
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecureRequestWarning)
            requests.packages.urllib3.disable_warnings(
                requests.packages.urllib3.exceptions.InsecurePlatformWarning)
        self.key_manager = key_manager.API(CONF)
        driver_additional_endpoints = []
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db,
            host=self.host,
            cluster_name=self.cluster,
            is_vol_db_empty=vol_db_empty,
            active_backend_id=curr_active_backend_id,
            additional_endpoints=driver_additional_endpoints)
        # The driver may register extra RPC endpoints of its own.
        self.additional_endpoints.extend(driver_additional_endpoints)
        if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
            msg = _('Active-Active configuration is not currently supported '
                    'by driver %s.') % volume_driver
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        self.message_api = message_api.API()
        if CONF.profiler.enabled and profiler is not None:
            # Wrap the driver so osprofiler traces all of its calls.
            self.driver = profiler.trace_cls("driver")(self.driver)
        try:
            self.extra_capabilities = jsonutils.loads(
                self.driver.configuration.extra_capabilities)
        except AttributeError:
            # Driver has no extra_capabilities option at all.
            self.extra_capabilities = {}
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Invalid JSON: %s",
                          self.driver.configuration.extra_capabilities)
        # A per-backend availability zone overrides the service-wide one.
        backend_zone = self.driver.configuration.safe_get(
            'backend_availability_zone')
        if backend_zone:
            self.availability_zone = backend_zone
        if self.driver.configuration.safe_get(
                'image_volume_cache_enabled'):
            max_cache_size = self.driver.configuration.safe_get(
                'image_volume_cache_max_size_gb')
            max_cache_entries = self.driver.configuration.safe_get(
                'image_volume_cache_max_count')
            self.image_volume_cache = image_cache.ImageVolumeCache(
                self.db,
                cinder_volume.API(),
                max_cache_size,
                max_cache_entries
            )
            LOG.info('Image-volume cache enabled for host %(host)s.',
                     {'host': self.host})
        else:
            LOG.info('Image-volume cache disabled for host %(host)s.',
                     {'host': self.host})
            self.image_volume_cache = None
    def _count_allocated_capacity(self, ctxt, volume):
        """Add *volume*'s size to the per-pool allocated capacity stats.

        Resolves the pool the volume lives in (from its host string, the
        driver, or the backend name as fallback) and bumps both the pool
        and the global 'allocated_capacity_gb' counters in self.stats.
        """
        pool = volume_utils.extract_host(volume['host'], 'pool')
        if pool is None:
            # No pool encoded in the host string; ask the driver.
            try:
                pool = self.driver.get_pool(volume)
            except Exception:
                LOG.exception('Fetch volume pool name failed.',
                              resource=volume)
                return
            if pool:
                # Persist the discovered pool back into volume.host so we
                # don't have to ask the driver again.
                new_host = volume_utils.append_host(volume['host'],
                                                    pool)
                self.db.volume_update(ctxt, volume['id'],
                                      {'host': new_host})
            else:
                # Legacy volume with no pool info anywhere: fall back to
                # the backend name (or the host's backend part).
                pool = (self.driver.configuration.safe_get(
                    'volume_backend_name') or volume_utils.extract_host(
                        volume['host'], 'pool', True))
        try:
            pool_stat = self.stats['pools'][pool]
        except KeyError:
            # First volume in the pool
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=0)
            pool_stat = self.stats['pools'][pool]
        pool_sum = pool_stat['allocated_capacity_gb']
        pool_sum += volume['size']
        self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
        self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info("Determined volume DB was empty at startup.")
return True
else:
LOG.info("Determined volume DB was not empty at startup.")
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
if snapshot_updates:
for snap in snapshots:
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info('Including all resources from host %(host)s in cluster '
'%(cluster)s.',
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_gs = objects.GroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'%(num_gs)s generic groups and %(num_cache)s image '
'volume caches from host %(host)s have been included in '
'cluster %(cluster)s.',
{'num_vols': num_vols, 'num_cgs': num_cgs, 'num_gs': num_gs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf",
resource={'type': 'driver',
'id': self.__class__.__name__})
return
self._init_host(added_to_cluster, **kwargs)
if not self.driver.initialized:
reinit_count = 0
while reinit_count < CONF.reinit_driver_count:
time.sleep(2 ** reinit_count)
self._init_host(added_to_cluster, **kwargs)
if self.driver.initialized:
return
reinit_count += 1
    def _init_host(self, added_to_cluster=None, **kwargs):
        """Perform one driver-initialization attempt.

        Sets up the driver, rebuilds in-memory capacity stats from the
        existing volumes (optionally in batches controlled by
        init_host_max_objects_retrieval), re-exports in-use volumes,
        kicks off fixed-key migration, and publishes capabilities.
        Returns early (leaving the driver uninitialized) on setup errors.
        """
        ctxt = context.get_admin_context()
        if added_to_cluster:
            # Just added to a cluster: claim this host's resources for it.
            self._include_resources_in_cluster(ctxt)
        LOG.info("Starting volume driver %(driver_name)s (%(version)s)",
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception:
            LOG.exception("Failed to initialize driver.",
                          resource={'type': 'driver',
                                    'id': self.__class__.__name__})
            # We don't want to continue since we failed
            # to initialize the driver correctly.
            return
        # Initialize backend capabilities list
        self.driver.init_capabilities()
        # Zero stats
        self.stats['pools'] = {}
        self.stats.update({'allocated_capacity_gb': 0})
        # Batch retrieval volumes and snapshots
        num_vols, num_snaps, max_objs_num, req_range = None, None, None, [0]
        req_limit = CONF.init_host_max_objects_retrieval
        use_batch_objects_retrieval = req_limit > 0
        if use_batch_objects_retrieval:
            # Get total number of volumes
            num_vols, __, __ = self._get_my_volumes_summary(ctxt)
            # Get total number of snapshots
            num_snaps, __ = self._get_my_snapshots_summary(ctxt)
            # Calculate highest number of the objects (volumes or snapshots)
            max_objs_num = max(num_vols, num_snaps)
            # Make batch request loop counter
            req_range = range(0, max_objs_num, req_limit)
        volumes_to_migrate = volume_migration.VolumeMigrationList()
        for req_offset in req_range:
            # Retrieve 'req_limit' number of objects starting from
            # 'req_offset' position
            volumes, snapshots = None, None
            if use_batch_objects_retrieval:
                if req_offset < num_vols:
                    volumes = self._get_my_volumes(ctxt,
                                                   limit=req_limit,
                                                   offset=req_offset)
                else:
                    volumes = objects.VolumeList()
                if req_offset < num_snaps:
                    snapshots = self._get_my_snapshots(ctxt,
                                                      limit=req_limit,
                                                      offset=req_offset)
                else:
                    snapshots = objects.SnapshotList()
            # or retrieve all volumes and snapshots per single request
            else:
                volumes = self._get_my_volumes(ctxt)
                snapshots = self._get_my_snapshots(ctxt)
            self._sync_provider_info(ctxt, volumes, snapshots)
            # FIXME volume count for exporting is wrong
            try:
                for volume in volumes:
                    # available volume should also be counted into allocated
                    if volume['status'] in ['in-use', 'available']:
                        # calculate allocated capacity for driver
                        self._count_allocated_capacity(ctxt, volume)
                        try:
                            if volume['status'] in ['in-use']:
                                self.driver.ensure_export(ctxt, volume)
                        except Exception:
                            LOG.exception("Failed to re-export volume, "
                                          "setting to ERROR.",
                                          resource=volume)
                            volume.conditional_update({'status': 'error'},
                                                      {'status': 'in-use'})
                # All other cleanups are processed by parent class -
                # CleanableManager
            except Exception:
                LOG.exception("Error during re-export on driver init.",
                              resource=volume)
                return
            if len(volumes):
                volumes_to_migrate.append(volumes, ctxt)
            # Free each batch before fetching the next to bound memory use.
            del volumes
            del snapshots
        self.driver.set_throttle()
        # At this point the driver is considered initialized.
        # NOTE(jdg): Careful though, because that doesn't mean the backend
        # itself is fully ready for every operation yet.
        self.driver.set_initialized()
        # Remove any temporary image-conversion leftovers for this backend.
        backend_name = volume_utils.extract_host(self.service_topic_queue)
        image_utils.cleanup_temporary_file(backend_name)
        # Migrate ConfKeyManager fixed-key encryption keys in the background.
        self._add_to_threadpool(key_migration.migrate_fixed_key,
                                volumes=volumes_to_migrate)
        # collect and publish service capabilities
        self.publish_service_capabilities(ctxt)
        LOG.info("Driver initialization completed successfully.",
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})
        # Make sure to call CleanableManager to do the cleanup
        super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
                                             **kwargs)
    def init_host_with_rpc(self):
        """Finish initialization that needs RPC (service record) access.

        Refreshes driver stats and reconciles the service's (and its
        cluster's) replication_status with what the driver reports,
        unless the service is currently FAILED_OVER.
        """
        LOG.info("Initializing RPC dependent components of volume "
                 "driver %(driver_name)s (%(version)s)",
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            # Nothing below makes sense until the driver itself is up.
            utils.log_unsupported_driver_warning(self.driver)
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            LOG.error("Cannot complete RPC initialization because "
                      "driver isn't initialized properly.",
                      resource={'type': 'driver',
                                'id': self.driver.__class__.__name__})
            return
        stats = self.driver.get_volume_stats(refresh=True)
        try:
            service = self._get_service()
        except exception.ServiceNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error("Service not found for updating replication_status.")
        if service.replication_status != fields.ReplicationStatus.FAILED_OVER:
            # Derive ENABLED/DISABLED from the driver's reported stats.
            if stats and stats.get('replication_enabled', False):
                replication_status = fields.ReplicationStatus.ENABLED
            else:
                replication_status = fields.ReplicationStatus.DISABLED
            if replication_status != service.replication_status:
                service.replication_status = replication_status
                service.save()
        # Update the cluster replication status if necessary
        cluster = service.cluster
        if (cluster and
                cluster.replication_status != service.replication_status):
            cluster.replication_status = service.replication_status
            cluster.save()
        LOG.info("Driver post RPC initialization completed successfully.",
                 resource={'type': 'driver',
                           'id': self.driver.__class__.__name__})
    def _do_cleanup(self, ctxt, vo_resource):
        """Clean up a resource left in a transient state by a dead service.

        Handles Volume resources stuck in 'downloading', 'uploading' or
        'deleting'; any resource still 'creating'/'downloading' afterwards
        is moved to 'error'.  Returning True tells the caller (the
        CleanableManager machinery) that this method already took care of
        removing the associated worker record.
        """
        if isinstance(vo_resource, objects.Volume):
            if vo_resource.status == 'downloading':
                self.driver.clear_download(ctxt, vo_resource)
            elif vo_resource.status == 'uploading':
                # Set volume status to available or in-use.
                self.db.volume_update_status_based_on_attachment(
                    ctxt, vo_resource.id)
            elif vo_resource.status == 'deleting':
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            vo_resource, cascade=True)
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, vo_resource, cascade=True)
                # We signal that we take care of cleaning the worker ourselves
                # (with set_workers decorator in delete_volume method) so
                # do_cleanup method doesn't need to remove it.
                return True
        if vo_resource.status in ('creating', 'downloading'):
            vo_resource.status = 'error'
            vo_resource.save()
    def is_working(self):
        """Return True when the backend driver finished initialization."""
        return self.driver.initialized
def _set_resource_host(self, resource):
if (resource.is_clustered and
not volume_utils.hosts_are_equivalent(resource.host,
self.host)):
pool = volume_utils.extract_host(resource.host, 'pool')
resource.host = volume_utils.append_host(self.host, pool)
resource.save()
    @objects.Volume.set_workers
    def create_volume(self, context, volume, request_spec=None,
                      filter_properties=None, allow_reschedule=True):
        """Create the volume by running the create-volume taskflow.

        Builds and runs the create_volume flow, serializing against the
        deletion of any source snapshot/volume, tracks whether the flow
        rescheduled the volume elsewhere (to undo the local capacity
        bump), and finally records shared_targets and service_uuid.

        :returns: the volume id
        :raises CinderException: if the flow cannot be constructed
        """
        utils.log_unsupported_driver_warning(self.driver)
        self._set_resource_host(volume)
        self._update_allocated_capacity(volume)
        # Rescheduling clears volume.host, so remember it for the
        # capacity decrement below.
        original_host = volume.host
        context_elevated = context.elevated()
        if filter_properties is None:
            filter_properties = {}
        if request_spec is None:
            request_spec = objects.RequestSpec()
        try:
            flow_engine = create_volume.get_flow(
                context_elevated,
                self,
                self.db,
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume,
                allow_reschedule,
                context,
                request_spec,
                filter_properties,
                image_volume_cache=self.image_volume_cache,
            )
        except Exception:
            msg = _("Create manager volume flow failed.")
            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
            raise exception.CinderException(msg)
        snapshot_id = request_spec.get('snapshot_id')
        source_volid = request_spec.get('source_volid')
        # Serialize against deletion of the source snapshot/volume while
        # we are cloning from it.
        if snapshot_id is not None:
            locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
        elif source_volid is not None:
            locked_action = "%s-%s" % (source_volid, 'delete_volume')
        else:
            locked_action = None
        def _run_flow():
            # Run the flow with dynamic log listening so taskflow's state
            # transitions end up in our log.
            with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
                flow_engine.run()
        # NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
        # decide if allocated_capacity should be incremented.
        rescheduled = False
        try:
            if locked_action is None:
                _run_flow()
            else:
                with coordination.COORDINATOR.get_lock(locked_action):
                    _run_flow()
        finally:
            try:
                flow_engine.storage.fetch('refreshed')
            except tfe.NotFound:
                # If there's no vol_ref, then flow is reverted. Lets check out
                # whether rescheduling occurred.
                try:
                    rescheduled = flow_engine.storage.get_revert_result(
                        create_volume.OnFailureRescheduleTask.make_name(
                            [create_volume.ACTION]))
                except tfe.NotFound:
                    pass
            if rescheduled:
                # Volume.host is None now, so we pass the original host value.
                self._update_allocated_capacity(volume, decrement=True,
                                                host=original_host)
        # Shared targets is only relevant for iSCSI connections.
        # We default to True to be on the safe side.
        volume.shared_targets = (
            self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
            self.driver.capabilities.get('shared_targets', True))
        # TODO(geguileo): service_uuid won't be enough on Active/Active
        volume.service_uuid = self.service_uuid
        volume.save()
        LOG.info("Created volume successfully.", resource=volume)
        return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = volume_utils.extract_host(
resource.service_topic_queue)
backend = volume_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
{'resource': resource.obj_name, 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
    @coordination.synchronized('{volume.id}-{f_name}')
    @objects.Volume.set_workers
    def delete_volume(self, context, volume, unmanage_only=False,
                      cascade=False):
        """Delete a volume (or unmanage it when unmanage_only=True).

        Validates ownership and state, optionally cascades to the
        volume's snapshots, adjusts quotas (skipped for migrating and
        temporary volumes) and allocated-capacity stats, and emits usage
        notifications.

        :param context: request context
        :param volume: Volume object to delete
        :param unmanage_only: remove from Cinder without deleting from
            the backend; incompatible with cascade and encrypted volumes
        :param cascade: also delete all snapshots of the volume
        :raises VolumeAttached: if the volume is still attached
        :raises Invalid: for forbidden unmanage combinations or foreign
            resources
        :raises InvalidSnapshot: if a snapshot is not 'deleting' during a
            cascade delete
        """
        context = context.elevated()
        try:
            volume.refresh()
        except exception.VolumeNotFound:
            # Volume may already be gone (e.g. deletes resumed at startup).
            LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
            return
        # Charge quota against the volume's owner, not the caller.
        if context.project_id != volume.project_id:
            project_id = volume.project_id
        else:
            project_id = context.project_id
        if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume.id)
        self._check_is_our_resource(volume)
        if unmanage_only and volume.encryption_key_id is not None:
            raise exception.Invalid(
                reason=_("Unmanaging encrypted volumes is not "
                         "supported."))
        if unmanage_only and cascade:
            raise exception.Invalid(
                reason=_("Unmanage and cascade delete options "
                         "are mutually exclusive."))
        # Temporary (admin-metadata flagged) volumes skip notifications
        # and quota handling below.
        is_temp_vol = False
        with volume.obj_as_admin():
            if volume.admin_metadata.get('temporary', 'False') == 'True':
                is_temp_vol = True
                LOG.info("Trying to delete temp volume: %s", volume.id)
        is_migrating = volume.migration_status not in (None, 'error',
                                                       'success')
        is_migrating_dest = (is_migrating and
                             volume.migration_status.startswith(
                                 'target:'))
        notification = "delete.start"
        if unmanage_only:
            notification = "unmanage.start"
        if not is_temp_vol:
            self._notify_about_volume_usage(context, volume, notification)
        try:
            # Verify the driver is usable before touching the backend; a
            # failure here lands in the generic except below.
            utils.require_driver_initialized(self.driver)
            self.driver.remove_export(context, volume)
            if unmanage_only:
                self.driver.unmanage(volume)
            elif cascade:
                LOG.debug('Performing cascade delete.')
                snapshots = objects.SnapshotList.get_all_for_volume(context,
                                                                    volume.id)
                for s in snapshots:
                    if s.status != fields.SnapshotStatus.DELETING:
                        self._clear_db(context, is_migrating_dest, volume,
                                       'error_deleting')
                        msg = (_("Snapshot %(id)s was found in state "
                                 "%(state)s rather than 'deleting' during "
                                 "cascade delete.") % {'id': s.id,
                                                       'state': s.status})
                        raise exception.InvalidSnapshot(reason=msg)
                    self.delete_snapshot(context, s)
                LOG.debug('Snapshots deleted, issuing volume delete')
                self.driver.delete_volume(volume)
            else:
                self.driver.delete_volume(volume)
        except exception.VolumeIsBusy:
            LOG.error("Unable to delete busy volume.",
                      resource=volume)
            # Put the volume (or migration-destination record) back into a
            # usable state instead of failing the whole operation.
            self._clear_db(context, is_migrating_dest, volume,
                           'available')
            return
        except Exception:
            with excutils.save_and_reraise_exception():
                new_status = 'error_deleting'
                if unmanage_only is True:
                    new_status = 'error_unmanaging'
                self._clear_db(context, is_migrating_dest, volume,
                               new_status)
        # Migration source/destination and temp volumes never consumed
        # quota, so don't release any for them.
        skip_quota = is_migrating or is_temp_vol
        if not skip_quota:
            try:
                reservations = None
                if volume.status != 'error_managing_deleting':
                    reserve_opts = {'volumes': -1,
                                    'gigabytes': -volume.size}
                    QUOTAS.add_volume_type_opts(context,
                                                reserve_opts,
                                                volume.volume_type_id)
                    reservations = QUOTAS.reserve(context,
                                                  project_id=project_id,
                                                  **reserve_opts)
            except Exception:
                LOG.exception("Failed to update usages deleting volume.",
                              resource=volume)
        volume.destroy()
        if not skip_quota:
            notification = "delete.end"
            if unmanage_only:
                notification = "unmanage.end"
            self._notify_about_volume_usage(context, volume, notification)
            # Commit the quota release reserved above.
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
        self._update_allocated_capacity(volume, decrement=True)
        self.publish_service_capabilities(context)
        msg = "Deleted volume successfully."
        if unmanage_only:
            msg = "Unmanaged volume successfully."
        LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
if is_migrating_dest:
volume_ref.destroy()
LOG.error("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted).", resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
    def _revert_to_snapshot_generic(self, ctxt, volume, snapshot):
        """Revert volume to snapshot by copying data through a temp volume.

        Creates a temporary volume from the snapshot (in the internal
        tenant context when one is available), copies its data over the
        original volume, then deletes the temporary volume.  On failure
        any leftover temp volume still in 'available' state is cleaned
        up before the original exception is re-raised.
        """
        temp_vol = None
        try:
            v_options = {'display_name': '[revert] temporary volume created '
                         'from snapshot %s' % snapshot.id}
            # Prefer the internal tenant so the temp volume is not billed
            # to / visible in the user's project.
            ctxt = context.get_internal_tenant_context() or ctxt
            temp_vol = self.driver._create_temp_volume_from_snapshot(
                ctxt, volume, snapshot, volume_options=v_options)
            self._copy_volume_data(ctxt, temp_vol, volume)
            self.driver.delete_volume(temp_vol)
            temp_vol.destroy()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    "Failed to use snapshot %(snapshot)s to create "
                    "a temporary volume and copy data to volume "
                    " %(volume)s.",
                    {'snapshot': snapshot.id,
                     'volume': volume.id})
                # Best-effort cleanup of the temp volume; skipped if it
                # never reached 'available' (creation itself failed).
                if temp_vol and temp_vol.status == 'available':
                    self.driver.delete_volume(temp_vol)
                    temp_vol.destroy()
    def _revert_to_snapshot(self, context, volume, snapshot):
        """Revert via the driver, falling back to the generic copy method.

        NotImplementedError/AttributeError from the driver call indicates
        the backend has no native revert support, so the generic
        temp-volume copy implementation is used instead.
        """
        try:
            self.driver.revert_to_snapshot(context, volume, snapshot)
        except (NotImplementedError, AttributeError):
            LOG.info("Driver's 'revert_to_snapshot' is not found. "
                     "Try to use copy-snapshot-to-volume method.")
            self._revert_to_snapshot_generic(context, volume, snapshot)
def _create_backup_snapshot(self, context, volume):
kwargs = {
'volume_id': volume.id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': fields.SnapshotStatus.CREATING,
'progress': '0%',
'volume_size': volume.size,
'display_name': '[revert] volume %s backup snapshot' % volume.id,
'display_description': 'This is only used for backup when '
'reverting. If the reverting process '
'failed, you can restore you data by '
'creating new volume with this snapshot.',
'volume_type_id': volume.volume_type_id,
'encryption_key_id': volume.encryption_key_id,
'metadata': {}
}
snapshot = objects.Snapshot(context=context, **kwargs)
snapshot.create()
self.create_snapshot(context, snapshot)
return snapshot
    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert a volume to the content of one of its snapshots.

        Optionally creates a backup snapshot first (when the driver
        supports temp snapshots) so current data can be restored by hand
        if the revert fails.  On success the volume and snapshot are
        moved back to 'available'; on failure both are pushed to error
        states and the original exception is re-raised.
        """
        backup_snapshot = None
        try:
            LOG.info("Start to perform revert to snapshot process.")
            self._notify_about_volume_usage(context, volume,
                                            "revert.start")
            self._notify_about_snapshot_usage(context, snapshot,
                                              "revert.start")
            # Create a snapshot which can be used to restore the volume
            # data by hand if revert process failed.
            if self.driver.snapshot_revert_use_temp_snapshot():
                backup_snapshot = self._create_backup_snapshot(context,
                                                               volume)
            self._revert_to_snapshot(context, volume, snapshot)
        except Exception as error:
            with excutils.save_and_reraise_exception():
                self._notify_about_volume_usage(context, volume,
                                                "revert.end")
                self._notify_about_snapshot_usage(context, snapshot,
                                                  "revert.end")
                msg = ('Volume %(v_id)s revert to '
                       'snapshot %(s_id)s failed with %(error)s.')
                msg_args = {'v_id': volume.id,
                            's_id': snapshot.id,
                            'error': six.text_type(error)}
                # Push volume/snapshot to error states; each reset can
                # itself fail, in which case a pre-formatted note is
                # appended to the log message.
                v_res = volume.update_single_status_where(
                    'error',
                    'reverting')
                if not v_res:
                    msg_args = {"id": volume.id,
                                "status": 'error'}
                    msg += ("Failed to reset volume %(id)s "
                            "status to %(status)s.") % msg_args
                s_res = snapshot.update_single_status_where(
                    fields.SnapshotStatus.AVAILABLE,
                    fields.SnapshotStatus.RESTORING)
                if not s_res:
                    msg_args = {"id": snapshot.id,
                                "status":
                                    fields.SnapshotStatus.AVAILABLE}
                    msg += ("Failed to reset snapshot %(id)s "
                            "status to %(status)s." % msg_args)
                # NOTE(review): when a reset above failed, msg_args no
                # longer holds the v_id/s_id/error keys that msg's
                # remaining placeholders reference, so this lazy
                # interpolation can fail inside logging -- confirm.
                LOG.exception(msg, msg_args)
        v_res = volume.update_single_status_where(
            'available', 'reverting')
        if not v_res:
            msg_args = {"id": volume.id,
                        "status": 'available'}
            msg = _("Revert finished, but failed to reset "
                    "volume %(id)s status to %(status)s, "
                    "please manually reset it.") % msg_args
            raise exception.BadResetResourceStatus(reason=msg)
        s_res = snapshot.update_single_status_where(
            fields.SnapshotStatus.AVAILABLE,
            fields.SnapshotStatus.RESTORING)
        if not s_res:
            msg_args = {"id": snapshot.id,
                        "status":
                            fields.SnapshotStatus.AVAILABLE}
            msg = _("Revert finished, but failed to reset "
                    "snapshot %(id)s status to %(status)s, "
                    "please manually reset it.") % msg_args
            raise exception.BadResetResourceStatus(reason=msg)
        # The backup snapshot is internal; skip quota handling on delete.
        if backup_snapshot:
            self.delete_snapshot(context,
                                 backup_snapshot, handle_quota=False)
        msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
               'successfully.')
        msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
        LOG.info(msg, msg_args)
        self._notify_about_volume_usage(context, volume, "revert.end")
        self._notify_about_snapshot_usage(context, snapshot, "revert.end")
    @objects.Snapshot.set_workers
    def create_snapshot(self, context, snapshot):
        """Create a snapshot of a volume on the backend.

        Calls the driver's ``create_snapshot``, copies glance metadata
        for bootable volumes, and moves the snapshot through
        CREATING -> AVAILABLE (or ERROR), emitting usage notifications
        and user messages on failure.

        :returns: the snapshot id on success
        """
        context = context.elevated()
        self._notify_about_snapshot_usage(
            context, snapshot, "create.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot.context = context
            model_update = self.driver.create_snapshot(snapshot)
            if model_update:
                snapshot.update(model_update)
                snapshot.save()
        except Exception as create_error:
            with excutils.save_and_reraise_exception():
                snapshot.status = fields.SnapshotStatus.ERROR
                snapshot.save()
                self.message_api.create(
                    context,
                    action=message_field.Action.SNAPSHOT_CREATE,
                    resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                    resource_uuid=snapshot['id'],
                    exception=create_error,
                    detail=message_field.Detail.SNAPSHOT_CREATE_ERROR)
        vol_ref = self.db.volume_get(context, snapshot.volume_id)
        if vol_ref.bootable:
            try:
                self.db.volume_glance_metadata_copy_to_snapshot(
                    context, snapshot.id, snapshot.volume_id)
            except exception.GlanceMetadataNotFound:
                # If volume is not created from image, No glance metadata
                # would be available for that volume in
                # volume glance metadata table
                pass
            except exception.CinderException as ex:
                LOG.exception("Failed updating snapshot"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata",
                              {'volume_id': snapshot.volume_id},
                              resource=snapshot)
                snapshot.status = fields.SnapshotStatus.ERROR
                snapshot.save()
                self.message_api.create(
                    context,
                    action=message_field.Action.SNAPSHOT_CREATE,
                    resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                    resource_uuid=snapshot['id'],
                    exception=ex,
                    detail=message_field.Detail.SNAPSHOT_UPDATE_METADATA_FAILED
                )
                raise exception.MetadataCopyFailure(reason=six.text_type(ex))
        snapshot.status = fields.SnapshotStatus.AVAILABLE
        snapshot.progress = '100%'
        # Resync with the volume's DB value. This addresses the case where
        # fixed_key encryption key ID was migrated to Barbican.
        snapshot.encryption_key_id = vol_ref.encryption_key_id
        snapshot.save()
        self._notify_about_snapshot_usage(context, snapshot, "create.end")
        LOG.info("Create snapshot completed successfully",
                 resource=snapshot)
        return snapshot.id
    @coordination.synchronized('{snapshot.id}-{f_name}')
    def delete_snapshot(self, context, snapshot,
                        unmanage_only=False, handle_quota=True):
        """Delete (or unmanage) a snapshot; serialized per snapshot id.

        With ``unmanage_only`` the backend snapshot is left in place and
        only Cinder's record is removed.  ``handle_quota=False`` skips
        quota reservation/commit (used for internal snapshots such as
        revert backups).
        """
        context = context.elevated()
        snapshot._context = context
        project_id = snapshot.project_id
        self._notify_about_snapshot_usage(
            context, snapshot, "delete.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the snapshot status updated.
            utils.require_driver_initialized(self.driver)
            # Pass context so that drivers that want to use it, can,
            # but it is not a requirement for all drivers.
            snapshot.context = context
            snapshot.save()
            if unmanage_only:
                self.driver.unmanage_snapshot(snapshot)
            else:
                self.driver.delete_snapshot(snapshot)
        except exception.SnapshotIsBusy as busy_error:
            # Busy snapshots go back to 'available' rather than error;
            # the user can retry later.
            LOG.error("Delete snapshot failed, due to snapshot busy.",
                      resource=snapshot)
            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.save()
            self.message_api.create(
                context,
                action=message_field.Action.SNAPSHOT_DELETE,
                resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                resource_uuid=snapshot['id'],
                exception=busy_error)
            return
        except Exception as delete_error:
            with excutils.save_and_reraise_exception():
                snapshot.status = fields.SnapshotStatus.ERROR_DELETING
                snapshot.save()
                self.message_api.create(
                    context,
                    action=message_field.Action.SNAPSHOT_DELETE,
                    resource_type=message_field.Resource.VOLUME_SNAPSHOT,
                    resource_uuid=snapshot['id'],
                    exception=delete_error,
                    detail=message_field.Detail.SNAPSHOT_DELETE_ERROR)
        # Get reservations
        reservations = None
        try:
            if handle_quota:
                if CONF.no_snapshot_gb_quota:
                    reserve_opts = {'snapshots': -1}
                else:
                    reserve_opts = {
                        'snapshots': -1,
                        'gigabytes': -snapshot.volume_size,
                    }
                volume_ref = self.db.volume_get(context, snapshot.volume_id)
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.get('volume_type_id'))
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
        except Exception:
            # Quota bookkeeping failure must not block the delete itself.
            reservations = None
            LOG.exception("Update snapshot usages failed.",
                          resource=snapshot)
        self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
        snapshot.destroy()
        self._notify_about_snapshot_usage(context, snapshot, "delete.end")
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
        msg = "Delete snapshot completed successfully."
        if unmanage_only:
            msg = "Unmanage snapshot completed successfully."
        LOG.info(msg, resource=snapshot)
    @coordination.synchronized('{volume_id}')
    def attach_volume(self, context, volume_id, instance_uuid, host_name,
                      mountpoint, mode, volume=None):
        """Update the DB to show the volume is attached; serialized per id.

        Validates the requested mode against any existing attachment,
        short-circuits if this instance/host is already attached, records
        the attachment, and calls the driver's ``attach_volume``.

        :returns: the VolumeAttachment record
        """
        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look
            # up the volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)
        # Get admin_metadata. This needs admin context.
        with volume.obj_as_admin():
            volume_metadata = volume.admin_metadata
        # check the volume status before attaching
        if volume.status == 'attaching':
            if (volume_metadata.get('attached_mode') and
                    volume_metadata.get('attached_mode') != mode):
                raise exception.InvalidVolume(
                    reason=_("being attached by different mode"))
        host_name_sanitized = volume_utils.sanitize_hostname(
            host_name) if host_name else None
        if instance_uuid:
            attachments = (
                VA_LIST.get_all_by_instance_uuid(
                    context, instance_uuid))
        else:
            attachments = (
                VA_LIST.get_all_by_host(
                    context, host_name_sanitized))
        if attachments:
            # check if volume<->instance mapping is already tracked in DB
            for attachment in attachments:
                if attachment['volume_id'] == volume_id:
                    # Already attached to this instance/host -- idempotent.
                    volume.status = 'in-use'
                    volume.save()
                    return attachment
        if (volume.status == 'in-use' and not volume.multiattach
                and not volume.migration_status):
            raise exception.InvalidVolume(
                reason=_("volume is already attached and multiple attachments "
                         "are not enabled"))
        self._notify_about_volume_usage(context, volume,
                                        "attach.start")
        attachment = volume.begin_attach(mode)
        if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
            attachment.attach_status = (
                fields.VolumeAttachStatus.ERROR_ATTACHING)
            attachment.save()
            raise exception.InvalidUUID(uuid=instance_uuid)
        try:
            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume.id)
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            LOG.info('Attaching volume %(volume_id)s to instance '
                     '%(instance)s at mountpoint %(mount)s on host '
                     '%(host)s.',
                     {'volume_id': volume_id, 'instance': instance_uuid,
                      'mount': mountpoint, 'host': host_name_sanitized},
                     resource=volume)
            self.driver.attach_volume(context,
                                      volume,
                                      instance_uuid,
                                      host_name_sanitized,
                                      mountpoint)
        except Exception as excep:
            with excutils.save_and_reraise_exception():
                self.message_api.create(
                    context,
                    message_field.Action.ATTACH_VOLUME,
                    resource_uuid=volume_id,
                    exception=excep)
                attachment.attach_status = (
                    fields.VolumeAttachStatus.ERROR_ATTACHING)
                attachment.save()
        volume = attachment.finish_attach(
            instance_uuid,
            host_name_sanitized,
            mountpoint,
            mode)
        self._notify_about_volume_usage(context, volume, "attach.end")
        LOG.info("Attach volume completed successfully.",
                 resource=volume)
        return attachment
    @coordination.synchronized('{volume_id}-{f_name}')
    def detach_volume(self, context, volume_id, attachment_id=None,
                      volume=None):
        """Update the DB to show the volume is detached.

        Resolves which attachment to remove (explicit ``attachment_id``
        or the volume's single attachment), calls the driver's
        ``detach_volume`` and removes the export.
        """
        # TODO(vish): refactor this into a more general "unreserve"
        # FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)
        if attachment_id:
            try:
                attachment = objects.VolumeAttachment.get_by_id(context,
                                                                attachment_id)
            except exception.VolumeAttachmentNotFound:
                LOG.info("Volume detach called, but volume not attached.",
                         resource=volume)
                # We need to make sure the volume status is set to the correct
                # status. It could be in detaching status now, and we don't
                # want it left there.
                volume.finish_detach(attachment_id)
                return
        else:
            attachments = volume.volume_attachment
            if len(attachments) > 1:
                msg = _("Detach volume failed: More than one attachment, "
                        "but no attachment_id provided.")
                LOG.error(msg, resource=volume)
                raise exception.InvalidVolume(reason=msg)
            elif len(attachments) == 1:
                attachment = attachments[0]
            else:
                # No attachments exist for this volume,
                # so set the status to available and move on.
                LOG.info("Volume detach called, but volume not attached.",
                         resource=volume)
                volume.status = 'available'
                volume.attach_status = fields.VolumeAttachStatus.DETACHED
                volume.save()
                return
        self._notify_about_volume_usage(context, volume, "detach.start")
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
            LOG.info('Detaching volume %(volume_id)s from instance '
                     '%(instance)s.',
                     {'volume_id': volume_id,
                      'instance': attachment.get('instance_uuid')},
                     resource=volume)
            self.driver.detach_volume(context, volume, attachment)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_attachment_update(
                    context, attachment.get('id'), {
                        'attach_status':
                            fields.VolumeAttachStatus.ERROR_DETACHING})
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F)
        # this was necessary to convert in-use volumes from
        # int ID's to UUID's. Don't need this any longer
        # (delete the iscsi target)
        try:
            utils.require_driver_initialized(self.driver)
            self.driver.remove_export(context.elevated(), volume)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception("Detach volume failed, due to "
                              "uninitialized driver.",
                              resource=volume)
        except Exception as ex:
            LOG.exception("Detach volume failed, due to "
                          "remove-export failure.",
                          resource=volume)
            raise exception.RemoveExportException(volume=volume_id,
                                                  reason=six.text_type(ex))
        volume.finish_detach(attachment.id)
        self._notify_about_volume_usage(context, volume, "detach.end")
        LOG.info("Detach volume completed successfully.", resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
cache_entry = self.image_volume_cache.get_entry(ctx,
volume_ref,
image_id,
image_meta)
if cache_entry:
LOG.debug('Cache entry already exists with image ID %'
'(image_id)s',
{'image_id': image_id})
return
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.',
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.',
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning('Failed to create new image-volume cache entry.'
' Error: %(exception)s', {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
    def _clone_image_volume(self, ctx, volume, image_meta):
        """Create a new volume as a clone of an existing image volume.

        Reserves quota for the clone, creates the DB record, then
        delegates the data copy to ``create_volume`` (no reschedule).
        The resulting volume is marked read-only.

        :returns: the new image volume on success, None on any failure
        """
        volume_type_id = volume.get('volume_type_id')
        reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
        QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
        reservations = QUOTAS.reserve(ctx, **reserve_opts)
        # NOTE(yikun): Skip 'snapshot_id', 'source_volid' keys to avoid
        # creating tmp img vol from wrong snapshot or wrong source vol.
        skip = {'snapshot_id', 'source_volid'}
        skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
        try:
            new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
            new_vol_values['volume_type_id'] = volume_type_id
            new_vol_values['attach_status'] = (
                fields.VolumeAttachStatus.DETACHED)
            new_vol_values['status'] = 'creating'
            new_vol_values['project_id'] = ctx.project_id
            new_vol_values['display_name'] = 'image-%s' % image_meta['id']
            new_vol_values['source_volid'] = volume.id
            LOG.debug('Creating image volume entry: %s.', new_vol_values)
            image_volume = objects.Volume(context=ctx, **new_vol_values)
            image_volume.create()
        except Exception as ex:
            LOG.exception('Create clone_image_volume: %(volume_id)s '
                          'for image %(image_id)s, '
                          'failed (Exception: %(except)s)',
                          {'volume_id': volume.id,
                           'image_id': image_meta['id'],
                           'except': ex})
            # Record creation failed: give back the reserved quota.
            QUOTAS.rollback(ctx, reservations)
            return
        QUOTAS.commit(ctx, reservations,
                      project_id=new_vol_values['project_id'])
        try:
            self.create_volume(ctx, image_volume, allow_reschedule=False)
            image_volume.refresh()
            if image_volume.status != 'available':
                raise exception.InvalidVolume(_('Volume is not available.'))
            self.db.volume_admin_metadata_update(ctx.elevated(),
                                                 image_volume.id,
                                                 {'readonly': 'True'},
                                                 False)
            return image_volume
        except exception.CinderException:
            LOG.exception('Failed to clone volume %(volume_id)s for '
                          'image %(image_id)s.',
                          {'volume_id': volume.id,
                           'image_id': image_meta['id']})
            try:
                self.delete_volume(ctx, image_volume)
            except exception.CinderException:
                LOG.exception('Could not delete the image volume %(id)s.',
                              {'id': volume.id})
            return
    def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
                                             image_meta):
        """Create a cloned volume and register its location to the image.

        Only applies to raw/bare images.  The clone may be created in an
        internal tenant when image_upload_use_internal_tenant is set.

        :returns: True if the clone was created and its cinder:// URI was
            registered with glance, False otherwise.
        """
        # Glance cinder store locations are only meaningful for raw/bare.
        if (image_meta['disk_format'] != 'raw' or
                image_meta['container_format'] != 'bare'):
            return False
        image_volume_context = ctx
        if self.driver.configuration.image_upload_use_internal_tenant:
            internal_ctx = context.get_internal_tenant_context()
            if internal_ctx:
                image_volume_context = internal_ctx
        image_volume = self._clone_image_volume(image_volume_context,
                                                volume,
                                                image_meta)
        if not image_volume:
            return False
        # The image_owner metadata should be set before uri is added to
        # the image so glance cinder store can check its owner.
        image_volume_meta = {'image_owner': ctx.project_id}
        self.db.volume_metadata_update(image_volume_context,
                                       image_volume.id,
                                       image_volume_meta,
                                       False)
        uri = 'cinder://%s' % image_volume.id
        image_registered = None
        try:
            image_registered = image_service.add_location(
                ctx, image_meta['id'], uri, {})
        except (exception.NotAuthorized, exception.Invalid,
                exception.NotFound):
            LOG.exception('Failed to register image volume location '
                          '%(uri)s.', {'uri': uri})
        if not image_registered:
            # Registration failed: the clone is useless, clean it up.
            LOG.warning('Registration of image volume URI %(uri)s '
                        'to image %(image_id)s failed.',
                        {'uri': uri, 'image_id': image_meta['id']})
            try:
                self.delete_volume(image_volume_context, image_volume)
            except exception.CinderException:
                LOG.exception('Could not delete failed image volume '
                              '%(id)s.', {'id': image_volume.id})
            return False
        image_volume_meta['glance_image_id'] = image_meta['id']
        self.db.volume_metadata_update(image_volume_context,
                                       image_volume.id,
                                       image_volume_meta,
                                       False)
        return True
def copy_volume_to_image(self, context, volume_id, image_meta):
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error("Upload volume to image encountered an error "
"(image-id: %(image_id)s).",
{'image_id': image_meta['id']},
resource=volume)
self.message_api.create(
context,
message_field.Action.COPY_VOLUME_TO_IMAGE,
resource_uuid=volume_id,
exception=error,
detail=message_field.Detail.FAILED_TO_UPLOAD_VOLUME)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info("Copy volume to image completed successfully.",
resource=volume)
def _delete_image(self, context, image_id, image_service):
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning("Deleting image in unexpected status: "
"%(image_status)s.",
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning("Image delete encountered an error.",
exc_info=True, resource={'type': 'image',
'id': image_id})
    def _parse_connection_options(self, context, volume, conn_info):
        """Augment driver connection info with generic options.

        Adds front-end QoS specs (scaling any *_per_gb specs by volume
        size, clamped by *_per_gb_min and *_max), the access mode, the
        encryption flag and the discard hint.

        :returns: the augmented conn_info dict
        """
        # Add qos_specs to connection info
        typeid = volume.volume_type_id
        specs = None
        if typeid:
            res = volume_types.get_volume_type_qos_specs(typeid)
            qos = res['qos_specs']
            # only pass qos_specs that is designated to be consumed by
            # front-end, or both front-end and back-end.
            if qos and qos.get('consumer') in ['front-end', 'both']:
                specs = qos.get('specs')
            # NOTE(mnaser): The following configures for per-GB QoS
            if specs is not None:
                volume_size = int(volume.size)
                tune_opts = ('read_iops_sec', 'read_bytes_sec',
                             'write_iops_sec', 'write_bytes_sec',
                             'total_iops_sec', 'total_bytes_sec')
                for option in tune_opts:
                    option_per_gb = '%s_per_gb' % option
                    option_per_gb_min = '%s_per_gb_min' % option
                    option_max = '%s_max' % option
                    if option_per_gb in specs:
                        # Scale with size but never below the per-GB
                        # minimum nor above the optional maximum.
                        minimum_value = int(specs.pop(option_per_gb_min, 0))
                        value = int(specs[option_per_gb]) * volume_size
                        per_gb_value = max(minimum_value, value)
                        max_value = int(specs.pop(option_max, per_gb_value))
                        specs[option] = min(per_gb_value, max_value)
                        specs.pop(option_per_gb)
            qos_spec = dict(qos_specs=specs)
            conn_info['data'].update(qos_spec)
        # Add access_mode to connection info
        volume_metadata = volume.admin_metadata
        access_mode = volume_metadata.get('attached_mode')
        if access_mode is None:
            # NOTE(zhiyan): client didn't call 'os-attach' before
            access_mode = ('ro'
                           if volume_metadata.get('readonly') == 'True'
                           else 'rw')
        conn_info['data']['access_mode'] = access_mode
        if conn_info['data'].get('encrypted') is None:
            encrypted = bool(volume.encryption_key_id)
            conn_info['data']['encrypted'] = encrypted
        if conn_info['data'].get('discard') is None:
            discard_supported = (self.driver.configuration
                                 .safe_get('report_discard_supported'))
            if discard_supported:
                conn_info['data']['discard'] = True
        return conn_info
    def initialize_connection(self, context, volume, connector):
        """Prepare a volume for connection from the host in ``connector``.

        Validates the connector, creates the backend export, persists any
        model update, then asks the driver for connection info, which is
        augmented with QoS/access-mode/encryption/discard options.

        :returns: the connection info dict
        :raises VolumeBackendAPIException: on validation, export or
            connect failure (the export is removed on later failures).
        """
        utils.require_driver_initialized(self.driver)
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            model_update = self.driver.create_export(context.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            msg = _("Create export of volume failed (%s)") % ex.msg
            LOG.exception(msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            if model_update:
                volume.update(model_update)
                volume.save()
        except Exception as ex:
            # DB save failed: undo the export so backend and DB agree.
            LOG.exception("Model update failed.", resource=volume)
            try:
                self.driver.remove_export(context.elevated(), volume)
            except Exception:
                LOG.exception('Could not remove export after DB model failed.')
            raise exception.ExportFailure(reason=six.text_type(ex))
        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except exception.ConnectorRejected:
            with excutils.save_and_reraise_exception():
                LOG.info("The connector was rejected by the volume driver.")
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            self.driver.remove_export(context.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        conn_info = self._parse_connection_options(context, volume, conn_info)
        LOG.info("Initialize volume connection completed successfully.",
                 resource=volume)
        return conn_info
    def initialize_connection_snapshot(self, ctxt, snapshot_id, connector):
        """Prepare a snapshot for connection from the host in ``connector``.

        Mirrors initialize_connection() for snapshots: validates the
        connector, creates the snapshot export (persisting provider
        fields) and returns the driver's connection info.
        """
        utils.require_driver_initialized(self.driver)
        snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate snapshot connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=snapshot)
            raise exception.VolumeBackendAPIException(data=err_msg)
        model_update = None
        try:
            LOG.debug("Snapshot %s: creating export.", snapshot.id)
            model_update = self.driver.create_export_snapshot(
                ctxt.elevated(), snapshot, connector)
            if model_update:
                snapshot.provider_location = model_update.get(
                    'provider_location', None)
                snapshot.provider_auth = model_update.get(
                    'provider_auth', None)
                snapshot.save()
        except exception.CinderException as ex:
            msg = _("Create export of snapshot failed (%s)") % ex.msg
            LOG.exception(msg, resource=snapshot)
            raise exception.VolumeBackendAPIException(data=msg)
        try:
            # NOTE(review): provider_location/provider_auth were already
            # saved above; applying the whole model_update again looks
            # redundant -- confirm against upstream before simplifying.
            if model_update:
                snapshot.update(model_update)
                snapshot.save()
        except exception.CinderException as ex:
            LOG.exception("Model update failed.", resource=snapshot)
            raise exception.ExportFailure(reason=six.text_type(ex))
        try:
            conn = self.driver.initialize_connection_snapshot(snapshot,
                                                              connector)
        except Exception as err:
            try:
                err_msg = (_('Unable to fetch connection information from '
                             'backend: %(err)s') %
                           {'err': six.text_type(err)})
                LOG.error(err_msg)
                LOG.debug("Cleaning up failed connect initialization.")
                self.driver.remove_export_snapshot(ctxt.elevated(), snapshot)
            except Exception as ex:
                ex_msg = (_('Error encountered during cleanup '
                            'of a failed attach: %(ex)s') %
                          {'ex': six.text_type(ex)})
                LOG.error(ex_msg)
                raise exception.VolumeBackendAPIException(data=ex_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        LOG.info("Initialize snapshot connection completed successfully.",
                 resource=snapshot)
        return conn
def terminate_connection(self, context, volume_id, connector, force=False):
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate volume connection completed successfully.",
resource=volume_ref)
def terminate_connection_snapshot(self, ctxt, snapshot_id,
connector, force=False):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.terminate_connection_snapshot(snapshot, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate snapshot connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info("Terminate snapshot connection completed successfully.",
resource=snapshot)
def remove_export(self, context, volume_id):
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove volume export completed successfully.",
resource=volume_ref)
def remove_export_snapshot(self, ctxt, snapshot_id):
utils.require_driver_initialized(self.driver)
snapshot = objects.Snapshot.get_by_id(ctxt, snapshot_id)
try:
self.driver.remove_export_snapshot(ctxt, snapshot)
except Exception:
msg = _("Remove snapshot export failed.")
LOG.exception(msg, resource=snapshot)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info("Remove snapshot export completed successfully.",
resource=snapshot)
    def accept_transfer(self, context, volume_id, new_user, new_project,
                        no_snapshots=False):
        """Finalize a volume transfer to a new user/project.

        Lets the driver refresh tenant-tied provider info and persists
        any resulting model update; on DB failure the volume is pushed
        to 'error' and the exception re-raised.

        :returns: the driver's model update dict (or None)
        """
        utils.require_driver_initialized(self.driver)
        # NOTE(review): elevated context -- presumably because the
        # transfer of ownership is not complete
        # yet
        volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
        # for those that do allow them to return updated model info
        model_update = self.driver.accept_transfer(context,
                                                   volume_ref,
                                                   new_user,
                                                   new_project)
        if model_update:
            try:
                self.db.volume_update(context.elevated(),
                                      volume_id,
                                      model_update)
            except exception.CinderException:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Update volume model for "
                                  "transfer operation failed.",
                                  resource=volume_ref)
                    self.db.volume_update(context.elevated(),
                                          volume_id,
                                          {'status': 'error'})
        LOG.info("Transfer volume completed successfully.",
                 resource=volume_ref)
        return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
    def _attach_volume(self, ctxt, volume, properties, remote=False,
                       attach_encryptor=False):
        """Attach a volume locally or via RPC and return its attach_info.

        When ``attach_encryptor`` is set and the volume type is
        encrypted, the encryptor is layered on top of the device; on
        encryptor failure the device is force-detached before re-raise.
        """
        status = volume['status']
        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            try:
                conn = rpcapi.initialize_connection(ctxt, volume, properties)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("Failed to attach volume %(vol)s.",
                              {'vol': volume['id']})
                    # Restore the status captured before the attempt.
                    self.db.volume_update(ctxt, volume['id'],
                                          {'status': status})
        else:
            conn = self.initialize_connection(ctxt, volume, properties)
        attach_info = self._connect_device(conn)
        try:
            if attach_encryptor and (
                    volume_types.is_encrypted(ctxt,
                                              volume.volume_type_id)):
                encryption = self.db.volume_encryption_metadata_get(
                    ctxt.elevated(), volume.id)
                if encryption:
                    utils.brick_attach_volume_encryptor(ctxt,
                                                        attach_info,
                                                        encryption)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to attach volume encryptor"
                          " %(vol)s.", {'vol': volume['id']})
                self._detach_volume(ctxt, attach_info, volume, properties,
                                    force=True)
        return attach_info
    def _detach_volume(self, ctxt, attach_info, volume, properties,
                       force=False, remote=False,
                       attach_encryptor=False):
        """Detach a volume previously attached with :meth:`_attach_volume`.

        Detaches the encryptor first (if one was attached), disconnects the
        local device, then terminates the connection and removes the export —
        over RPC when *remote* is True, locally otherwise.
        """
        connector = attach_info['connector']
        if attach_encryptor and (
                volume_types.is_encrypted(ctxt,
                                          volume.volume_type_id)):
            encryption = self.db.volume_encryption_metadata_get(
                ctxt.elevated(), volume.id)
            if encryption:
                utils.brick_detach_volume_encryptor(attach_info, encryption)
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'], force=force)
        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(ctxt, volume, properties, force=force)
            rpcapi.remove_export(ctxt, volume)
        else:
            try:
                self.terminate_connection(ctxt, volume['id'], properties,
                                          force=force)
                self.remove_export(ctxt, volume['id'])
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    LOG.error('Unable to terminate volume connection: '
                              '%(err)s.', {'err': err})
    def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
        """Attach both volumes and dd the data from src_vol to dest_vol.

        :param remote: None, 'src', 'dest' or 'both' — which side(s) must be
            attached through the remote backend's RPC API
        """
        LOG.debug('_copy_volume_data %(src)s -> %(dest)s.',
                  {'src': src_vol['name'], 'dest': dest_vol['name']})
        attach_encryptor = False
        # If the encryption method or key is changed, we have to
        # copy data through dm-crypt.
        if volume_types.volume_types_encryption_changed(
                ctxt,
                src_vol.volume_type_id,
                dest_vol.volume_type_id):
            attach_encryptor = True
        use_multipath = self.configuration.use_multipath_for_image_xfer
        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
        properties = utils.brick_get_connector_properties(use_multipath,
                                                          enforce_multipath)
        dest_remote = remote in ['dest', 'both']
        dest_attach_info = self._attach_volume(
            ctxt, dest_vol, properties,
            remote=dest_remote,
            attach_encryptor=attach_encryptor)
        try:
            src_remote = remote in ['src', 'both']
            src_attach_info = self._attach_volume(
                ctxt, src_vol, properties,
                remote=src_remote,
                attach_encryptor=attach_encryptor)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Source attach failed: undo the destination attach so we
                # don't leave a dangling connection behind.
                LOG.error("Failed to attach source volume for copy.")
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, remote=dest_remote,
                                    attach_encryptor=attach_encryptor,
                                    force=True)
        # Check the backend capabilities of migration destination host.
        rpcapi = volume_rpcapi.VolumeAPI()
        capabilities = rpcapi.get_capabilities(ctxt,
                                               dest_vol.service_topic_queue,
                                               False)
        sparse_copy_volume = bool(capabilities and
                                  capabilities.get('sparse_copy_volume',
                                                   False))
        try:
            size_in_mb = int(src_vol['size']) * units.Ki  # vol size is in GB
            volume_utils.copy_volume(src_attach_info['device']['path'],
                                     dest_attach_info['device']['path'],
                                     size_in_mb,
                                     self.configuration.volume_dd_blocksize,
                                     sparse=sparse_copy_volume)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("Failed to copy volume %(src)s to %(dest)s.",
                          {'src': src_vol['id'], 'dest': dest_vol['id']})
        finally:
            # Always detach both sides; the nested finally guarantees the
            # source detach runs even if the destination detach raises.
            try:
                self._detach_volume(ctxt, dest_attach_info, dest_vol,
                                    properties, force=True,
                                    remote=dest_remote,
                                    attach_encryptor=attach_encryptor)
            finally:
                self._detach_volume(ctxt, src_attach_info, src_vol,
                                    properties, force=True,
                                    remote=src_remote,
                                    attach_encryptor=attach_encryptor)
    def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
        """Host-assisted migration: create a new volume on *backend* and copy.

        Creates a temporary volume on the destination backend, waits for it
        to become available, then either copies the data itself (volume not
        attached) or asks Nova to swap the attachment (volume in use).
        """
        rpcapi = volume_rpcapi.VolumeAPI()
        # Create new volume on remote host
        tmp_skip = {'snapshot_id', 'source_volid'}
        skip = {'host', 'cluster_name', 'availability_zone'}
        skip.update(tmp_skip)
        skip.update(self._VOLUME_CLONE_SKIP_PROPERTIES)
        new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
            if volume_types.volume_types_encryption_changed(
                    ctxt, volume.volume_type_id, new_type_id):
                # New type uses different encryption: the copy needs a new key
                encryption_key_id = volume_utils.create_encryption_key(
                    ctxt, self.key_manager, new_type_id)
                new_vol_values['encryption_key_id'] = encryption_key_id
        dst_service = self._get_service(backend['host'])
        new_volume = objects.Volume(
            context=ctxt,
            host=backend['host'],
            availability_zone=dst_service.availability_zone,
            cluster_name=backend.get('cluster_name'),
            status='creating',
            attach_status=fields.VolumeAttachStatus.DETACHED,
            migration_status='target:%s' % volume['id'],
            **new_vol_values
        )
        new_volume.create()
        rpcapi.create_volume(ctxt, new_volume, None, None,
                             allow_reschedule=False)
        # Wait for new_volume to become ready
        starttime = time.time()
        deadline = starttime + CONF.migration_create_volume_timeout_secs
        new_volume.refresh()
        tries = 0
        while new_volume.status != 'available':
            tries += 1
            now = time.time()
            if new_volume.status == 'error':
                msg = _("failed to create new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            elif now > deadline:
                msg = _("timeout creating new_volume on destination")
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume,
                                             clean_db_only=True)
                raise exception.VolumeMigrationFailed(reason=msg)
            else:
                # Quadratic backoff between status polls.
                time.sleep(tries ** 2)
            new_volume.refresh()
        # Set skipped value to avoid calling
        # function except for _create_raw_volume
        tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
        if tmp_skipped_values:
            new_volume.update(tmp_skipped_values)
            new_volume.save()
        # Copy the source volume to the destination volume
        try:
            attachments = volume.volume_attachment
            # A volume might have attachments created, but if it is reserved
            # it means it's being migrated prior to the attachment completion.
            if not attachments or volume.status == 'reserved':
                self.driver.before_volume_copy(ctxt, volume, new_volume,
                                               remote='dest')
                self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
                self.driver.after_volume_copy(ctxt, volume, new_volume,
                                              remote='dest')
                self.migrate_volume_completion(ctxt, volume, new_volume,
                                               error=False)
            else:
                # In-use volume: Nova performs the copy by swapping the
                # server's attachment, then calls completion itself.
                nova_api = compute.API()
                for attachment in attachments:
                    instance_uuid = attachment['instance_uuid']
                    nova_api.update_server_volume(ctxt, instance_uuid,
                                                  volume.id,
                                                  new_volume.id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    "Failed to copy volume %(vol1)s to %(vol2)s", {
                        'vol1': volume.id, 'vol2': new_volume.id})
                self._clean_temporary_volume(ctxt, volume,
                                             new_volume)
    def _clean_temporary_volume(self, ctxt, volume, new_volume,
                                clean_db_only=False):
        """Clean up the temporary destination volume of a failed migration.

        :param clean_db_only: when True only the DB record is removed (used
            when the backend volume was never successfully created)
        """
        # If we're in the migrating phase, we need to cleanup
        if volume.migration_status == 'migrating':
            try:
                if clean_db_only:
                    new_volume.destroy()
                else:
                    rpcapi = volume_rpcapi.VolumeAPI()
                    rpcapi.delete_volume(ctxt, new_volume)
            except exception.VolumeNotFound:
                LOG.info("Couldn't find the temporary volume "
                         "%(vol)s in the database. There is no need "
                         "to clean up this volume.",
                         {'vol': new_volume.id})
        else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in database should
            # be cleared to handle volume after migration failure
            try:
                new_volume.migration_status = None
                new_volume.save()
            except exception.VolumeNotFound:
                LOG.info("Couldn't find destination volume "
                         "%(vol)s in the database. The entry might be "
                         "successfully deleted during migration "
                         "completion phase.",
                         {'vol': new_volume.id})
            LOG.warning("Failed to migrate volume. The destination "
                        "volume %(vol)s is not deleted since the "
                        "source volume may have been deleted.",
                        {'vol': new_volume.id})
    def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
        """Finish (or unwind) a host-assisted migration.

        Swaps the source and destination DB records so callers keep using
        the original volume id, re-creates attachment records where needed,
        and asynchronously deletes the old backend volume.

        :param error: when True the migration failed; delete the temporary
            destination volume and restore the source's previous status
        :returns: the surviving volume's id
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()
        # NOTE: there is a lot of state-based jumping around in here depending
        # on how we got here and the status of the volumes involved.
        LOG.debug("migrate_volume_completion: completing migration for "
                  "volume %(vol1)s (temporary volume %(vol2)s",
                  {'vol1': volume.id, 'vol2': new_volume.id})
        rpcapi = volume_rpcapi.VolumeAPI()
        orig_volume_status = volume.previous_status
        if error:
            LOG.info("migrate_volume_completion is cleaning up an error "
                     "for volume %(vol1)s (temporary volume %(vol2)s",
                     {'vol1': volume['id'], 'vol2': new_volume.id})
            rpcapi.delete_volume(ctxt, new_volume)
            updates = {'migration_status': 'error',
                       'status': orig_volume_status}
            volume.update(updates)
            volume.save()
            return volume.id
        volume.migration_status = 'completing'
        volume.save()
        volume_attachments = []
        # NOTE(jdg): With new attach flow, we deleted the attachment, so the
        # original volume should now be listed as available, we still need to
        # do the magic swappy thing of name.id etc but we're done with the
        # detach to toggle the status
        if orig_volume_status == 'in-use' and volume.status != 'available':
            for attachment in volume.volume_attachment:
                # Save the attachments the volume currently have
                volume_attachments.append(attachment)
                try:
                    self.detach_volume(ctxt, volume.id, attachment.id)
                except Exception as ex:
                    # Best effort: log and continue with the remaining
                    # attachments so completion can still proceed.
                    LOG.error("Detach migration source volume "
                              "%(volume.id)s from attachment "
                              "%(attachment.id)s failed: %(err)s",
                              {'err': ex,
                               'volume.id': volume.id,
                               'attachment.id': attachment.id},
                              resource=volume)
        # Give driver (new_volume) a chance to update things as needed
        # after a successful migration.
        # Note this needs to go through rpc to the host of the new volume
        # the current host and driver object is for the "existing" volume.
        rpcapi.update_migrated_volume(ctxt, volume, new_volume,
                                      orig_volume_status)
        volume.refresh()
        new_volume.refresh()
        # Swap src and dest DB records so we can continue using the src id and
        # asynchronously delete the destination id
        updated_new = volume.finish_volume_migration(new_volume)
        updates = {'status': orig_volume_status,
                   'previous_status': volume.status,
                   'migration_status': 'success'}
        # NOTE(jdg): With the new attachment API's, nova deletes the old
        # attachment itself, so we only re-attach for the legacy flow
        # (the new flow just swaps the attachments' volume_id below).
        if orig_volume_status == 'in-use' and volume.status in ['available',
                                                                'reserved',
                                                                'attaching']:
            for attachment in volume_attachments:
                LOG.debug('Re-attaching: %s', attachment)
                # This is a DB-level re-attach; the guest never detached.
                rpcapi.attach_volume(ctxt, volume,
                                     attachment.instance_uuid,
                                     attachment.attached_host,
                                     attachment.mountpoint,
                                     attachment.attach_mode or 'rw')
        # At this point we now have done almost all of our swapping and
        # state-changes. The target volume is now marked back to
        # "in-use" the destination/worker volume is now in deleting
        # state and the next steps will finish the deletion steps
        volume.update(updates)
        volume.save()
        # Asynchronous deletion of the source volume in the back-end (now
        # pointed by the target volume id)
        try:
            rpcapi.delete_volume(ctxt, updated_new)
        except Exception as ex:
            # Deliberately non-fatal: the migration itself succeeded.
            LOG.error('Failed to request async delete of migration source '
                      'vol %(vol)s: %(err)s',
                      {'vol': volume.id, 'err': ex})
        # For the new flow this is really the key part. We just use the
        # attachments to the worker/destination volumes that we created and
        # used for the libvirt migration and we'll just swap their volume_id
        # so they point at the surviving volume record.
        for attachment in VA_LIST.get_all_by_volume_id(ctxt, updated_new.id):
            attachment.volume_id = volume.id
            attachment.save()
        LOG.info("Complete-Migrate volume completed successfully.",
                 resource=volume)
        return volume.id
    def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
                       new_type_id=None):
        """Migrate *volume* to *host*, driver-assisted when possible.

        Tries the driver's optimized ``migrate_volume`` first (unless a host
        copy is forced or the type changes), falling back to the generic
        create-and-copy path in :meth:`_migrate_volume_generic`.
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.migration_status = 'error'
                volume.save()
        model_update = None
        moved = False
        status_update = None
        if volume.status in ('retyping', 'maintenance'):
            # Remember the status we must restore once migration finishes.
            status_update = {'status': volume.previous_status}
        volume.migration_status = 'migrating'
        volume.save()
        if not force_host_copy and new_type_id is None:
            try:
                LOG.debug("Issue driver.migrate_volume.", resource=volume)
                moved, model_update = self.driver.migrate_volume(ctxt,
                                                                 volume,
                                                                 host)
                if moved:
                    dst_service = self._get_service(host['host'])
                    updates = {
                        'host': host['host'],
                        'cluster_name': host.get('cluster_name'),
                        'migration_status': 'success',
                        'availability_zone': dst_service.availability_zone,
                        'previous_status': volume.status,
                    }
                    if status_update:
                        updates.update(status_update)
                    if model_update:
                        updates.update(model_update)
                    volume.update(updates)
                    volume.save()
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        if not moved:
            # Driver could not (or was not asked to) migrate: generic path.
            try:
                self._migrate_volume_generic(ctxt, volume, host, new_type_id)
            except Exception:
                with excutils.save_and_reraise_exception():
                    updates = {'migration_status': 'error'}
                    if status_update:
                        updates.update(status_update)
                    volume.update(updates)
                    volume.save()
        LOG.info("Migrate volume completed successfully.",
                 resource=volume)
    def _report_driver_status(self, context):
        """Refresh driver stats and push them to the scheduler.

        Also repairs a missing ``service_uuid`` and, when the backend
        reports a replication error, lets the driver update the affected
        groups'/volumes' replication status.
        """
        # It's possible the service_uuid
        # value isn't set (we didn't restart services), so we'll go ahead
        # and try to fetch it here as part of the periodic report.
        if not self.service_uuid:
            try:
                service = self._get_service()
                self.service_uuid = service.uuid
            except exception.ServiceNotFound:
                LOG.warning("Attempt to update service_uuid "
                            "resulted in a Service NotFound "
                            "exception, service_uuid field on "
                            "volumes will be NULL.")
        if not self.driver.initialized:
            if self.driver.configuration.config_group is None:
                config_group = ''
            else:
                config_group = ('(config name %s)' %
                                self.driver.configuration.config_group)
            LOG.warning("Update driver status failed: %(config_group)s "
                        "is uninitialized.",
                        {'config_group': config_group},
                        resource={'type': 'driver',
                                  'id': self.driver.__class__.__name__})
        else:
            volume_stats = self.driver.get_volume_stats(refresh=True)
            if self.extra_capabilities:
                # Merge operator-configured extra capabilities into the
                # top-level stats and into every reported pool.
                volume_stats.update(self.extra_capabilities)
                if "pools" in volume_stats:
                    for pool in volume_stats["pools"]:
                        pool.update(self.extra_capabilities)
                else:
                    volume_stats.update(self.extra_capabilities)
            if volume_stats:
                if volume_stats.get('replication_status') == (
                        fields.ReplicationStatus.ERROR):
                    # Backend-wide replication error: ask the driver which
                    # groups/volumes are affected and persist their status.
                    filters = self._get_cluster_or_host_filters()
                    groups = objects.GroupList.get_all_replicated(
                        context, filters=filters)
                    group_model_updates, volume_model_updates = (
                        self.driver.get_replication_error_status(context,
                                                                 groups))
                    for grp_update in group_model_updates:
                        try:
                            grp_obj = objects.Group.get_by_id(
                                context, grp_update['group_id'])
                            grp_obj.update(grp_update)
                            grp_obj.save()
                        except exception.GroupNotFound:
                            LOG.warning("Group %(grp)s not found while "
                                        "updating driver status.",
                                        {'grp': grp_update['group_id']},
                                        resource={
                                            'type': 'group',
                                            'id': grp_update['group_id']})
                    for vol_update in volume_model_updates:
                        try:
                            vol_obj = objects.Volume.get_by_id(
                                context, vol_update['volume_id'])
                            vol_obj.update(vol_update)
                            vol_obj.save()
                        except exception.VolumeNotFound:
                            LOG.warning("Volume %(vol)s not found while "
                                        "updating driver status.",
                                        {'vol': vol_update['volume_id']},
                                        resource={
                                            'type': 'volume',
                                            'id': vol_update['volume_id']})
                # Append volume stats
                self._append_volume_stats(volume_stats)
                # Append filter and goodness function if needed
                volume_stats = (
                    self._append_filter_goodness_functions(volume_stats))
                # queue it to be sent to the Schedulers.
                self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools:
if isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
else:
raise exception.ProgrammingError(
reason='Pools stats reported by the driver are not '
'reported in a list')
elif self.stats.get('pools'):
vol_stats.update(next(iter(self.stats['pools'].values())))
else:
vol_stats.update(self.stats)
vol_stats.pop('pools', None)
def _append_filter_goodness_functions(self, volume_stats):
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
    @periodic_task.periodic_task(spacing=CONF.backend_stats_polling_interval)
    def publish_service_capabilities(self, context):
        """Collect driver status and then publish it to the scheduler."""
        self._report_driver_status(context)
        self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
volume_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
volume_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
volume_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
volume_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
    def extend_volume(self, context, volume, new_size, reservations):
        """Grow *volume* to *new_size* GB and commit the quota reservation.

        On driver failure the volume is set to ``error_extending`` and the
        reservation rolled back; on success allocation stats are updated and
        Nova is told to extend in-use volumes.
        """
        try:
            # NOTE(flaper87): Verify the driver is enabled
            # before going forward. The exception will be caught
            # and the volume status updated.
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                volume.status = 'error_extending'
                volume.save()
        project_id = volume.project_id
        size_increase = (int(new_size)) - volume.size
        self._notify_about_volume_usage(context, volume, "resize.start")
        try:
            self.driver.extend_volume(volume, new_size)
        except exception.TargetUpdateFailed:
            # We just want to log this but continue on with quota commit
            LOG.warning('Volume extended but failed to update target.')
        except Exception:
            LOG.exception("Extend volume failed.",
                          resource=volume)
            self.message_api.create(
                context,
                message_field.Action.EXTEND_VOLUME,
                resource_uuid=volume.id,
                detail=message_field.Detail.DRIVER_FAILED_EXTEND)
            try:
                self.db.volume_update(context, volume.id,
                                      {'status': 'error_extending'})
                raise exception.CinderException(_("Volume %s: Error trying "
                                                 "to extend volume") %
                                               volume.id)
            finally:
                QUOTAS.rollback(context, reservations, project_id=project_id)
            # NOTE(review): unreachable — the try block above always raises
            # (either CinderException or the DB update's own error); kept
            # byte-identical, consider removing.
            return
        QUOTAS.commit(context, reservations, project_id=project_id)
        attachments = volume.volume_attachment
        if not attachments:
            orig_volume_status = 'available'
        else:
            orig_volume_status = 'in-use'
        volume.update({'size': int(new_size), 'status': orig_volume_status})
        volume.save()
        if orig_volume_status == 'in-use':
            # Tell Nova so the guests see the new size.
            nova_api = compute.API()
            instance_uuids = [attachment.instance_uuid
                              for attachment in attachments]
            nova_api.extend_volume(context, instance_uuids, volume.id)
        pool = volume_utils.extract_host(volume.host, 'pool')
        if pool is None:
            # Legacy volume, put them into default pool
            pool = self.driver.configuration.safe_get(
                'volume_backend_name') or volume_utils.extract_host(
                    volume.host, 'pool', True)
        try:
            self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
        except KeyError:
            self.stats['pools'][pool] = dict(
                allocated_capacity_gb=size_increase)
        self._notify_about_volume_usage(
            context, volume, "resize.end",
            extra_usage_info={'size': int(new_size)})
        LOG.info("Extend volume completed successfully.",
                 resource=volume)
def _is_our_backend(self, host, cluster_name):
return ((not cluster_name and
volume_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
volume_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
if old_reservations:
QUOTAS.rollback(context, old_reservations)
if new_reservations:
QUOTAS.rollback(context, new_reservations)
previous_status = (
volume.previous_status or volume.status)
status_update = {'status': previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# for now.
volume.update(status_update)
volume.save()
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
# Use the admin contex to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host or cluster (depending
# if it's the volume is in a clustered backend or not) is the same as
# invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info("Volume %s: retyped successfully.", volume.id)
except Exception:
retyped = False
LOG.exception("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism.", volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if(rep_status is not None and rep_status not in
[fields.ReplicationStatus.DISABLED,
fields.ReplicationStatus.NOT_CAPABLE]):
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info("Retype volume completed successfully.",
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = volume_utils.is_boolean_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info("Manage existing volume completed successfully.",
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
pool = volume_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_cluster_or_host_filters(self):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
return filters
def _get_my_volumes_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.VolumeList.get_volume_summary(ctxt, False, filters)
def _get_my_snapshots_summary(self, ctxt):
filters = self._get_cluster_or_host_filters()
return objects.SnapshotList.get_snapshot_summary(ctxt, False, filters)
def _get_my_resources(self, ctxt, ovo_class_list, limit=None, offset=None):
filters = self._get_cluster_or_host_filters()
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters,
limit=limit,
offset=offset)
def _get_my_volumes(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.VolumeList,
limit, offset)
def _get_my_snapshots(self, ctxt, limit=None, offset=None):
return self._get_my_resources(ctxt, objects.SnapshotList,
limit, offset)
    def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
                               sort_dirs, want_objects=False):
        """List backend volumes available for ``manage_existing``.

        :param want_objects: when True return ManageableVolumeList objects
            instead of primitive dicts
        :returns: driver-provided entries, or [] if the driver does not
            implement ``get_manageable_volumes``
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable volumes failed, due "
                              "to uninitialized driver.")
        cinder_volumes = self._get_my_volumes(ctxt)
        try:
            driver_entries = self.driver.get_manageable_volumes(
                cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
            if want_objects:
                driver_entries = (objects.ManageableVolumeList.
                                  from_primitives(ctxt, driver_entries))
        except AttributeError:
            # Optional driver capability: missing method means unsupported.
            LOG.debug('Driver does not support listing manageable volumes.')
            return []
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable volumes failed, due "
                              "to driver error.")
        return driver_entries
    def create_group(self, context, group):
        """Create the group on the backend and mark it available.

        Falls back to the generic implementation or the legacy
        consistency-group driver API when the driver does not implement
        ``create_group``.
        """
        context = context.elevated()
        # Make sure the host in the DB is updated on clustered deployments.
        self._set_resource_host(group)
        status = fields.GroupStatus.AVAILABLE
        model_update = None
        self._notify_about_group_usage(context, group, "create.start")
        try:
            utils.require_driver_initialized(self.driver)
            LOG.info("Group %s: creating", group.name)
            try:
                model_update = self.driver.create_group(context, group)
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update = self._create_group_generic(context, group)
                else:
                    # Legacy consistency-group type: translate and use the
                    # old driver API.
                    cg, __ = self._convert_group_to_cg(group, [])
                    model_update = self.driver.create_consistencygroup(
                        context, cg)
            if model_update:
                if (model_update['status'] ==
                        fields.GroupStatus.ERROR):
                    msg = (_('Create group failed.'))
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error("Group %s: create failed",
                          group.name)
        group.status = status
        group.created_at = timeutils.utcnow()
        group.save()
        LOG.info("Group %s: created successfully", group.name)
        self._notify_about_group_usage(context, group, "create.end")
        LOG.info("Create group completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def create_group_from_src(self, context, group,
                              group_snapshot=None, source_group=None):
        """Create a group from a group snapshot or another source group.

        Exactly one of *group_snapshot* / *source_group* is expected to be
        supplied by the caller. Validates the source, calls the driver (with
        generic and legacy-CG fallbacks), then marks the group and its new
        volumes available.
        """
        source_name = None
        snapshots = None
        source_vols = None
        try:
            volumes = objects.VolumeList.get_all_by_generic_group(context,
                                                                  group.id)
            if group_snapshot:
                try:
                    group_snapshot.refresh()
                except exception.GroupSnapshotNotFound:
                    LOG.error("Create group from snapshot-%(snap)s failed: "
                              "SnapshotNotFound.",
                              {'snap': group_snapshot.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("snapshot-%s") % group_snapshot.id
                snapshots = objects.SnapshotList.get_all_for_group_snapshot(
                    context, group_snapshot.id)
                # Every source snapshot must be in a usable state.
                for snap in snapshots:
                    if (snap.status not in
                            VALID_CREATE_GROUP_SRC_SNAP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because snapshot %(snap)s is "
                                 "not in a valid state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'snap': snap['id'],
                                'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            if source_group:
                try:
                    source_group.refresh()
                except exception.GroupNotFound:
                    LOG.error("Create group "
                              "from source group-%(group)s failed: "
                              "GroupNotFound.",
                              {'group': source_group.id},
                              resource={'type': 'group',
                                        'id': group.id})
                    raise
                source_name = _("group-%s") % source_group.id
                source_vols = objects.VolumeList.get_all_by_generic_group(
                    context, source_group.id)
                # Every source volume must be in a usable state.
                for source_vol in source_vols:
                    if (source_vol.status not in
                            VALID_CREATE_GROUP_SRC_GROUP_STATUS):
                        msg = (_("Cannot create group "
                                 "%(group)s because source volume "
                                 "%(source_vol)s is not in a valid "
                                 "state. Valid states are: "
                                 "%(valid)s.") %
                               {'group': group.id,
                                'source_vol': source_vol.id,
                                'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
                        raise exception.InvalidGroup(reason=msg)
            # Sort sources so they line up 1:1 with the new volumes.
            sorted_snapshots = None
            if group_snapshot and snapshots:
                sorted_snapshots = self._sort_snapshots(volumes, snapshots)
            sorted_source_vols = None
            if source_group and source_vols:
                sorted_source_vols = self._sort_source_vols(volumes,
                                                            source_vols)
            self._notify_about_group_usage(
                context, group, "create.start")
            utils.require_driver_initialized(self.driver)
            try:
                model_update, volumes_model_update = (
                    self.driver.create_group_from_src(
                        context, group, volumes, group_snapshot,
                        sorted_snapshots, source_group, sorted_source_vols))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update, volumes_model_update = (
                        self._create_group_from_src_generic(
                            context, group, volumes, group_snapshot,
                            sorted_snapshots, source_group,
                            sorted_source_vols))
                else:
                    # Legacy consistency-group type: translate the objects,
                    # call the old driver API, then strip the CG ids again.
                    cg, volumes = self._convert_group_to_cg(
                        group, volumes)
                    cgsnapshot, sorted_snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, sorted_snapshots, context))
                    source_cg, sorted_source_vols = (
                        self._convert_group_to_cg(source_group,
                                                  sorted_source_vols))
                    model_update, volumes_model_update = (
                        self.driver.create_consistencygroup_from_src(
                            context, cg, volumes, cgsnapshot,
                            sorted_snapshots, source_cg, sorted_source_vols))
                    self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
                    self._remove_consistencygroup_id_from_volumes(volumes)
                    self._remove_consistencygroup_id_from_volumes(
                        sorted_source_vols)
            if volumes_model_update:
                for update in volumes_model_update:
                    self.db.volume_update(context, update['id'], update)
            if model_update:
                group.update(model_update)
                group.save()
        except Exception:
            with excutils.save_and_reraise_exception():
                group.status = fields.GroupStatus.ERROR
                group.save()
                LOG.error("Create group "
                          "from source %(source)s failed.",
                          {'source': source_name},
                          resource={'type': 'group',
                                    'id': group.id})
                # Update volume status to 'error' as well.
                self._remove_consistencygroup_id_from_volumes(volumes)
                for vol in volumes:
                    vol.status = 'error'
                    vol.save()
        now = timeutils.utcnow()
        status = 'available'
        for vol in volumes:
            update = {'status': status, 'created_at': now}
            self._update_volume_from_src(context, vol, update, group=group)
            self._update_allocated_capacity(vol)
        group.status = status
        group.created_at = now
        group.save()
        self._notify_about_group_usage(
            context, group, "create.end")
        LOG.info("Create group "
                 "from source-%(source)s completed successfully.",
                 {'source': source_name},
                 resource={'type': 'group',
                           'id': group.id})
        return group
    def _create_group_from_src_generic(self, context, group, volumes,
                                       group_snapshot=None, snapshots=None,
                                       source_group=None, source_vols=None):
        """Create a group's volumes from a group snapshot or source group.

        For each new volume, finds its matching source — a snapshot by
        ``snapshot_id`` or a source volume by ``source_volid`` — and clones
        it through the driver, one volume at a time.

        :returns: tuple of (model_update, volumes_model_update); a failure
                  on any single volume marks both that volume's update and
                  the group model update as 'error' without aborting the
                  remaining volumes.
        """
        model_update = {'status': 'available'}
        volumes_model_update = []
        for vol in volumes:
            if snapshots:
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        vol_model_update = {'id': vol.id}
                        try:
                            driver_update = (
                                self.driver.create_volume_from_snapshot(
                                    vol, snapshot))
                            if driver_update:
                                # Never let the driver overwrite the id.
                                driver_update.pop('id', None)
                                vol_model_update.update(driver_update)
                            if 'status' not in vol_model_update:
                                vol_model_update['status'] = 'available'
                        except Exception:
                            vol_model_update['status'] = 'error'
                            model_update['status'] = 'error'
                        volumes_model_update.append(vol_model_update)
                        # Each volume has exactly one source snapshot.
                        break
            elif source_vols:
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        vol_model_update = {'id': vol.id}
                        try:
                            driver_update = self.driver.create_cloned_volume(
                                vol, source_vol)
                            if driver_update:
                                # Never let the driver overwrite the id.
                                driver_update.pop('id', None)
                                vol_model_update.update(driver_update)
                            if 'status' not in vol_model_update:
                                vol_model_update['status'] = 'available'
                        except Exception:
                            vol_model_update['status'] = 'error'
                            model_update['status'] = 'error'
                        volumes_model_update.append(vol_model_update)
                        # Each volume has exactly one source volume.
                        break
        return model_update, volumes_model_update
def _sort_snapshots(self, volumes, snapshots):
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error("Source snapshot cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error("Source volumes cannot be found for target "
"volume %(volume_id)s.",
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
    def _update_volume_from_src(self, context, vol, update, group=None):
        """Finalize a volume created from a snapshot or another volume.

        Copies bootable/multiattach flags and Glance image metadata from
        the source onto *vol*, then applies *update* to the volume row.
        On lookup/copy failure the volume (and *group*, if given) is set
        to error and the exception is re-raised (metadata copy failures
        are wrapped in MetadataCopyFailure).
        """
        try:
            snapshot_id = vol.get('snapshot_id')
            source_volid = vol.get('source_volid')
            if snapshot_id:
                snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
                orig_vref = self.db.volume_get(context,
                                               snapshot.volume_id)
                if orig_vref.bootable:
                    update['bootable'] = True
                    self.db.volume_glance_metadata_copy_to_volume(
                        context, vol['id'], snapshot_id)
            if source_volid:
                source_vol = objects.Volume.get_by_id(context, source_volid)
                if source_vol.bootable:
                    update['bootable'] = True
                    self.db.volume_glance_metadata_copy_from_volume_to_volume(
                        context, source_volid, vol['id'])
                if source_vol.multiattach:
                    update['multiattach'] = True
        except exception.SnapshotNotFound:
            LOG.error("Source snapshot %(snapshot_id)s cannot be found.",
                      {'snapshot_id': vol['snapshot_id']})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise
        except exception.VolumeNotFound:
            # NOTE(review): `snapshot` is only bound when snapshot_id was
            # set; if VolumeNotFound is raised on the source_volid path this
            # log line would NameError — confirm against callers.
            LOG.error("The source volume %(volume_id)s "
                      "cannot be found.",
                      {'volume_id': snapshot.volume_id})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise
        except exception.CinderException as ex:
            LOG.error("Failed to update %(volume_id)s"
                      " metadata using the provided snapshot"
                      " %(snapshot_id)s metadata.",
                      {'volume_id': vol['id'],
                       'snapshot_id': vol['snapshot_id']})
            self.db.volume_update(context, vol['id'],
                                  {'status': 'error'})
            if group:
                group.status = fields.GroupStatus.ERROR
                group.save()
            raise exception.MetadataCopyFailure(reason=six.text_type(ex))
        self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol, decrement=False, host=None):
host = host or vol['host']
pool = volume_utils.extract_host(host, 'pool')
if pool is None:
pool = self.driver.configuration.safe_get(
'volume_backend_name') or volume_utils.extract_host(host,
'pool',
True)
vol_size = -vol['size'] if decrement else vol['size']
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += vol_size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=max(vol_size, 0))
def delete_group(self, context, group):
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
if vol_obj.attach_status == "attached":
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
if not group_types.is_default_cgsnapshot_type(
group.group_type_id):
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
for vol in volumes:
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception("Delete group "
"failed to update usages.",
resource={'type': 'group',
'id': group.id})
vol.destroy()
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info("Delete group "
"completed successfully.",
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
    def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
                                              ctxt):
        """Build a legacy CGSnapshot view of a group snapshot.

        Used when a driver implements only the older consistency-group
        API.  Returns the CGSnapshot (with its consistency group resolved
        from the DB) plus *snapshots* re-linked to it, or ``(None, None)``
        when there is no group snapshot.
        """
        if not group_snapshot:
            return None, None
        cgsnap = cgsnapshot.CGSnapshot()
        cgsnap.from_group_snapshot(group_snapshot)

        # Resolve the owning group and attach its CG view.
        grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
        cg, __ = self._convert_group_to_cg(grp, [])
        cgsnap.consistencygroup = cg

        for snap in snapshots:
            snap.cgsnapshot_id = snap.group_snapshot_id
            snap.cgsnapshot = cgsnap
        return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
    def _create_group_generic(self, context, group):
        """Create a group without driver involvement.

        A plain group needs no backend work, so just report it available
        with a creation timestamp.
        """
        model_update = {'status': fields.GroupStatus.AVAILABLE,
                        'created_at': timeutils.utcnow()}
        return model_update
    def _delete_group_generic(self, context, group, volumes):
        """Delete a group by removing each member volume via the driver.

        :returns: tuple of (model_update, volume_model_updates).  A busy
                  volume is left 'available'; any other failure marks the
                  volume 'error' and the group errored, but the loop keeps
                  going so remaining volumes are still attempted.
        """
        model_update = {'status': group.status}
        volume_model_updates = []
        for volume_ref in volumes:
            volume_model_update = {'id': volume_ref.id}
            try:
                self.driver.remove_export(context, volume_ref)
                self.driver.delete_volume(volume_ref)
                volume_model_update['status'] = 'deleted'
            except exception.VolumeIsBusy:
                volume_model_update['status'] = 'available'
            except Exception:
                volume_model_update['status'] = 'error'
                model_update['status'] = fields.GroupStatus.ERROR
            volume_model_updates.append(volume_model_update)
        return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
return None, None, None
    def _collect_volumes_for_group(self, context, group, volumes, add=True):
        """Resolve a comma-separated volume id list into volume objects.

        :param volumes: comma-separated volume ids, or None/empty.
        :param add: True when the volumes are being added to *group*,
                    False when removed; selects which statuses are valid
                    and enables the backend-ownership check.
        :returns: list of resolved volume objects (empty for no input).
        :raises VolumeNotFound: when an id does not resolve.
        :raises InvalidVolume: when a volume is in an invalid state.
        """
        if add:
            valid_status = VALID_ADD_VOL_TO_GROUP_STATUS
        else:
            valid_status = VALID_REMOVE_VOL_FROM_GROUP_STATUS
        volumes_ref = []
        if not volumes:
            return volumes_ref
        for add_vol in volumes.split(','):
            try:
                add_vol_ref = objects.Volume.get_by_id(context, add_vol)
            except exception.VolumeNotFound:
                LOG.error("Update group "
                          "failed to %(op)s volume-%(volume_id)s: "
                          "VolumeNotFound.",
                          {'volume_id': add_vol,
                           'op': 'add' if add else 'remove'},
                          resource={'type': 'group',
                                    'id': group.id})
                raise
            if add_vol_ref.status not in valid_status:
                msg = (_("Can not %(op)s volume %(volume_id)s to "
                         "group %(group_id)s because volume is in an invalid "
                         "state: %(status)s. Valid states are: %(valid)s.") %
                       {'volume_id': add_vol_ref.id,
                        'group_id': group.id,
                        'status': add_vol_ref.status,
                        'valid': valid_status,
                        'op': 'add' if add else 'remove'})
                raise exception.InvalidVolume(reason=msg)
            if add:
                # Only volumes managed by this backend may be added.
                self._check_is_our_resource(add_vol_ref)
            volumes_ref.append(add_vol_ref)
        return volumes_ref
    def update_group(self, context, group,
                     add_volumes=None, remove_volumes=None):
        """Update a generic group's membership.

        ``add_volumes`` / ``remove_volumes`` are comma-separated volume id
        strings.  Tries the driver's group API, then the generic fallback
        or, for default CG-type groups, the legacy consistency-group API.
        On failure the group and the affected volumes are set to error
        before re-raising.
        """
        add_volumes_ref = self._collect_volumes_for_group(context,
                                                          group,
                                                          add_volumes,
                                                          add=True)
        remove_volumes_ref = self._collect_volumes_for_group(context,
                                                             group,
                                                             remove_volumes,
                                                             add=False)
        self._notify_about_group_usage(
            context, group, "update.start")
        try:
            utils.require_driver_initialized(self.driver)
            try:
                model_update, add_volumes_update, remove_volumes_update = (
                    self.driver.update_group(
                        context, group,
                        add_volumes=add_volumes_ref,
                        remove_volumes=remove_volumes_ref))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group.group_type_id):
                    model_update, add_volumes_update, remove_volumes_update = (
                        self._update_group_generic(
                            context, group,
                            add_volumes=add_volumes_ref,
                            remove_volumes=remove_volumes_ref))
                else:
                    # Legacy consistency-group driver API fallback.
                    cg, remove_volumes_ref = self._convert_group_to_cg(
                        group, remove_volumes_ref)
                    model_update, add_volumes_update, remove_volumes_update = (
                        self.driver.update_consistencygroup(
                            context, cg,
                            add_volumes=add_volumes_ref,
                            remove_volumes=remove_volumes_ref))
                    self._remove_consistencygroup_id_from_volumes(
                        remove_volumes_ref)
            volumes_to_update = []
            if add_volumes_update:
                volumes_to_update.extend(add_volumes_update)
            if remove_volumes_update:
                volumes_to_update.extend(remove_volumes_update)
            self.db.volumes_update(context, volumes_to_update)
            if model_update:
                if model_update['status'] in (
                        [fields.GroupStatus.ERROR]):
                    msg = (_('Error occurred when updating group '
                             '%s.') % group.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                group.update(model_update)
                group.save()
        except Exception as e:
            with excutils.save_and_reraise_exception():
                if isinstance(e, exception.VolumeDriverException):
                    LOG.error("Error occurred in the volume driver when "
                              "updating group %(group_id)s.",
                              {'group_id': group.id})
                else:
                    LOG.error("Failed to update group %(group_id)s.",
                              {'group_id': group.id})
                group.status = fields.GroupStatus.ERROR
                group.save()
                # Mark every volume involved in the failed update errored.
                for add_vol in add_volumes_ref:
                    add_vol.status = 'error'
                    add_vol.save()
                for rem_vol in remove_volumes_ref:
                    if isinstance(e, exception.VolumeDriverException):
                        rem_vol.consistencygroup_id = None
                        rem_vol.consistencygroup = None
                    rem_vol.status = 'error'
                    rem_vol.save()
        # Success: persist the new membership in the DB.
        for add_vol in add_volumes_ref:
            add_vol.group_id = group.id
            add_vol.save()
        for rem_vol in remove_volumes_ref:
            rem_vol.group_id = None
            rem_vol.save()
        group.status = fields.GroupStatus.AVAILABLE
        group.save()
        self._notify_about_group_usage(
            context, group, "update.end")
        LOG.info("Update group completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
    def create_group_snapshot(self, context, group_snapshot):
        """Create a snapshot of every volume in a generic group.

        Tries the driver's group-snapshot API, falls back to the generic
        per-volume implementation or, for default CG-type groups, to the
        legacy cgsnapshot API.  Glance image metadata is copied onto
        snapshots of bootable volumes.
        """
        caller_context = context
        context = context.elevated()
        LOG.info("GroupSnapshot %s: creating.", group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.start")
        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
                      {'grp_snap_id': group_snapshot.id})

            # Hand the caller's (non-elevated) context to the objects so
            # drivers that want it can use it.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context
            try:
                model_update, snapshots_model_update = (
                    self.driver.create_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group_snapshot.group_type_id):
                    model_update, snapshots_model_update = (
                        self._create_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    # Legacy cgsnapshot driver API fallback.
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.create_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Persist each driver-provided snapshot update.
                    snap_id = snap_model.pop('id')
                    snap_obj = objects.Snapshot.get_by_id(context, snap_id)
                    snap_obj.update(snap_model)
                    snap_obj.save()
                    # An errored member snapshot poisons the group snapshot
                    # status as a whole.
                    if (snap_model['status'] in [
                        fields.SnapshotStatus.ERROR_DELETING,
                        fields.SnapshotStatus.ERROR] and
                            model_update['status'] not in
                            [fields.GroupSnapshotStatus.ERROR_DELETING,
                             fields.GroupSnapshotStatus.ERROR]):
                        model_update['status'] = snap_model['status']
            if model_update:
                if model_update['status'] == fields.GroupSnapshotStatus.ERROR:
                    msg = (_('Error occurred when creating group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                group_snapshot.update(model_update)
                group_snapshot.save()
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = fields.GroupSnapshotStatus.ERROR
                group_snapshot.save()
                self._remove_cgsnapshot_id_from_snapshots(snapshots)
                # No per-snapshot updates from the driver: mark every
                # member snapshot errored ourselves.
                if not snapshots_model_update:
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()
        for snapshot in snapshots:
            volume_id = snapshot.volume_id
            snapshot_id = snapshot.id
            vol_obj = objects.Volume.get_by_id(context, volume_id)
            if vol_obj.bootable:
                try:
                    self.db.volume_glance_metadata_copy_to_snapshot(
                        context, snapshot_id, volume_id)
                except exception.GlanceMetadataNotFound:
                    # Volume not created from an image: nothing to copy.
                    pass
                except exception.CinderException as ex:
                    LOG.error("Failed updating %(snapshot_id)s"
                              " metadata using the provided volumes"
                              " %(volume_id)s metadata.",
                              {'volume_id': volume_id,
                               'snapshot_id': snapshot_id})
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
                    raise exception.MetadataCopyFailure(
                        reason=six.text_type(ex))
            snapshot.status = fields.SnapshotStatus.AVAILABLE
            snapshot.progress = '100%'
            snapshot.save()
        group_snapshot.status = fields.GroupSnapshotStatus.AVAILABLE
        group_snapshot.save()
        LOG.info("group_snapshot %s: created successfully",
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "create.end")
        return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
driver_update = self.driver.create_snapshot(snapshot)
if driver_update:
driver_update.pop('id', None)
snapshot_model_update.update(driver_update)
if 'status' not in snapshot_model_update:
snapshot_model_update['status'] = (
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = (
fields.SnapshotStatus.ERROR)
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
    def _delete_group_snapshot_generic(self, context, group_snapshot,
                                       snapshots):
        """Delete each member snapshot individually via the driver.

        :returns: tuple of (model_update, snapshot_model_updates).  A busy
                  snapshot is left 'available'; any other failure marks
                  that snapshot and the group snapshot errored, but the
                  remaining snapshots are still attempted.
        """
        model_update = {'status': group_snapshot.status}
        snapshot_model_updates = []
        for snapshot in snapshots:
            snapshot_model_update = {'id': snapshot.id}
            try:
                self.driver.delete_snapshot(snapshot)
                snapshot_model_update['status'] = (
                    fields.SnapshotStatus.DELETED)
            except exception.SnapshotIsBusy:
                snapshot_model_update['status'] = (
                    fields.SnapshotStatus.AVAILABLE)
            except Exception:
                snapshot_model_update['status'] = (
                    fields.SnapshotStatus.ERROR)
                model_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_model_update)
        return model_update, snapshot_model_updates
    def delete_group_snapshot(self, context, group_snapshot):
        """Delete a group snapshot and all of its member snapshots.

        Tries the driver's group-snapshot API, falls back to the generic
        per-snapshot implementation or, for default CG-type groups, to
        the legacy cgsnapshot API.  Quota reservations are charged
        against the group snapshot's owning project.
        """
        caller_context = context
        context = context.elevated()
        project_id = group_snapshot.project_id

        LOG.info("group_snapshot %s: deleting", group_snapshot.id)

        snapshots = objects.SnapshotList.get_all_for_group_snapshot(
            context, group_snapshot.id)

        self._notify_about_group_snapshot_usage(
            context, group_snapshot, "delete.start")
        snapshots_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
                      {'grp_snap_id': group_snapshot.id})

            # Hand the caller's (non-elevated) context to the objects so
            # drivers that want it can use it.
            group_snapshot.context = caller_context
            for snapshot in snapshots:
                snapshot.context = caller_context
            try:
                model_update, snapshots_model_update = (
                    self.driver.delete_group_snapshot(context, group_snapshot,
                                                      snapshots))
            except NotImplementedError:
                if not group_types.is_default_cgsnapshot_type(
                        group_snapshot.group_type_id):
                    model_update, snapshots_model_update = (
                        self._delete_group_snapshot_generic(
                            context, group_snapshot, snapshots))
                else:
                    # Legacy cgsnapshot driver API fallback.
                    cgsnapshot, snapshots = (
                        self._convert_group_snapshot_to_cgsnapshot(
                            group_snapshot, snapshots, context))
                    model_update, snapshots_model_update = (
                        self.driver.delete_cgsnapshot(context, cgsnapshot,
                                                      snapshots))
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
            if snapshots_model_update:
                for snap_model in snapshots_model_update:
                    # Match each driver update to its snapshot object.
                    snap = next((item for item in snapshots if
                                 item.id == snap_model['id']), None)
                    if snap:
                        snap_model.pop('id')
                        snap.update(snap_model)
                        snap.save()
                        # An errored member snapshot poisons the group
                        # snapshot status as a whole.
                        if (snap_model['status'] in
                                [fields.SnapshotStatus.ERROR_DELETING,
                                 fields.SnapshotStatus.ERROR] and
                                model_update['status'] not in
                                ['error_deleting', 'error']):
                            model_update['status'] = snap_model['status']
            if model_update:
                if model_update['status'] in ['error_deleting', 'error']:
                    msg = (_('Error occurred when deleting group_snapshot '
                             '%s.') % group_snapshot.id)
                    LOG.error(msg)
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group_snapshot.update(model_update)
                    group_snapshot.save()
        except exception.CinderException:
            with excutils.save_and_reraise_exception():
                group_snapshot.status = fields.GroupSnapshotStatus.ERROR
                group_snapshot.save()
                # No per-snapshot updates from the driver: mark every
                # member snapshot errored ourselves.
                if not snapshots_model_update:
                    self._remove_cgsnapshot_id_from_snapshots(snapshots)
                    for snapshot in snapshots:
                        snapshot.status = fields.SnapshotStatus.ERROR
                        snapshot.save()
        for snapshot in snapshots:
            # Reserve the per-snapshot quota decrement.
            try:
                reserve_opts = {'snapshots': -1}
                if not CONF.no_snapshot_gb_quota:
                    reserve_opts['gigabytes'] = -snapshot.volume_size
                volume_ref = objects.Volume.get_by_id(context,
                                                      snapshot.volume_id)
                QUOTAS.add_volume_type_opts(context,
                                            reserve_opts,
                                            volume_ref.volume_type_id)
                reservations = QUOTAS.reserve(context,
                                              project_id=project_id,
                                              **reserve_opts)
            except Exception:
                reservations = None
                LOG.exception("Failed to update usages deleting snapshot")
            self.db.volume_glance_metadata_delete_by_snapshot(context,
                                                              snapshot.id)
            snapshot.destroy()
            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
        group_snapshot.destroy()
        LOG.info("group_snapshot %s: deleted successfully",
                 group_snapshot.id)
        self._notify_about_group_snapshot_usage(context, group_snapshot,
                                                "delete.end",
                                                snapshots)
    def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
        """Finalize a migration by syncing model data between the volumes.

        Lets the driver supply its own model update; when the driver does
        not implement the hook, falls back to swapping ``_name_id`` and
        ``provider_location`` from *new_volume* onto *volume*.  The old
        values from *volume* are written back onto *new_volume*.
        """
        model_update = None
        model_update_default = {'_name_id': new_volume.name_id,
                                'provider_location':
                                new_volume.provider_location}
        try:
            model_update = self.driver.update_migrated_volume(ctxt,
                                                              volume,
                                                              new_volume,
                                                              volume_status)
        except NotImplementedError:
            # Driver has no hook: use the default name-id/location swap.
            model_update = model_update_default
        if model_update:
            model_update_default.update(model_update)
            # Need to convert 'metadata' and 'admin_metadata' since
            # they are not keys of volume, their corresponding keys are
            # 'volume_metadata' and 'volume_admin_metadata'.
            model_update_new = dict()
            for key in model_update:
                if key == 'metadata':
                    if volume.get('volume_metadata'):
                        model_update_new[key] = {
                            metadata['key']: metadata['value']
                            for metadata in volume.volume_metadata}
                elif key == 'admin_metadata':
                    model_update_new[key] = {
                        metadata['key']: metadata['value']
                        for metadata in volume.volume_admin_metadata}
                else:
                    model_update_new[key] = volume[key]
            with new_volume.obj_as_admin():
                new_volume.update(model_update_new)
                new_volume.save()
        with volume.obj_as_admin():
            volume.update(model_update_default)
            volume.save()
    # Replication V2.1 and a/a method
    def failover(self, context, secondary_backend_id=None):
        """Fail this backend's replicated volumes over to a secondary.

        Non-replicated volumes (and their snapshots) are set to error when
        failing over (left untouched on failback).  The driver's
        ``failover`` (clustered) or ``failover_host`` method is invoked,
        and the per-volume and per-group model updates it returns are
        applied.  Passing ``"default"`` as the secondary id fails back to
        the primary.
        """
        updates = {}
        repl_status = fields.ReplicationStatus
        service = self._get_service()
        # TODO(geguileo): We should optimize these updates by doing them
        # directly on the DB with just 3 queries, one to change the volumes
        # another to change all the snapshots, and another to get replicated
        # volumes.
        # Change non replicated volumes and their snapshots to error if we are
        # failing over, leave them as they are for failback
        volumes = self._get_my_volumes(context)
        replicated_vols = []
        for volume in volumes:
            if volume.replication_status not in (repl_status.DISABLED,
                                                 repl_status.NOT_CAPABLE):
                replicated_vols.append(volume)
            elif secondary_backend_id != self.FAILBACK_SENTINEL:
                volume.previous_status = volume.status
                volume.status = 'error'
                volume.replication_status = repl_status.NOT_CAPABLE
                volume.save()
                for snapshot in volume.snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
        volume_update_list = None
        group_update_list = None
        try:
            # For non clustered we can call v2.1 failover_host, but for
            # clustered we call a/a failover method. We know a/a method
            # exists because BaseVD class wouldn't have started if it didn't.
            failover = getattr(self.driver,
                               'failover' if service.is_clustered
                               else 'failover_host')
            # expected form of volume_update_list:
            # [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
            # {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
            # It includes volumes in replication groups and those not in them
            # expected form of group_update_list:
            # [{group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}},
            # {group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}}]
            filters = self._get_cluster_or_host_filters()
            groups = objects.GroupList.get_all_replicated(context,
                                                          filters=filters)
            active_backend_id, volume_update_list, group_update_list = (
                failover(context,
                         replicated_vols,
                         secondary_id=secondary_backend_id,
                         groups=groups))
            try:
                update_data = {u['volume_id']: u['updates']
                               for u in volume_update_list}
            except KeyError:
                msg = "Update list, doesn't include volume_id"
                raise exception.ProgrammingError(reason=msg)
            try:
                update_group_data = {g['group_id']: g['updates']
                                     for g in group_update_list}
            except KeyError:
                msg = "Update list, doesn't include group_id"
                raise exception.ProgrammingError(reason=msg)
        except Exception as exc:
            # NOTE(jdg): Drivers need to be aware if they fail during
            # a failover sequence, we're expecting them to cleanup
            # target
            if isinstance(exc, exception.InvalidReplicationTarget):
                log_method = LOG.error
                # Preserve the replication_status: Status should be failed over
                # if we were failing back or if we were failing over from one
                # secondary to another secondary. In both cases
                # active_backend_id will be set.
                if service.active_backend_id:
                    updates['replication_status'] = repl_status.FAILED_OVER
                else:
                    updates['replication_status'] = repl_status.ENABLED
            else:
                log_method = LOG.exception
                updates.update(disabled=True,
                               replication_status=repl_status.FAILOVER_ERROR)
            log_method("Error encountered during failover on host: %(host)s "
                       "to %(backend_id)s: %(error)s",
                       {'host': self.host, 'backend_id': secondary_backend_id,
                        'error': exc})
            # We dump the update list for manual recovery
            LOG.error('Failed update_list is: %s', volume_update_list)
            self.finish_failover(context, service, updates)
            return
        if secondary_backend_id == "default":
            # Failback: re-enable, keeping the disabled flag only when the
            # service was frozen.
            updates['replication_status'] = repl_status.ENABLED
            updates['active_backend_id'] = ''
            updates['disabled'] = service.frozen
            updates['disabled_reason'] = 'frozen' if service.frozen else ''
        else:
            updates['replication_status'] = repl_status.FAILED_OVER
            updates['active_backend_id'] = active_backend_id
            updates['disabled'] = True
            updates['disabled_reason'] = 'failed-over'
        self.finish_failover(context, service, updates)
        for volume in replicated_vols:
            update = update_data.get(volume.id, {})
            if update.get('status', '') == 'error':
                update['replication_status'] = repl_status.FAILOVER_ERROR
            elif update.get('replication_status') in (None,
                                                      repl_status.FAILED_OVER):
                update['replication_status'] = updates['replication_status']
            if update['replication_status'] == repl_status.FAILOVER_ERROR:
                update.setdefault('status', 'error')
                # Set all volume snapshots to error
                for snapshot in volume.snapshots:
                    snapshot.status = fields.SnapshotStatus.ERROR
                    snapshot.save()
            if 'status' in update:
                update['previous_status'] = volume.status
            volume.update(update)
            volume.save()
        for grp in groups:
            update = update_group_data.get(grp.id, {})
            if update.get('status', '') == 'error':
                update['replication_status'] = repl_status.FAILOVER_ERROR
            elif update.get('replication_status') in (None,
                                                      repl_status.FAILED_OVER):
                update['replication_status'] = updates['replication_status']
            if update['replication_status'] == repl_status.FAILOVER_ERROR:
                update.setdefault('status', 'error')
            grp.update(update)
            grp.save()
        LOG.info("Failed over to replication target successfully.")

    # TODO(geguileo): In P - remove this
    failover_host = failover
    def finish_failover(self, context, service, updates):
        """Persist failover result on the service (and cluster, if any)."""
        # If the service is clustered, broadcast the service changes to all
        # volume services, including this one.
        if service.is_clustered:
            # We have to update the cluster with the same data, and we do it
            # before broadcasting the failover_completed RPC call to prevent
            # races with services that may be starting..
            for key, value in updates.items():
                setattr(service.cluster, key, value)
            service.cluster.save()
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.failover_completed(context, service, updates)
        else:
            service.update(updates)
            service.save()
    def failover_completed(self, context, updates):
        """Apply failover updates broadcast by ``finish_failover``.

        Updates this service's record and notifies the driver of the new
        active backend.  A driver failure disables the service and marks
        its replication status as errored; the record is saved either way.
        """
        service = self._get_service()
        service.update(updates)
        try:
            self.driver.failover_completed(context, service.active_backend_id)
        except Exception:
            msg = _('Driver reported error during replication failover '
                    'completion.')
            LOG.exception(msg)
            service.disabled = True
            service.disabled_reason = msg
            service.replication_status = (
                fields.ReplicationStatus.ERROR)
        service.save()
    def freeze_host(self, context):
        """Freeze management plane on this backend.

        Disables the service with reason "frozen" and notifies the driver.

        :returns: True (the service is always marked frozen).
        """
        # TODO(jdg): Return from driver? or catch?
        # Update status column in service entry
        try:
            self.driver.freeze_backend(context)
        except exception.VolumeDriverException:
            # NOTE(jdg): In the case of freeze, we don't really
            # want a driver notification failure to abort the operation;
            # the service is still marked frozen below, we just warn that
            # the backend itself could not be told.
            LOG.warning('Error encountered on Cinder backend during '
                        'freeze operation, service is frozen, however '
                        'notification to driver has failed.')
        service = self._get_service()
        service.disabled = True
        service.disabled_reason = "frozen"
        service.save()
        LOG.info("Set backend status to frozen successfully.")
        return True
def thaw_host(self, context):
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
LOG.error('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.')
return False
service = self._get_service()
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info("Thawed backend successfully.")
return True
    def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
        """Import an existing backend snapshot into Cinder.

        Builds and runs the ``manage_existing_snapshot`` taskflow, which
        validates *ref* against the driver and fills in the snapshot
        record.

        :returns: the managed snapshot's id.
        :raises CinderException: when the flow cannot be created.
        """
        LOG.debug('manage_existing_snapshot: managing %s.', ref)
        try:
            flow_engine = manage_existing_snapshot.get_flow(
                ctxt,
                self.db,
                self.driver,
                self.host,
                snapshot.id,
                ref)
        except Exception:
            LOG.exception("Failed to create manage_existing flow: "
                          "%(object_type)s %(object_id)s.",
                          {'object_type': 'snapshot',
                           'object_id': snapshot.id})
            raise exception.CinderException(
                _("Failed to create manage existing flow."))
        with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
            flow_engine.run()
        return snapshot.id
    def get_manageable_snapshots(self, ctxt, marker, limit, offset,
                                 sort_keys, sort_dirs, want_objects=False):
        """List backend snapshots that could be managed by Cinder.

        Returns an empty list when the driver does not implement the
        listing API; driver errors and an uninitialized driver are logged
        and re-raised.
        """
        try:
            utils.require_driver_initialized(self.driver)
        except exception.DriverNotInitialized:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable snapshots failed, due "
                              "to uninitialized driver.")
        cinder_snapshots = self._get_my_snapshots(ctxt)
        try:
            driver_entries = self.driver.get_manageable_snapshots(
                cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
            if want_objects:
                driver_entries = (objects.ManageableSnapshotList.
                                  from_primitives(ctxt, driver_entries))
        except AttributeError:
            # Driver has no get_manageable_snapshots attribute.
            LOG.debug('Driver does not support listing manageable snapshots.')
            return []
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Listing manageable snapshots failed, due "
                              "to driver error.")
        return driver_entries
def get_capabilities(self, context, discover):
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
@utils.trace
def get_backup_device(self, ctxt, backup, want_objects=False,
async_call=False):
try:
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
except Exception as ex:
if async_call:
LOG.exception("Failed to get backup device. "
"Calling backup continue_backup to cleanup")
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device=None)
return
else:
while excutils.save_and_reraise_exception():
LOG.exception("Failed to get backup device.")
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
backup_device = (
objects.BackupDeviceInfo.from_primitive(backup_device_dict, ctxt)
if want_objects else backup_device_dict)
if async_call:
LOG.info("Calling backup continue_backup for: {}".format(backup))
rpcapi = backup_rpcapi.BackupAPI()
rpcapi.continue_backup(ctxt, backup, backup_device)
else:
# so we fallback to returning the value itself.
return backup_device
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
    def _connection_create(self, ctxt, volume, attachment, connector):
        """Validate the connector, export the volume and initialize the
        connection, returning flattened connection info for the caller.

        :param ctxt: request context.
        :param volume: Volume object to connect.
        :param attachment: VolumeAttachment record being established.
        :param connector: dict describing the initiator host.
        :returns: flattened connection_info dict including attachment_id.
        :raises: InvalidInput, VolumeBackendAPIException, ExportFailure
        """
        try:
            self.driver.validate_connector(connector)
        except exception.InvalidConnectorException as err:
            raise exception.InvalidInput(reason=six.text_type(err))
        except Exception as err:
            err_msg = (_("Validate volume connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.error(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            model_update = self.driver.create_export(ctxt.elevated(),
                                                     volume, connector)
        except exception.CinderException as ex:
            err_msg = (_("Create export for volume failed (%s).") % ex.msg)
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        try:
            # Persist any driver-side export info (e.g. provider_location).
            if model_update:
                volume.update(model_update)
                volume.save()
        except exception.CinderException as ex:
            LOG.exception("Model update failed.", resource=volume)
            raise exception.ExportFailure(reason=six.text_type(ex))
        try:
            conn_info = self.driver.initialize_connection(volume, connector)
        except exception.ConnectorRejected:
            with excutils.save_and_reraise_exception():
                LOG.info("The connector was rejected by the volume driver.")
        except Exception as err:
            err_msg = (_("Driver initialize connection failed "
                         "(error: %(err)s).") % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            # Roll back the export created above before surfacing the error.
            self.driver.remove_export(ctxt.elevated(), volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        conn_info = self._parse_connection_options(ctxt, volume, conn_info)
        # NOTE(jdg): Get rid of the nested dict (data key)
        conn_data = conn_info.pop('data', {})
        connection_info = conn_data.copy()
        connection_info.update(conn_info)
        values = {'volume_id': volume.id,
                  'attach_status': 'attaching',
                  'connector': jsonutils.dumps(connector)}
        # TODO(mriedem): Use VolumeAttachment.save() here.
        self.db.volume_attachment_update(ctxt, attachment.id, values)
        connection_info['attachment_id'] = attachment.id
        return connection_info
    def attachment_update(self,
                          context,
                          vref,
                          connector,
                          attachment_id):
        """Create the connection for an existing attachment record and mark
        the volume attached.

        :param context: request context.
        :param vref: Volume object being attached.
        :param connector: dict describing the initiator host (may carry a
            legacy 'mode' key).
        :param attachment_id: id of the VolumeAttachment to update.
        :returns: connection_info dict from the driver.
        """
        mode = connector.get('mode', 'rw')
        self._notify_about_volume_usage(context, vref, 'attach.start')
        attachment_ref = objects.VolumeAttachment.get_by_id(context,
                                                            attachment_id)
        # Check to see if a mode parameter was set during attachment-create;
        # this seems kinda wonky, but it's how we're keeping back compatibility
        # with the use of connector.mode for now. In other words, an explicit
        # attach_mode on the attachment record wins over connector['mode'].
        if attachment_ref.attach_mode != 'null':
            mode = attachment_ref.attach_mode
            connector['mode'] = mode
        connection_info = self._connection_create(context,
                                                  vref,
                                                  attachment_ref,
                                                  connector)
        try:
            utils.require_driver_initialized(self.driver)
            self.driver.attach_volume(context,
                                      vref,
                                      attachment_ref.instance_uuid,
                                      connector.get('host', ''),
                                      connector.get('mountpoint', 'na'))
        except Exception as err:
            # Surface the failure as a user message, mark the attachment
            # errored, then re-raise to the caller.
            self.message_api.create(
                context, message_field.Action.UPDATE_ATTACHMENT,
                resource_uuid=vref.id,
                exception=err)
            with excutils.save_and_reraise_exception():
                self.db.volume_attachment_update(
                    context, attachment_ref.id,
                    {'attach_status':
                     fields.VolumeAttachStatus.ERROR_ATTACHING})
        self.db.volume_attached(context.elevated(),
                                attachment_ref.id,
                                attachment_ref.instance_uuid,
                                connector.get('host', ''),
                                connector.get('mountpoint', 'na'),
                                mode,
                                False)
        vref.refresh()
        attachment_ref.refresh()
        LOG.info("attachment_update completed successfully.",
                 resource=vref)
        return connection_info
    def _connection_terminate(self, context, volume,
                              attachment, force=False):
        """Ask the driver to terminate the backend connection for an
        attachment.

        :param context: request context.
        :param volume: Volume object whose connection is being torn down.
        :param attachment: VolumeAttachment record.
        :param force: proceed even without a stored connector.
        :returns: True/False if the connection is shared with other
            attachments, or None when no terminate call was made.
        """
        utils.require_driver_initialized(self.driver)
        connector = attachment.connector
        if not connector and not force:
            # An attachment can be created without a host connector (e.g.
            # in nova, and a shelved offloaded server is not on a compute host,
            # which means the attachment was made without a host connector,
            # so if we don't have a connector we can't terminate a connection
            # that was never actually made to the storage backend, so just
            # log a message and exit.
            LOG.debug('No connector for attachment %s; skipping storage '
                      'backend terminate_connection call.', attachment.id)
            # None indicates we don't know and don't care.
            return None
        try:
            shared_connections = self.driver.terminate_connection(volume,
                                                                  connector,
                                                                  force=force)
            # Normalize non-bool driver returns to "not shared".
            if not isinstance(shared_connections, bool):
                shared_connections = False
        except Exception as err:
            err_msg = (_('Terminate volume connection failed: %(err)s')
                       % {'err': six.text_type(err)})
            LOG.exception(err_msg, resource=volume)
            raise exception.VolumeBackendAPIException(data=err_msg)
        LOG.info("Terminate volume connection completed successfully.",
                 resource=volume)
        # NOTE(jdg): Return True/False if there are other outstanding
        # attachments that share this connection. If True should signify
        # caller to preserve the actual host connection (work should be
        # done in the brick connector as it has the knowledge of what's
        # shared on the host).
        return shared_connections
def attachment_delete(self, context, attachment_id, vref):
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
self._do_attachment_delete(context, vref, attachment)
else:
self._do_attachment_delete(context, vref, attachment_ref)
    def _do_attachment_delete(self, context, vref, attachment):
        """Terminate the connection and detach a single attachment.

        On driver failure the attachment is marked ERROR_DETACHING; on
        success the volume is marked detached and its attached_mode admin
        metadata is removed.
        """
        utils.require_driver_initialized(self.driver)
        self._notify_about_volume_usage(context, vref, "detach.start")
        has_shared_connection = self._connection_terminate(context,
                                                           vref,
                                                           attachment)
        try:
            LOG.debug('Deleting attachment %(attachment_id)s.',
                      {'attachment_id': attachment.id},
                      resource=vref)
            self.driver.detach_volume(context, vref, attachment)
            # Only remove the export when we know no other attachment
            # shares this backend connection (None means unknown).
            if has_shared_connection is not None and not has_shared_connection:
                self.driver.remove_export(context.elevated(), vref)
        except Exception:
            self.db.volume_attachment_update(
                context, attachment.get('id'),
                {'attach_status': fields.VolumeAttachStatus.ERROR_DETACHING})
        else:
            self.db.volume_detached(context.elevated(), vref.id,
                                    attachment.get('id'))
            self.db.volume_admin_metadata_delete(context.elevated(),
                                                 vref.id,
                                                 'attached_mode')
        self._notify_about_volume_usage(context, vref, "detach.end")
    def enable_replication(self, ctxt, group):
        """Enable replication for a volume group and all its volumes.

        The group and every member volume must already be in the ENABLING
        replication state (set by the API layer) or the request is rejected.

        :param ctxt: request context.
        :param group: Group object to enable replication on.
        :raises: InvalidGroup, InvalidVolume, VolumeDriverException,
            ReplicationGroupError
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.ENABLING:
            msg = _("Replication status in group %s is not "
                    "enabling. Cannot enable replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if vol.replication_status != fields.ReplicationStatus.ENABLING:
                msg = _("Replication status in volume %s is not "
                        "enabling. Cannot enable replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
        self._notify_about_group_usage(
            ctxt, group, "enable_replication.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            model_update, volumes_model_update = (
                self.driver.enable_replication(ctxt, group, volumes))
            # Persist per-volume driver updates; a per-volume ERROR is
            # promoted to the group-level model update.
            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')
            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Enable replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except exception.CinderException as ex:
            # Driver failure: mark the group (and, absent per-volume
            # updates, every volume) as errored before re-raising.
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Enable replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)
        for vol in volumes:
            vol.replication_status = fields.ReplicationStatus.ENABLED
            vol.save()
        group.replication_status = fields.ReplicationStatus.ENABLED
        group.save()
        self._notify_about_group_usage(
            ctxt, group, "enable_replication.end", volumes)
        LOG.info("Enable replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
    def disable_replication(self, ctxt, group):
        """Disable replication for a volume group and all its volumes.

        The group and every member volume must already be in the DISABLING
        replication state (set by the API layer) or the request is rejected.

        :param ctxt: request context.
        :param group: Group object to disable replication on.
        :raises: InvalidGroup, InvalidVolume, VolumeDriverException,
            ReplicationGroupError
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.DISABLING:
            msg = _("Replication status in group %s is not "
                    "disabling. Cannot disable replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if (vol.replication_status !=
                    fields.ReplicationStatus.DISABLING):
                msg = _("Replication status in volume %s is not "
                        "disabling. Cannot disable replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
        self._notify_about_group_usage(
            ctxt, group, "disable_replication.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            model_update, volumes_model_update = (
                self.driver.disable_replication(ctxt, group, volumes))
            # Persist per-volume driver updates; a per-volume ERROR is
            # promoted to the group-level model update.
            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')
            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Disable replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except exception.CinderException as ex:
            # Driver failure: mark the group (and, absent per-volume
            # updates, every volume) as errored before re-raising.
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Disable replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)
        for vol in volumes:
            vol.replication_status = fields.ReplicationStatus.DISABLED
            vol.save()
        group.replication_status = fields.ReplicationStatus.DISABLED
        group.save()
        self._notify_about_group_usage(
            ctxt, group, "disable_replication.end", volumes)
        LOG.info("Disable replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
    def failover_replication(self, ctxt, group, allow_attached_volume=False,
                             secondary_backend_id=None):
        """Fail a replicated volume group over to a secondary backend.

        The group and every member volume must be in the FAILING_OVER
        replication state. When secondary_backend_id is "default" this is a
        failback and the final state is ENABLED; otherwise FAILED_OVER.

        :param ctxt: request context.
        :param group: Group object to fail over.
        :param allow_attached_volume: permit failover while volumes are
            in-use.
        :param secondary_backend_id: target backend, or "default" to fail
            back.
        :raises: InvalidGroup, InvalidVolume, VolumeDriverException,
            ReplicationGroupError
        """
        group.refresh()
        if group.replication_status != fields.ReplicationStatus.FAILING_OVER:
            msg = _("Replication status in group %s is not "
                    "failing-over. Cannot failover replication.") % group.id
            LOG.error(msg)
            raise exception.InvalidGroup(reason=msg)
        volumes = group.volumes
        for vol in volumes:
            vol.refresh()
            if vol.status == 'in-use' and not allow_attached_volume:
                msg = _("Volume %s is attached but allow_attached_volume flag "
                        "is False. Cannot failover replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
            if (vol.replication_status !=
                    fields.ReplicationStatus.FAILING_OVER):
                msg = _("Replication status in volume %s is not "
                        "failing-over. Cannot failover replication.") % vol.id
                LOG.error(msg)
                raise exception.InvalidVolume(reason=msg)
        self._notify_about_group_usage(
            ctxt, group, "failover_replication.start")
        volumes_model_update = None
        model_update = None
        try:
            utils.require_driver_initialized(self.driver)
            model_update, volumes_model_update = (
                self.driver.failover_replication(
                    ctxt, group, volumes, secondary_backend_id))
            # Persist per-volume driver updates; a per-volume ERROR is
            # promoted to the group-level model update.
            if volumes_model_update:
                for update in volumes_model_update:
                    vol_obj = objects.Volume.get_by_id(ctxt, update['id'])
                    vol_obj.update(update)
                    vol_obj.save()
                    if (update.get('replication_status') ==
                            fields.ReplicationStatus.ERROR and
                            model_update.get('replication_status') !=
                            fields.ReplicationStatus.ERROR):
                        model_update['replication_status'] = update.get(
                            'replication_status')
            if model_update:
                if (model_update.get('replication_status') ==
                        fields.ReplicationStatus.ERROR):
                    msg = _('Failover replication failed.')
                    LOG.error(msg,
                              resource={'type': 'group',
                                        'id': group.id})
                    raise exception.VolumeDriverException(message=msg)
                else:
                    group.update(model_update)
                    group.save()
        except exception.CinderException as ex:
            # Driver failure: mark the group (and, absent per-volume
            # updates, every volume) as errored before re-raising.
            group.status = fields.GroupStatus.ERROR
            group.replication_status = fields.ReplicationStatus.ERROR
            group.save()
            if not volumes_model_update:
                for vol in volumes:
                    vol.status = 'error'
                    vol.replication_status = fields.ReplicationStatus.ERROR
                    vol.save()
            err_msg = _("Failover replication group failed: "
                        "%s.") % six.text_type(ex)
            raise exception.ReplicationGroupError(reason=err_msg,
                                                  group_id=group.id)
        for vol in volumes:
            # Failback ("default") re-enables replication; otherwise the
            # volume is now running on the secondary.
            if secondary_backend_id == "default":
                vol.replication_status = fields.ReplicationStatus.ENABLED
            else:
                vol.replication_status = (
                    fields.ReplicationStatus.FAILED_OVER)
            vol.save()
        if secondary_backend_id == "default":
            group.replication_status = fields.ReplicationStatus.ENABLED
        else:
            group.replication_status = fields.ReplicationStatus.FAILED_OVER
        group.save()
        self._notify_about_group_usage(
            ctxt, group, "failover_replication.end", volumes)
        LOG.info("Failover replication completed successfully.",
                 resource={'type': 'group',
                           'id': group.id})
def list_replication_targets(self, ctxt, group):
replication_targets = []
try:
group.refresh()
if self.configuration.replication_device:
if ctxt.is_admin:
for rep_dev in self.configuration.replication_device:
keys = rep_dev.keys()
dev = {}
for k in keys:
dev[k] = rep_dev[k]
replication_targets.append(dev)
else:
for rep_dev in self.configuration.replication_device:
dev = rep_dev.get('backend_id')
if dev:
replication_targets.append({'backend_id': dev})
except exception.GroupNotFound:
err_msg = (_("Get replication targets failed. Group %s not "
"found.") % group.id)
LOG.exception(err_msg)
raise exception.VolumeBackendAPIException(data=err_msg)
return {'replication_targets': replication_targets}
| true | true |
f715b378abaeeb7ac3904a1e1b60d24e706d5389 | 1,803 | py | Python | tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 1,918 | 2019-02-22T21:17:28.000Z | 2022-03-30T14:49:53.000Z | tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 999 | 2019-02-22T21:47:44.000Z | 2022-03-31T11:06:42.000Z | tensorflow_federated/python/core/impl/executor_stacks/executor_stack_bindings_test.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | 498 | 2019-02-22T21:17:56.000Z | 2022-03-29T02:54:15.000Z | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pybind11_abseil import status as absl_status
from tensorflow_federated.python.core.impl.executor_stacks import executor_stack_bindings
from tensorflow_federated.python.core.impl.executors import executor_bindings
from tensorflow_federated.python.core.impl.types import placements
_TARGET_LIST = ['localhost:8000', 'localhost:8001']
_CARDINALITIES = {placements.CLIENTS: 5}
class ExecutorStackBindingsTest(parameterized.TestCase):
  """Tests for the pybind11 remote executor stack bindings."""

  @parameterized.named_parameters(('from_target_list', list),
                                  ('from_target_tuple', tuple),
                                  ('from_target_ndarray', np.array))
  def test_executor_construction_raises_no_channels_available(
      self, container_constructor):
    # Parameterized over container types to check that channels may be
    # supplied as any sequence-like container. No server is listening on
    # the target addresses, so stack creation must fail with UNAVAILABLE.
    with self.assertRaisesRegex(absl_status.StatusNotOk, 'UNAVAILABLE'):
      executor_stack_bindings.create_remote_executor_stack(
          channels=container_constructor([
              executor_bindings.create_insecure_grpc_channel(t)
              for t in _TARGET_LIST
          ]),
          cardinalities=_CARDINALITIES)


if __name__ == '__main__':
  absltest.main()
| 39.195652 | 89 | 0.749307 |
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pybind11_abseil import status as absl_status
from tensorflow_federated.python.core.impl.executor_stacks import executor_stack_bindings
from tensorflow_federated.python.core.impl.executors import executor_bindings
from tensorflow_federated.python.core.impl.types import placements
_TARGET_LIST = ['localhost:8000', 'localhost:8001']
_CARDINALITIES = {placements.CLIENTS: 5}
class ExecutorStackBindingsTest(parameterized.TestCase):
@parameterized.named_parameters(('from_target_list', list),
('from_target_tuple', tuple),
('from_target_ndarray', np.array))
def test_executor_construction_raises_no_channels_available(
self, container_constructor):
with self.assertRaisesRegex(absl_status.StatusNotOk, 'UNAVAILABLE'):
executor_stack_bindings.create_remote_executor_stack(
channels=container_constructor([
executor_bindings.create_insecure_grpc_channel(t)
for t in _TARGET_LIST
]),
cardinalities=_CARDINALITIES)
if __name__ == '__main__':
absltest.main()
| true | true |
f715b38795d175d33576ab05dea9a3fe41688f13 | 1,863 | py | Python | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null |
from detect import (detect_logos, detect_text)
import pandas as pd
import re
import os
#from __future__ import print_function
from google.cloud import vision

# Folder with the crawled product images.
# NOTE(review): machine-specific absolute path — consider making this
# configurable (env var / CLI argument).
images_path = "C:\\Users\\heinz\\Yagora GmbH\\Ievgen Kyrda - Crawler\\images\\foodnewsgermany_images/"
file_names = os.listdir(os.path.dirname(images_path))
file_paths = [images_path + f for f in file_names]

# Run Vision API logo detection and OCR on every image.
logos = [detect_logos(f) for f in file_paths]
texts = [detect_text(f)[0].description for f in file_paths]
# remove line break symbols
texts = [x.replace("\n", ", ") for x in texts]

# For each image, keep only the detected logos that also appear in the
# OCR'd text — those are the most probable brand names.
contained = []
for i in range(len(logos)):  # loop over future rows of df
    tmp = []
    for j in logos[i]:  # for every logo-row, check if in text
        if j.lower() in texts[i].lower():
            # BUG FIX: append the matching logo itself, not the whole
            # per-image logo list.
            tmp.append(j)
        else:
            tmp.append(None)
    contained.append(tmp)
detect_df = pd.DataFrame(
    list(zip(file_names, texts, logos, contained, file_paths)),
    columns=["files", "texts", "logos", "probable_brand", "file_path"]
)
detect_df
# other ideas:
# if logo in existing logos, add logo
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import base64
pd.set_option('display.max_colwidth', -1)
def get_thumbnail(path):
    """Load the image at *path* and shrink it in place to fit 150x150."""
    img = Image.open(path)
    img.thumbnail((150, 150), Image.LANCZOS)
    return img
def image_base64(im):
    """Return the base64-encoded JPEG bytes of *im* (a path or PIL image)."""
    img = get_thumbnail(im) if isinstance(im, str) else im
    with BytesIO() as buf:
        img.save(buf, 'jpeg')
        raw = buf.getvalue()
    return base64.b64encode(raw).decode()
def image_formatter(im):
    """Render *im* as an inline HTML <img> tag with embedded JPEG data."""
    return '<img src="data:image/jpeg;base64,{}">'.format(image_base64(im))
# Attach a thumbnail to every row and render the table with inline images.
detect_df['image'] = detect_df['file_path'].map(get_thumbnail)
HTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))
from detect import (detect_logos, detect_text)
import pandas as pd
import re
import os
from google.cloud import vision
images_path = "C:\\Users\\heinz\\Yagora GmbH\\Ievgen Kyrda - Crawler\\images\\foodnewsgermany_images/"
file_names = os.listdir(os.path.dirname(images_path))
file_paths = [images_path + f for f in file_names]
logos = [detect_logos(f) for f in file_paths]
texts = [detect_text(f)[0].description for f in file_paths]
texts = [x.replace("\n", ", ") for x in texts]
contained = []
for i in range(len(logos)):
tmp = []
for j in logos[i]:
if j.lower() in texts[i].lower():
tmp.append(logos[i])
else:
tmp.append(None)
contained.append(tmp)
detect_df = pd.DataFrame(
list(zip(file_names, texts, logos, contained, file_paths)),
columns = ["files", "texts", "logos", "probable_brand", "file_path"]
)
detect_df
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import base64
pd.set_option('display.max_colwidth', -1)
def get_thumbnail(path):
i = Image.open(path)
i.thumbnail((150, 150), Image.LANCZOS)
return i
def image_base64(im):
if isinstance(im, str):
im = get_thumbnail(im)
with BytesIO() as buffer:
im.save(buffer, 'jpeg')
return base64.b64encode(buffer.getvalue()).decode()
def image_formatter(im):
return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'
detect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))
HTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False)) | true | true |
f715b443834e4ea4db5d450ce663e81845d95977 | 749 | py | Python | Python/total-appeal-of-a-string.py | Priyansh2/LeetCode-Solutions | d613da1881ec2416ccbe15f20b8000e36ddf1291 | [
"MIT"
] | 4 | 2018-10-11T17:50:56.000Z | 2018-10-11T21:16:44.000Z | Python/total-appeal-of-a-string.py | Priyansh2/LeetCode-Solutions | d613da1881ec2416ccbe15f20b8000e36ddf1291 | [
"MIT"
] | null | null | null | Python/total-appeal-of-a-string.py | Priyansh2/LeetCode-Solutions | d613da1881ec2416ccbe15f20b8000e36ddf1291 | [
"MIT"
] | 4 | 2018-10-11T18:50:32.000Z | 2018-10-12T00:04:09.000Z | # Time: O(n)
# Space: O(26)
# combinatorics
class Solution(object):
    def appealSum(self, s):
        """
        :type s: str
        :rtype: int

        For index i, (i - last[c]) is the number of substrings ending at i
        in which s[i] is the last occurrence of its character; multiplying
        by the count of possible right endpoints (len(s) - i) adds that
        character's total appeal contribution in one step.
        """
        total = 0
        last = {}
        n = len(s)
        for idx, ch in enumerate(s):
            total += (idx - last.get(ch, -1)) * (n - idx)
            last[ch] = idx
        return total
# Time: O(n)
# Space: O(26)
# counting
class Solution2(object):
    def appealSum(self, s):
        """
        :type s: str
        :rtype: int

        running holds the total appeal of all substrings ending at the
        current index; extending by one character adds (i - last[c]) new
        distinct-character contributions, and the grand total accumulates
        running at every step.
        """
        total = 0
        running = 0
        last = {}
        for idx, ch in enumerate(s):
            running += idx - last.get(ch, -1)
            last[ch] = idx
            total += running
        return total
| 20.805556 | 60 | 0.445928 |
class Solution(object):
def appealSum(self, s):
result = curr = 0
lookup = [-1]*26
for i, c in enumerate(s):
result += (i-lookup[ord(c)-ord('a')])*(len(s)-i)
lookup[ord(c)-ord('a')] = i
return result
class Solution2(object):
def appealSum(self, s):
result = cnt = 0
lookup = [-1]*26
for i, c in enumerate(s):
cnt += i-lookup[ord(c)-ord('a')]
lookup[ord(c)-ord('a')] = i
result += cnt
return result
| true | true |
f715b4574d66756d1158d3deb81d9fa1b677cc30 | 3,454 | py | Python | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-gaussdbfornosql/huaweicloudsdkgaussdbfornosql/v3/model/batch_tag_action_request_body.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class BatchTagActionRequestBody:
    """Request body for batch tag add/delete operations.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'action': 'str',
        'tags': 'list[BatchTagActionTagOption]'
    }

    attribute_map = {
        'action': 'action',
        'tags': 'tags'
    }

    def __init__(self, action=None, tags=None):
        """BatchTagActionRequestBody - a model defined in huaweicloud sdk"""
        self._action = None
        self._tags = None
        self.discriminator = None
        self.action = action
        self.tags = tags

    @property
    def action(self):
        """Gets the action of this BatchTagActionRequestBody.

        操作标识。取值: - create,表示添加标签。 - delete,表示删除标签。

        :return: The action of this BatchTagActionRequestBody.
        :rtype: str
        """
        return self._action

    @action.setter
    def action(self, action):
        """Sets the action of this BatchTagActionRequestBody.

        操作标识。取值: - create,表示添加标签。 - delete,表示删除标签。

        :param action: The action of this BatchTagActionRequestBody.
        :type: str
        """
        self._action = action

    @property
    def tags(self):
        """Gets the tags of this BatchTagActionRequestBody.

        标签列表。

        :return: The tags of this BatchTagActionRequestBody.
        :rtype: list[BatchTagActionTagOption]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this BatchTagActionRequestBody.

        标签列表。

        :param tags: The tags of this BatchTagActionRequestBody.
        :type: list[BatchTagActionTagOption]
        """
        self._tags = tags

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict")
                                    else v)
                                for k, v in value.items()}
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, BatchTagActionRequestBody) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
import pprint
import re
import six
class BatchTagActionRequestBody:
sensitive_list = []
openapi_types = {
'action': 'str',
'tags': 'list[BatchTagActionTagOption]'
}
attribute_map = {
'action': 'action',
'tags': 'tags'
}
def __init__(self, action=None, tags=None):
self._action = None
self._tags = None
self.discriminator = None
self.action = action
self.tags = tags
@property
def action(self):
return self._action
@action.setter
def action(self, action):
self._action = action
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, tags):
self._tags = tags
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, BatchTagActionRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f715b4c57e072ac7d2f65d981630ad2bb277941f | 5,616 | py | Python | src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py | nirarg/assisted-test-infra | e07c43501c1d9bfaa1aee3aea49f1ef359faee07 | [
"Apache-2.0"
] | null | null | null | src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py | nirarg/assisted-test-infra | e07c43501c1d9bfaa1aee3aea49f1ef359faee07 | [
"Apache-2.0"
] | 248 | 2020-11-09T06:47:39.000Z | 2022-03-28T06:02:39.000Z | src/assisted_test_infra/test_infra/controllers/node_controllers/node_controller.py | nirarg/assisted-test-infra | e07c43501c1d9bfaa1aee3aea49f1ef359faee07 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, SupportsAbs, Tuple, TypeVar
import libvirt
from assisted_test_infra.test_infra import BaseEntityConfig
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from assisted_test_infra.test_infra.controllers.node_controllers.node import Node
from assisted_test_infra.test_infra.helper_classes.config.controller_config import BaseNodeConfig
from service_client import log
class NodeController(ABC):
    """Abstract interface for managing the machines (nodes) of a test cluster.

    Concrete subclasses (e.g. libvirt- or cloud-backed controllers) implement
    node lifecycle, disk, boot-order and network management used by the
    assisted-installer test infrastructure.
    """

    # NOTE(review): bounding T by SupportsAbs[BaseNodeConfig] looks
    # unintended (configs are unlikely to support abs()) — confirm whether
    # a plain bound=BaseNodeConfig was meant.
    T = TypeVar("T", bound=SupportsAbs[BaseNodeConfig])

    def __init__(self, config: T, entity_config: BaseEntityConfig):
        self._config = config
        self._entity_config = entity_config

    def log_configuration(self):
        """Log the controller's node configuration for debugging."""
        log.info(f"controller configuration={self._config}")

    @property
    def workers_count(self):
        # Number of worker nodes requested by the configuration.
        return self._config.workers_count

    @property
    def masters_count(self):
        # Number of master (control-plane) nodes requested.
        return self._config.masters_count

    @property
    def is_ipv4(self):
        return self._config.is_ipv4

    @property
    def is_ipv6(self):
        return self._config.is_ipv6

    @abstractmethod
    def list_nodes(self) -> List[Node]:
        """Return all nodes managed by this controller."""
        pass

    @abstractmethod
    def list_disks(self, node_name: str) -> List[Disk]:
        """Return the disks attached to the given node."""
        pass

    @abstractmethod
    def list_networks(self) -> List[Any]:
        pass

    @abstractmethod
    def list_leases(self, network_name: str) -> List[Any]:
        """Return the DHCP leases handed out on the given network."""
        pass

    @abstractmethod
    def shutdown_node(self, node_name: str) -> None:
        pass

    @abstractmethod
    def shutdown_all_nodes(self) -> None:
        pass

    @abstractmethod
    def start_node(self, node_name: str, check_ips: bool) -> None:
        pass

    @abstractmethod
    def start_all_nodes(self) -> List[Node]:
        pass

    @abstractmethod
    def restart_node(self, node_name: str) -> None:
        pass

    @abstractmethod
    def format_node_disk(self, node_name: str, disk_index: int = 0) -> None:
        """Wipe the given disk (by index) of the given node."""
        pass

    @abstractmethod
    def format_all_node_disks(self) -> None:
        pass

    @abstractmethod
    def attach_test_disk(self, node_name: str, disk_size: int, bootable=False, persistent=False, with_wwn=False):
        """
        Attaches a test disk. That disk can later be detached with `detach_all_test_disks`
        :param with_wwn: Weather the disk should have a WWN(World Wide Name), Having a WWN creates a disk by-id link
        :param node_name: Node to attach disk to
        :param disk_size: Size of disk to attach
        :param bootable: Whether to format an MBR sector at the beginning of the disk
        :param persistent: Whether the disk should survive shutdowns
        """
        pass

    @abstractmethod
    def detach_all_test_disks(self, node_name: str):
        """
        Detaches all test disks created by `attach_test_disk`
        :param node_name: Node to detach disk from
        """
        pass

    @abstractmethod
    def get_ingress_and_api_vips(self) -> dict:
        """Return the ingress and API virtual IPs for the cluster."""
        pass

    @abstractmethod
    def destroy_all_nodes(self) -> None:
        pass

    @abstractmethod
    def get_cluster_network(self) -> str:
        pass

    @abstractmethod
    def setup_time(self) -> str:
        pass

    @abstractmethod
    def prepare_nodes(self):
        """Create/prepare the nodes before a test run."""
        pass

    @abstractmethod
    def is_active(self, node_name) -> bool:
        """Return True when the named node is powered on."""
        pass

    @abstractmethod
    def set_boot_order(self, node_name, cd_first=False) -> None:
        """Set the node's boot device order (optionally CD-ROM first)."""
        pass

    @abstractmethod
    def set_per_device_boot_order(self, node_name, key: Callable[[Disk], int]) -> None:
        """
        Set the boot priority for every disk
        It sorts the disk according to the key function result
        :param node_name: The node to change its boot order
        :param key: a key function that gets a Disk object and decide it's priority
        """
        pass

    @abstractmethod
    def get_node_ips_and_macs(self, node_name) -> Tuple[List[str], List[str]]:
        """Return the node's (ips, macs) lists."""
        pass

    @abstractmethod
    def set_single_node_ip(self, ip) -> None:
        pass

    @abstractmethod
    def get_host_id(self, node_name: str) -> str:
        pass

    @abstractmethod
    def get_cpu_cores(self, node_name: str) -> int:
        pass

    @abstractmethod
    def set_cpu_cores(self, node_name: str, core_count: int) -> None:
        pass

    @abstractmethod
    def get_ram_kib(self, node_name: str) -> int:
        pass

    @abstractmethod
    def set_ram_kib(self, node_name: str, ram_kib: int) -> None:
        pass

    def get_primary_machine_cidr(self) -> Optional[str]:
        # Default to auto resolve by the cluster. see cluster.get_primary_machine_cidr
        return None

    def get_provisioning_cidr(self) -> Optional[str]:
        # None means no dedicated provisioning network is configured.
        return None

    @abstractmethod
    def attach_interface(self, node_name, network_xml: str) -> Tuple[libvirt.virNetwork, str]:
        """Create a network from XML and attach an interface on it; return (network, mac)."""
        pass

    @abstractmethod
    def add_interface(self, node_name, network_name, target_interface: str) -> str:
        """Attach an interface on an existing network; return the new MAC."""
        pass

    @abstractmethod
    def undefine_interface(self, node_name: str, mac: str):
        pass

    @abstractmethod
    def create_network(self, network_xml: str) -> libvirt.virNetwork:
        pass

    @abstractmethod
    def get_network_by_name(self, network_name: str) -> libvirt.virNetwork:
        pass

    @abstractmethod
    def destroy_network(self, network: libvirt.virNetwork):
        pass

    # The hooks below are optional: controllers that don't need them can
    # rely on these no-op defaults.
    def notify_iso_ready(self) -> None:
        pass

    def set_dns(self, api_vip: str, ingress_vip: str) -> None:
        pass

    def set_dns_for_user_managed_network(self) -> None:
        pass
| 26.742857 | 116 | 0.667379 | from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, SupportsAbs, Tuple, TypeVar
import libvirt
from assisted_test_infra.test_infra import BaseEntityConfig
from assisted_test_infra.test_infra.controllers.node_controllers.disk import Disk
from assisted_test_infra.test_infra.controllers.node_controllers.node import Node
from assisted_test_infra.test_infra.helper_classes.config.controller_config import BaseNodeConfig
from service_client import log
class NodeController(ABC):
T = TypeVar("T", bound=SupportsAbs[BaseNodeConfig])
def __init__(self, config: T, entity_config: BaseEntityConfig):
self._config = config
self._entity_config = entity_config
def log_configuration(self):
log.info(f"controller configuration={self._config}")
@property
def workers_count(self):
return self._config.workers_count
@property
def masters_count(self):
return self._config.masters_count
@property
def is_ipv4(self):
return self._config.is_ipv4
@property
def is_ipv6(self):
return self._config.is_ipv6
@abstractmethod
def list_nodes(self) -> List[Node]:
pass
@abstractmethod
def list_disks(self, node_name: str) -> List[Disk]:
pass
@abstractmethod
def list_networks(self) -> List[Any]:
pass
@abstractmethod
def list_leases(self, network_name: str) -> List[Any]:
pass
@abstractmethod
def shutdown_node(self, node_name: str) -> None:
pass
@abstractmethod
def shutdown_all_nodes(self) -> None:
pass
@abstractmethod
def start_node(self, node_name: str, check_ips: bool) -> None:
pass
@abstractmethod
def start_all_nodes(self) -> List[Node]:
pass
@abstractmethod
def restart_node(self, node_name: str) -> None:
pass
@abstractmethod
def format_node_disk(self, node_name: str, disk_index: int = 0) -> None:
pass
@abstractmethod
def format_all_node_disks(self) -> None:
pass
@abstractmethod
def attach_test_disk(self, node_name: str, disk_size: int, bootable=False, persistent=False, with_wwn=False):
pass
@abstractmethod
def detach_all_test_disks(self, node_name: str):
pass
@abstractmethod
def get_ingress_and_api_vips(self) -> dict:
pass
@abstractmethod
def destroy_all_nodes(self) -> None:
pass
@abstractmethod
def get_cluster_network(self) -> str:
pass
@abstractmethod
def setup_time(self) -> str:
pass
@abstractmethod
def prepare_nodes(self):
pass
@abstractmethod
def is_active(self, node_name) -> bool:
pass
@abstractmethod
def set_boot_order(self, node_name, cd_first=False) -> None:
pass
@abstractmethod
def set_per_device_boot_order(self, node_name, key: Callable[[Disk], int]) -> None:
pass
@abstractmethod
def get_node_ips_and_macs(self, node_name) -> Tuple[List[str], List[str]]:
pass
@abstractmethod
def set_single_node_ip(self, ip) -> None:
pass
@abstractmethod
def get_host_id(self, node_name: str) -> str:
pass
@abstractmethod
def get_cpu_cores(self, node_name: str) -> int:
pass
@abstractmethod
def set_cpu_cores(self, node_name: str, core_count: int) -> None:
pass
@abstractmethod
def get_ram_kib(self, node_name: str) -> int:
pass
@abstractmethod
def set_ram_kib(self, node_name: str, ram_kib: int) -> None:
pass
def get_primary_machine_cidr(self) -> Optional[str]:
return None
def get_provisioning_cidr(self) -> Optional[str]:
return None
@abstractmethod
def attach_interface(self, node_name, network_xml: str) -> Tuple[libvirt.virNetwork, str]:
pass
@abstractmethod
def add_interface(self, node_name, network_name, target_interface: str) -> str:
pass
@abstractmethod
def undefine_interface(self, node_name: str, mac: str):
pass
@abstractmethod
def create_network(self, network_xml: str) -> libvirt.virNetwork:
pass
@abstractmethod
def get_network_by_name(self, network_name: str) -> libvirt.virNetwork:
pass
@abstractmethod
def destroy_network(self, network: libvirt.virNetwork):
pass
def notify_iso_ready(self) -> None:
pass
def set_dns(self, api_vip: str, ingress_vip: str) -> None:
pass
def set_dns_for_user_managed_network(self) -> None:
pass
| true | true |
f715b57902684613288421b8c0d1be2ab344f1cf | 2,747 | py | Python | taxtea/checks.py | lowercase-app/django-taxtea | aa8184c1aceb67ecf34eda2e48184e810616f59f | [
"MIT"
] | 13 | 2020-07-20T17:35:32.000Z | 2021-09-25T02:11:44.000Z | taxtea/checks.py | lowercase-app/django-taxtea | aa8184c1aceb67ecf34eda2e48184e810616f59f | [
"MIT"
] | 51 | 2020-07-22T13:56:09.000Z | 2022-02-05T06:04:36.000Z | taxtea/checks.py | lowercase-app/django-taxtea | aa8184c1aceb67ecf34eda2e48184e810616f59f | [
"MIT"
] | null | null | null | from typing import List
from django.apps.config import AppConfig
from django.core.checks import CheckMessage, Critical, Tags, register
@register(Tags.compatibility)
def check_USPS_api_auth(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
"""
check_USPS_api_auth:
Checks if the user has supplied a USPS username/password.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.USPS_USER:
msg = "Could not find a USPS User."
hint = "Add TAXTEA_USPS_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C001"))
return messages
@register(Tags.compatibility)
def check_Avalara_api_auth(
app_configs: AppConfig = None, **kwargs
) -> List[CheckMessage]:
"""
check_Avalara_api_auth:
Checks if the user has supplied a Avalara username/password.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.AVALARA_USER:
msg = "Could not find a Avalara User."
hint = "Add TAXTEA_AVALARA_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C002"))
if not tax_settings.AVALARA_PASSWORD:
msg = "Could not find a Avalara Password."
hint = "Add TAXTEA_AVALARA_PASSWORD to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C003"))
return messages
@register(Tags.compatibility)
def check_origin_zips(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
"""
check_origin_zips:
Checks if the user has supplied at least one origin zip.
Args:
appconfig (AppConfig, optional): Defaults to None.
Returns:
List[checks.CheckMessage]: List of Django CheckMessages
"""
from . import settings as tax_settings
messages = []
if not tax_settings.NEXUSES:
msg = "Could not find a Nexus."
hint = "Add at least one TAXTEA_NEXUSES to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C004"))
# If there is no TAX_NEXUS, then the next check will throw an IndexError
return messages
state, zip_code = tax_settings.NEXUSES[0]
if not state and not zip_code:
msg = "Could not find a valid Nexus tuple."
hint = "Add at least one Nexus tuple ('STATE', 'ZIPCODE') to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C005"))
return messages
| 30.186813 | 87 | 0.67419 | from typing import List
from django.apps.config import AppConfig
from django.core.checks import CheckMessage, Critical, Tags, register
@register(Tags.compatibility)
def check_USPS_api_auth(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.USPS_USER:
msg = "Could not find a USPS User."
hint = "Add TAXTEA_USPS_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C001"))
return messages
@register(Tags.compatibility)
def check_Avalara_api_auth(
app_configs: AppConfig = None, **kwargs
) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.AVALARA_USER:
msg = "Could not find a Avalara User."
hint = "Add TAXTEA_AVALARA_USER to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C002"))
if not tax_settings.AVALARA_PASSWORD:
msg = "Could not find a Avalara Password."
hint = "Add TAXTEA_AVALARA_PASSWORD to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C003"))
return messages
@register(Tags.compatibility)
def check_origin_zips(app_configs: AppConfig = None, **kwargs) -> List[CheckMessage]:
from . import settings as tax_settings
messages = []
if not tax_settings.NEXUSES:
msg = "Could not find a Nexus."
hint = "Add at least one TAXTEA_NEXUSES to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C004"))
return messages
state, zip_code = tax_settings.NEXUSES[0]
if not state and not zip_code:
msg = "Could not find a valid Nexus tuple."
hint = "Add at least one Nexus tuple ('STATE', 'ZIPCODE') to your settings."
messages.append(Critical(msg, hint=hint, id="tax.C005"))
return messages
| true | true |
f715b7500a857c259eab2aab6854485671e9f369 | 6,405 | py | Python | lakshmi/cache.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 59 | 2021-09-07T05:19:30.000Z | 2022-02-24T18:29:49.000Z | lakshmi/cache.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 4 | 2021-08-01T18:32:51.000Z | 2022-02-26T19:14:37.000Z | lakshmi/cache.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 3 | 2021-08-01T04:35:07.000Z | 2022-03-23T21:48:51.000Z | """
This class is used to cache return value of functions on disk for a specified
number of days. This is used by lakshmi.assets module to cache name/ asset
value (i.e the slow functions). For examples on how to use this class, please
see the tests (tests/test_cache.py file).
Currently, this module can only be used on functions which are class members
and the function itself must take no arguments. These restrictions can be
easily relaxed, but so far that all usecases don't need anything more than what
is currently implemented.
In addition to caching values, this class also allows one to optionally call
a user-specified function on cache-misses (currently used to show a progress
bar to the user via the lak CLI).
"""
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
# Inspired by https://pypi.org/project/cache-to-disk/. I tried using other
# options such as requests-cache, but it was too slow compared to the solution
# implemented here.
class Cacheable(ABC):
"""Interface that declares that a particular class's method return
values could be cached. The methods should not take a parameter,
and cache_key() + method name should uniquely imply the return
value of that class."""
@abstractmethod
def cache_key(self):
"""Unique string value used as key for caching."""
pass
def get_file_age(file):
"""Returns the age of file.
Args:
file: A PosixPath object representing a file.
Returns: An int represeting the age in days.
"""
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
# Constants
# Default cache directory if none is specified.
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# Dict (string -> object) to keep cache context.
# Description of keys to what is stored:
# _CACHE_STR:
# The pathlib.Path object specifying cache directory. If set to None,
# caching is disabled. Default: _DEFAULT_DIR
# _FORCE_STR:
# If set to True, new values are re-generated once even if a cached one is
# available. This is meant for data that is cached for < month (stock prices
# and Treasury Bond value). Values that are cached for > 40 days ignore this
# flag. Default: False
# _FORCED_FILES_STR:
# A set of files which are already refreshed once due to _ctx[_FORCE_STR]
# being set to True. this is used to ensure we don't re-fetch same values
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
"""Sets whether cached values should be refreshed.
Args:
v: Boolean representing if cached values should be re-generated.
"""
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
"""Sets the function to call for cache-misses.
Args:
f: The function to call whenever a cache-miss happens (i.e. whenever
the underlying function is called instead of using a cached value).
"""
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
"""Sets the cache directory.
If the cache directory is not specified, default ~/.lakshmicache
is used.
Args:
cache_dir: The pathlib.Path object specifying cache directory.
If set to None, caching is disabled.
"""
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
# Delete old files whose cache values are invalid already.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
"""Helper function to check if the cached value from file is valid.
Args:
file: The Path object representing a file potentially containing
previously cached value.
days: Number of days after which the cached value becomes invalid.
Returns: True iff the cached value in file is valid.
"""
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
# Ignore cached value.
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
"""Helper function to return value of class_obj.func().
In addition to calling function, this helper also calls the
cache_miss function if one is set in the context.
Args:
class_obj: The object of a particular class implementing Cacheable
interface.
func: The function whose return values has to be cached. Assumed
to take no parameters.
Returns: The return value of the func.
"""
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
"""Returns decorator that caches functions return value on disk for
specified number of days.
Args:
days: Number of days for which to cache the return value of the
function.
Returns: The decorator.
"""
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
# Cache dir not set. Set to default.
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
| 32.025 | 79 | 0.684465 |
import functools
import pickle
from abc import ABC, abstractmethod
from datetime import datetime
from hashlib import md5
from pathlib import Path
class Cacheable(ABC):
@abstractmethod
def cache_key(self):
pass
def get_file_age(file):
return (datetime.today()
- datetime.fromtimestamp(file.stat().st_mtime)).days
_DEFAULT_DIR = Path.home() / '.lakshmicache'
_CACHE_STR = 'cache_dir'
_FORCE_STR = 'force_refresh'
_FORCED_FILES_STR = 'forced_files'
_MISS_FUNC_STR = 'miss_func'
# multiple times in a session.
# _MISS_FUNC_STR:
# If set, this function is called for every cache miss.
_ctx = {_FORCE_STR: False}
def set_force_refresh(v):
global _ctx
_ctx[_FORCE_STR] = v
_ctx[_FORCED_FILES_STR] = set()
def set_cache_miss_func(f):
global _ctx
if f:
_ctx[_MISS_FUNC_STR] = f
else:
# Clear out previously set function, if any.
_ctx.pop(_MISS_FUNC_STR, None)
def set_cache_dir(cache_dir):
global _ctx
_ctx[_CACHE_STR] = cache_dir
if cache_dir is None:
return
cache_dir.mkdir(exist_ok=True) # Create cache dir if one doesn't exist.
for file in cache_dir.glob('*_*.lkc'):
days = int(file.name.split('_')[0])
if get_file_age(file) >= days:
file.unlink()
def _valid_cached_value(file, days):
MAX_DAYS_TO_FORCE_REFRESH = 40
if (
_ctx[_FORCE_STR]
and days < MAX_DAYS_TO_FORCE_REFRESH
and file.name not in _ctx[_FORCED_FILES_STR]
):
_ctx[_FORCED_FILES_STR].add(file.name)
return False
return (file.exists() and get_file_age(file) < days)
def _call_func(class_obj, func):
global _ctx
if _MISS_FUNC_STR in _ctx:
_ctx[_MISS_FUNC_STR]()
return func(class_obj)
def cache(days):
def decorator(func):
@functools.wraps(func)
def new_func(class_obj):
global _ctx
if _CACHE_STR not in _ctx:
set_cache_dir(_DEFAULT_DIR)
cache_dir = _ctx[_CACHE_STR]
if not cache_dir:
return _call_func(class_obj, func)
key = f'{func.__qualname__}_{class_obj.cache_key()}'
filename = f'{days}_{md5(key.encode("utf8")).hexdigest()}.lkc'
file = cache_dir / filename
if _valid_cached_value(file, days):
return pickle.loads(file.read_bytes())
value = _call_func(class_obj, func)
file.write_bytes(pickle.dumps(value))
return value
return new_func
return decorator
| true | true |
f715b7a5eadb8af7edda0af1e5732c76618605bb | 951 | py | Python | tests/test_basic.py | mustafamerttunali/Tensorflow-Training-GUI | ededb2dbfeefeac7ea6bf2986090ebcdf6905f45 | [
"MIT"
] | 84 | 2019-12-28T15:05:46.000Z | 2020-12-01T15:10:56.000Z | tests/test_basic.py | mustafakisacik/Deep-Learning-Training-GUI | 1992185fd18e768f30c5bb5edd08ea709be97b09 | [
"MIT"
] | 6 | 2019-12-28T02:18:08.000Z | 2020-11-13T17:40:14.000Z | tests/test_basic.py | mustafakisacik/Deep-Learning-Training-GUI | 1992185fd18e768f30c5bb5edd08ea709be97b09 | [
"MIT"
] | 23 | 2019-12-29T19:14:23.000Z | 2020-12-07T09:43:52.000Z | import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from multiprocessing import Process
def startTensorboard(logdir):
# Start tensorboard with system call
os.system("tensorboard --logdir {}".format(logdir))
def fitModel():
# Create your model
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Some mock training data
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# Run the fit function
model.fit(data, labels, epochs=100, batch_size=32)
if __name__ == '__main__':
# Run both processes simultaneously
Process(target=startTensorboard, args=("logs",)).start()
Process(target=fitModel).start() | 28.818182 | 60 | 0.684543 | import os
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from multiprocessing import Process
def startTensorboard(logdir):
os.system("tensorboard --logdir {}".format(logdir))
def fitModel():
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
model.fit(data, labels, epochs=100, batch_size=32)
if __name__ == '__main__':
Process(target=startTensorboard, args=("logs",)).start()
Process(target=fitModel).start() | true | true |
f715b9d364ffce61b2e55ddeebf9ea8f7ff852a8 | 1,094 | py | Python | tests/contrib/hooks/test_nomad_hook.py | YotpoLtd/incubator-airflow | 86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2021-07-27T15:47:56.000Z | 2021-07-27T15:47:56.000Z | tests/contrib/hooks/test_nomad_hook.py | YotpoLtd/incubator-airflow | 86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/contrib/hooks/test_nomad_hook.py | YotpoLtd/incubator-airflow | 86bd47db6084b23f4eb4b4c1dfc7f0293e4308e2 | [
"MIT",
"BSD-3-Clause",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.nomad_hook import NomadHook
class TestNomadHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@patch("airflow.contrib.hooks.nomad_hook.NomadHook.get_nomad_client")
def test_nomad_client_connection(self, get_nomad_client):
NomadHook(nomad_conn_id='nomad_default')
self.assertTrue(get_nomad_client.called_once())
if __name__ == '__main__':
unittest.main()
| 30.388889 | 74 | 0.756856 |
import unittest
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.nomad_hook import NomadHook
class TestNomadHook(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@patch("airflow.contrib.hooks.nomad_hook.NomadHook.get_nomad_client")
def test_nomad_client_connection(self, get_nomad_client):
NomadHook(nomad_conn_id='nomad_default')
self.assertTrue(get_nomad_client.called_once())
if __name__ == '__main__':
unittest.main()
| true | true |
f715bb669761fdc7d43cef478f02a3c2769d3f57 | 1,243 | py | Python | scripts/suse/yum/plugins/yumnotify.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 2 | 2020-11-02T22:08:26.000Z | 2020-11-14T13:44:46.000Z | scripts/suse/yum/plugins/yumnotify.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 4 | 2021-02-06T14:30:48.000Z | 2021-12-13T20:50:10.000Z | scripts/suse/yum/plugins/yumnotify.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 2 | 2020-11-04T06:32:02.000Z | 2020-11-06T11:01:18.000Z | # Copyright (c) 2016 SUSE Linux LLC
# All Rights Reserved.
#
# Author: Bo Maryniuk <bo@suse.de>
import hashlib
import os
from yum import config
from yum.plugins import TYPE_CORE
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = "2.5"
plugin_type = TYPE_CORE
def _get_mtime():
"""
Get the modified time of the RPM Database.
Returns:
Unix ticks
"""
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
"""
Get the checksum of the RPM Database.
Returns:
hexdigest
"""
digest = hashlib.sha256()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
"""
Hook after the package installation transaction.
:param conduit:
:return:
"""
# Integrate Yum with Salt
if "SALT_RUNNING" not in os.environ:
with open(CK_PATH, "w") as ck_fh:
ck_fh.write(
"{chksum} {mtime}\n".format(chksum=_get_checksum(), mtime=_get_mtime())
)
| 21.067797 | 87 | 0.618665 |
import hashlib
import os
from yum import config
from yum.plugins import TYPE_CORE
CK_PATH = "/var/cache/salt/minion/rpmdb.cookie"
RPM_PATH = "/var/lib/rpm/Packages"
requires_api_version = "2.5"
plugin_type = TYPE_CORE
def _get_mtime():
return os.path.exists(RPM_PATH) and int(os.path.getmtime(RPM_PATH)) or 0
def _get_checksum():
digest = hashlib.sha256()
with open(RPM_PATH, "rb") as rpm_db_fh:
while True:
buff = rpm_db_fh.read(0x1000)
if not buff:
break
digest.update(buff)
return digest.hexdigest()
def posttrans_hook(conduit):
if "SALT_RUNNING" not in os.environ:
with open(CK_PATH, "w") as ck_fh:
ck_fh.write(
"{chksum} {mtime}\n".format(chksum=_get_checksum(), mtime=_get_mtime())
)
| true | true |
f715bca80298b84ce5bd4435a0da66ffc75de251 | 19,652 | py | Python | Bloxorz.py | ilkercankaya/Bloxorz | 212e8f051329f4f7392e336b9a99d5c4ae78c019 | [
"MIT"
] | null | null | null | Bloxorz.py | ilkercankaya/Bloxorz | 212e8f051329f4f7392e336b9a99d5c4ae78c019 | [
"MIT"
] | null | null | null | Bloxorz.py | ilkercankaya/Bloxorz | 212e8f051329f4f7392e336b9a99d5c4ae78c019 | [
"MIT"
] | null | null | null | # 0 is for perpendicular mode
# 1 is for flat mode
# 0 is for X-Axis config
# 1 is for Y-Axis mode
from copy import deepcopy
class Block:
def __init__(self, givenboard, mode, config, positionfirstbox, positionsecondbox):
# Copy Board
self.board = givenboard
# Fill the Board with Block
self.board.field[positionfirstbox[0]][positionfirstbox[1]] = 2
if positionsecondbox != []:
self.board.field[positionsecondbox[0]][positionsecondbox[1]] = 2
self.mode = mode
self.config = config
self.positionFirstBox = positionfirstbox
self.positionSecondBox = positionsecondbox
def isgamewon(self):
if self.mode == 0 and self.positionFirstBox == self.board.goal:
return True
else:
return False
def ismovableleft(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableright(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableup(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def ismovabledown(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def getleft(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [positionFirstBox, positionSecondBox, 1, self.config]
def moveleft(self):
if self.mode == 0:
if self.ismovableleft():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
# Change Mode and Config
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableleft():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
# Update object location
self.positionSecondBox = []
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return True
else:
return False
def moveright(self):
if self.mode == 0:
if self.ismovableright():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
# Change Mode
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableright():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return True
else:
return False
def getright(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
    def moveup(self):
        """Move the block one step up if legal.

        Mutates board cells and the stored positions; may change
        ``mode``/``config``.  Returns True on success, False when blocked.
        mode 0: tips over into a vertical two-cell block (config 1).
        mode 1 / config 0: slides both cells one row up.
        mode 1 / config 1: stands up on the cell above the first box.
        """
        if self.mode == 0:
            if self.ismovableup():
                # Erase the object from board
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                # Re-put object
                self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] = 2
                # Update object location
                self.positionSecondBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                self.positionFirstBox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
                # Change Mode
                self.mode = 1
                self.config = 1
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovableup():
                if self.config == 0:
                    # Erase the object from board
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    # Re-put object
                    self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                    self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] = 2
                    # Update object location
                    self.positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
                    self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                    return True
                elif self.config == 1:
                    # Erase the object from board
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    # Re-put object (single cell above the first box)
                    self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                    # Update object location
                    self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                    self.positionSecondBox = []
                    # Change Mode
                    self.mode = 0
                    return True
            else:
                return False
def getup(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
    def movedown(self):
        """Move the block one step down if legal.

        Mutates board cells and the stored positions; may change
        ``mode``/``config``.  Returns True on success, False when blocked.
        mode 0: tips over into a vertical two-cell block (config 1).
        mode 1 / config 0: slides both cells one row down.
        mode 1 / config 1: stands up on the cell below the second box.
        """
        if self.mode == 0:
            if self.ismovabledown():
                # Erase the object from board
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                # Re-put object
                self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
                self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] = 2
                # Update object location
                self.positionSecondBox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
                self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
                # Change Mode
                self.mode = 1
                self.config = 1
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovabledown():
                if self.config == 0:
                    # Erase the object from board
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    # Re-put object
                    self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
                    self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
                    # Update object location
                    self.positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
                    self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
                    return True
                elif self.config == 1:
                    # Erase the object from board
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    # Re-put object (single cell below the second box)
                    self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
                    # Update object location
                    self.positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
                    self.positionSecondBox = []
                    # Change Mode
                    self.mode = 0
                    return True
            else:
                return False
def getdown(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
# Adjust the box positions
positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
# Adjust the box positions
positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def printfield(self):
printer = deepcopy(self.board.field).astype(str)
# Transfer the field and print
for i in range(self.board.field.shape[0]):
for j in range(self.board.field.shape[1]):
if self.board.field[i][j] == 1:
printer[i][j] = 'X'
elif self.board.field[i][j] == 0:
printer[i][j] = 'O'
elif self.board.field[i][j] == 2:
printer[i][j] = 'S'
elif self.board.field[i][j] == 3:
printer[i][j] = 'G'
print("Current Board: \n", printer,"\n")
class Board:
    """Game board built from a character grid.

    Input characters: 'X' wall, 'O' floor, 'S' start, 'G' goal.
    After conversion ``self.field`` holds ints (1 = wall, 0 = free,
    2 = block); the goal cell itself is stored as 0 and remembered in
    ``self.goal`` (``None`` when the grid has no 'G').
    """

    # Mapping from input characters to the internal integer encoding.
    _ENCODING = {'X': 1, 'O': 0, 'S': 2, 'G': 3}

    def __init__(self, array):
        # Convert the character board in place, then store it as ints.
        # NOTE: the caller's array is mutated.
        rows, cols = array.shape
        for i in range(rows):
            for j in range(cols):
                cell = array[i][j]
                if cell in self._ENCODING:
                    array[i][j] = self._ENCODING[cell]
        self.field = array.astype(int)
        # Locate the goal marker, remember it, and clear the cell so the
        # goal behaves like a normal free square during movement.
        self.goal = None  # stays None if the board has no goal cell
        for i in range(rows):
            for j in range(cols):
                if self.field[i][j] == 3:
                    self.field[i][j] = 0
                    self.goal = [i, j]
                    break
            if self.goal is not None:
                # Bug fix: the original `break` only left the inner loop,
                # so the scan kept running over the remaining rows.
                break
| 48.403941 | 112 | 0.520507 |
from copy import deepcopy
class Block:
    """Bloxorz-style block that moves on a Board.

    State encoding:
      * mode 0 -- the block occupies a single cell; ``positionSecondBox``
        is the empty list.
      * mode 1 -- the block occupies two adjacent cells:
          config 0 -> horizontal (firstBox is left of secondBox),
          config 1 -> vertical   (firstBox is above secondBox).
    Board cells: 1 = wall, 0 = free, 2 = block.

    Bug fix vs. the original: the ``ismovableleft``/``ismovableup`` checks
    now reject negative indices explicitly.  numpy interprets a negative
    index as wrap-around from the opposite edge, so the original code
    could report a move off the left/top edge as legal and then stamp
    the block onto the wrong side of the board.
    """

    def __init__(self, givenboard, mode, config, positionfirstbox, positionsecondbox):
        # Stamp the block's cell(s) onto the board.
        self.board = givenboard
        self.board.field[positionfirstbox[0]][positionfirstbox[1]] = 2
        if positionsecondbox != []:
            self.board.field[positionsecondbox[0]][positionsecondbox[1]] = 2
        self.mode = mode
        self.config = config
        self.positionFirstBox = positionfirstbox
        self.positionSecondBox = positionsecondbox

    def isgamewon(self):
        """True when the block stands on a single cell that is the goal."""
        if self.mode == 0 and self.positionFirstBox == self.board.goal:
            return True
        else:
            return False

    def ismovableleft(self):
        """Whether a left move stays on the board and hits no wall."""
        try:
            if self.mode == 0:
                # Tipping left occupies the two cells at col-1 and col-2.
                if self.positionFirstBox[1] - 2 < 0:
                    return False  # would run off the left edge
                if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
                        and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] != 1:
                    return True
                else:
                    return False
            elif self.mode == 1:
                if self.positionFirstBox[1] - 1 < 0:
                    return False  # would run off the left edge
                if self.config == 0:
                    # Horizontal block stands up left of the first box.
                    if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1:
                        return True
                    else:
                        return False
                if self.config == 1:
                    # Vertical block slides left; both cells must be free.
                    if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
                            and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] != 1:
                        return True
                    else:
                        return False
        except IndexError:
            return False

    def ismovableright(self):
        """Whether a right move hits no wall; running off the right edge
        raises IndexError, which counts as "not movable"."""
        try:
            if self.mode == 0:
                if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
                        and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] != 1:
                    return True
                else:
                    return False
            elif self.mode == 1:
                if self.config == 0:
                    # Horizontal block stands up right of the second box.
                    if self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
                        return True
                    else:
                        return False
                if self.config == 1:
                    if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
                            and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
                        return True
                    else:
                        return False
        except IndexError:
            return False

    def ismovableup(self):
        """Whether an up move stays on the board and hits no wall."""
        try:
            if self.mode == 0:
                # Tipping up occupies the two cells at row-1 and row-2.
                if self.positionFirstBox[0] - 2 < 0:
                    return False  # would run off the top edge
                if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
                        and self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] != 1:
                    return True
                else:
                    return False
            elif self.mode == 1:
                if self.positionFirstBox[0] - 1 < 0:
                    return False  # would run off the top edge
                if self.config == 0:
                    # Horizontal block slides up; both cells must be free.
                    if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
                            and self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] != 1:
                        return True
                    else:
                        return False
                elif self.config == 1:
                    # Vertical block stands up above the first box.
                    if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1:
                        return True
                    else:
                        return False
        except IndexError:
            return False

    def ismovabledown(self):
        """Whether a down move hits no wall; running off the bottom edge
        raises IndexError, which counts as "not movable"."""
        try:
            if self.mode == 0:
                if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
                        and self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] != 1:
                    return True
                else:
                    return False
            elif self.mode == 1:
                if self.config == 0:
                    if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
                            and self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
                        return True
                    else:
                        return False
                elif self.config == 1:
                    # Vertical block stands up below the second box.
                    if self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
                        return True
                    else:
                        return False
        except IndexError:
            return False

    def getleft(self):
        """Return [firstbox, secondbox, mode, config] after a left move
        (no mutation)."""
        if self.mode == 0:
            secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
            firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
            return [firstbox, secondbox, 1, 0]
        elif self.mode == 1:
            if self.config == 0:
                firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
                return [firstbox, [], 0, self.config]
            if self.config == 1:
                positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
                positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
                return [positionFirstBox, positionSecondBox, 1, self.config]

    def moveleft(self):
        """Move one step left if legal; returns True/False."""
        if self.mode == 0:
            if self.ismovableleft():
                # Erase, re-stamp, then update positions and mode.
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] = 2
                self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
                self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
                self.mode = 1
                self.config = 0
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovableleft():
                if self.config == 0:
                    # Stand up on the cell left of the first box.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
                    self.positionSecondBox = []
                    self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
                    self.mode = 0
                    return True
                if self.config == 1:
                    # Slide the vertical block one column left.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] = 2
                    self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
                    self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
                    return True
            else:
                return False

    def moveright(self):
        """Move one step right if legal; returns True/False."""
        if self.mode == 0:
            if self.ismovableright():
                # Erase, re-stamp, then update positions and mode.
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] = 2
                self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
                self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
                self.mode = 1
                self.config = 0
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovableright():
                if self.config == 0:
                    # Stand up on the cell right of the second box.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0]][self.positionSecondBox[1] + 1] = 2
                    self.positionFirstBox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
                    self.positionSecondBox = []
                    self.mode = 0
                    return True
                if self.config == 1:
                    # Slide the vertical block one column right.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] = 2
                    self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
                    self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
                    return True
            else:
                return False

    def getright(self):
        """Return [firstbox, secondbox, mode, config] after a right move
        (no mutation)."""
        if self.mode == 0:
            secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
            firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
            return [firstbox, secondbox, 1, 0]
        elif self.mode == 1:
            if self.config == 0:
                firstbox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
                return [firstbox, [], 0, self.config]
            if self.config == 1:
                positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
                positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
                return [positionFirstBox, positionSecondBox, self.mode, self.config]

    def moveup(self):
        """Move one step up if legal; returns True/False."""
        if self.mode == 0:
            if self.ismovableup():
                # Erase, re-stamp, then update positions and mode.
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] = 2
                self.positionSecondBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                self.positionFirstBox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
                self.mode = 1
                self.config = 1
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovableup():
                if self.config == 0:
                    # Slide the horizontal block one row up.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                    self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] = 2
                    self.positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
                    self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                    return True
                elif self.config == 1:
                    # Stand up on the cell above the first box.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
                    self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                    self.positionSecondBox = []
                    self.mode = 0
                    return True
            else:
                return False

    def getup(self):
        """Return [firstbox, secondbox, mode, config] after an up move
        (no mutation)."""
        if self.mode == 0:
            secondbox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
            firstbox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
            return [firstbox, secondbox, 1, 1]
        elif self.mode == 1:
            if self.config == 0:
                positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
                positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                return [positionFirstBox, positionSecondBox, self.mode, self.config]
            if self.config == 1:
                positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
                positionSecondBox = []
                return [positionFirstBox, positionSecondBox, 0, self.config]

    def movedown(self):
        """Move one step down if legal; returns True/False."""
        if self.mode == 0:
            if self.ismovabledown():
                # Erase, re-stamp, then update positions and mode.
                self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
                self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] = 2
                self.positionSecondBox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
                self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
                self.mode = 1
                self.config = 1
                return True
            else:
                return False
        elif self.mode == 1:
            if self.ismovabledown():
                if self.config == 0:
                    # Slide the horizontal block one row down.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
                    self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
                    self.positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
                    self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
                    return True
                elif self.config == 1:
                    # Stand up on the cell below the second box.
                    self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
                    self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
                    self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
                    self.positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
                    self.positionSecondBox = []
                    self.mode = 0
                    return True
            else:
                return False

    def getdown(self):
        """Return [firstbox, secondbox, mode, config] after a down move
        (no mutation)."""
        if self.mode == 0:
            secondbox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
            firstbox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
            return [firstbox, secondbox, 1, 1]
        elif self.mode == 1:
            if self.config == 0:
                positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
                positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
                return [positionFirstBox, positionSecondBox, self.mode, self.config]
            if self.config == 1:
                positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
                positionSecondBox = []
                return [positionFirstBox, positionSecondBox, 0, self.config]

    def printfield(self):
        """Print the board using letters: X=wall, O=free, S=block, G=goal."""
        printer = deepcopy(self.board.field).astype(str)
        for i in range(self.board.field.shape[0]):
            for j in range(self.board.field.shape[1]):
                if self.board.field[i][j] == 1:
                    printer[i][j] = 'X'
                elif self.board.field[i][j] == 0:
                    printer[i][j] = 'O'
                elif self.board.field[i][j] == 2:
                    printer[i][j] = 'S'
                elif self.board.field[i][j] == 3:
                    printer[i][j] = 'G'
        print("Current Board: \n", printer,"\n")
class Board:
    """Game board built from a character grid.

    Input characters: 'X' wall, 'O' floor, 'S' start, 'G' goal.
    After conversion ``self.field`` holds ints (1 = wall, 0 = free,
    2 = block); the goal cell itself is stored as 0 and remembered in
    ``self.goal`` (``None`` when the grid has no 'G').
    """

    # Mapping from input characters to the internal integer encoding.
    _ENCODING = {'X': 1, 'O': 0, 'S': 2, 'G': 3}

    def __init__(self, array):
        # Convert the character board in place, then store it as ints.
        # NOTE: the caller's array is mutated.
        rows, cols = array.shape
        for i in range(rows):
            for j in range(cols):
                cell = array[i][j]
                if cell in self._ENCODING:
                    array[i][j] = self._ENCODING[cell]
        self.field = array.astype(int)
        # Locate the goal marker, remember it, and clear the cell so the
        # goal behaves like a normal free square during movement.
        self.goal = None  # stays None if the board has no goal cell
        for i in range(rows):
            for j in range(cols):
                if self.field[i][j] == 3:
                    self.field[i][j] = 0
                    self.goal = [i, j]
                    break
            if self.goal is not None:
                # Bug fix: the original `break` only left the inner loop,
                # so the scan kept running over the remaining rows.
                break
| true | true |
f715bcdc38ebdc27ec473e15774bc7f195755daa | 5,704 | py | Python | asposewordscloud/models/bookmark_data.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | asposewordscloud/models/bookmark_data.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | asposewordscloud/models/bookmark_data.py | rizwanniazigroupdocs/aspose-words-cloud-python | b943384a1e3c0710cc84df74119e6edf7356037e | [
"MIT"
] | null | null | null | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="bookmark_data.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
import json
class BookmarkData(object):
    """DTO for bookmark updating.

    Swagger-generated model with two string properties: ``name`` (the
    bookmark's name) and ``text`` (the text enclosed in the bookmark).
    Fixes vs. the original: extraction junk fused onto the final
    ``return`` statement is removed, and ``six.iteritems`` is replaced
    with plain ``dict`` iteration (this package runs on Python 3).
    """

    # swagger_types: attribute name -> attribute type.
    swagger_types = {
        'name': 'str',
        'text': 'str'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'Name',
        'text': 'Text'
    }

    def __init__(self, name=None, text=None):  # noqa: E501
        """BookmarkData - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._text = None
        self.discriminator = None

        if name is not None:
            self.name = name
        if text is not None:
            self.text = text

    @property
    def name(self):
        """Gets the name of the bookmark.

        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of the bookmark.

        :type: str
        """
        self._name = name

    @property
    def text(self):
        """Gets the text enclosed in the bookmark.

        :rtype: str
        """
        return self._text

    @text.setter
    def text(self, text):
        """Sets the text enclosed in the bookmark.

        :type: str
        """
        self._text = text

    def to_dict(self):
        """Returns the model properties as a dict keyed by attribute name."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_json(self):
        """Returns the model as a JSON string keyed by the API JSON names."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            key = self.attribute_map[attr]
            if isinstance(value, list):
                result[key] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[key] = value.to_dict()
            elif isinstance(value, dict):
                result[key] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[key] = value
        return json.dumps(result)

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns True if both objects are BookmarkData with equal state."""
        if not isinstance(other, BookmarkData):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns True if the objects differ."""
        return not self == other
import pprint
import re
import six
import json
class BookmarkData(object):
    """DTO for bookmark updating: ``name`` (bookmark name) and ``text``
    (the text enclosed in the bookmark).

    Fixes vs. the original: extraction junk fused onto the final
    ``return`` statement is removed, and ``six.iteritems`` is replaced
    with plain ``dict`` iteration (Python 3 only).
    """

    # swagger_types: attribute name -> attribute type.
    swagger_types = {
        'name': 'str',
        'text': 'str'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'Name',
        'text': 'Text'
    }

    def __init__(self, name=None, text=None):
        """Create a BookmarkData; both properties default to None."""
        self._name = None
        self._text = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if text is not None:
            self.text = text

    @property
    def name(self):
        """Gets the name of the bookmark (str)."""
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of the bookmark (str)."""
        self._name = name

    @property
    def text(self):
        """Gets the text enclosed in the bookmark (str)."""
        return self._text

    @text.setter
    def text(self, text):
        """Sets the text enclosed in the bookmark (str)."""
        self._text = text

    def to_dict(self):
        """Returns the model properties as a dict keyed by attribute name."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_json(self):
        """Returns the model as a JSON string keyed by the API JSON names."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            key = self.attribute_map[attr]
            if isinstance(value, list):
                result[key] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[key] = value.to_dict()
            elif isinstance(value, dict):
                result[key] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[key] = value
        return json.dumps(result)

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns True if both objects are BookmarkData with equal state."""
        if not isinstance(other, BookmarkData):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns True if the objects differ."""
        return not self == other
f715bcfb6bc96e73744dd0a50b070cfdd7c67ca2 | 1,316 | py | Python | torch_glow/tests/nodes/adaptive_avg_pool2d_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | 2 | 2020-03-23T21:04:00.000Z | 2020-04-02T22:49:49.000Z | torch_glow/tests/nodes/adaptive_avg_pool2d_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | 1 | 2020-01-06T09:14:32.000Z | 2020-01-06T09:14:32.000Z | torch_glow/tests/nodes/adaptive_avg_pool2d_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAdaptiveAvgPool2d(unittest.TestCase):
    """JIT-vs-Glow parity tests for aten::adaptive_avg_pool2d."""

    def _run(self, input_shape, output_size):
        """Compare JIT and Glow for the given input shape / target size."""
        def pool_fn(inputs):
            return F.adaptive_avg_pool2d(inputs, output_size)

        sample = torch.randn(*input_shape)
        jitVsGlow(pool_fn, sample, expected_fused_ops={
                  "aten::adaptive_avg_pool2d"})

    def test_adaptive_avg_pool2d_basic(self):
        """Basic test of PyTorch adaptive_avg_pool2d Node."""
        self._run((3, 6, 14, 14), (5, 5))

    def test_adaptive_avg_pool2d_nonsquare_inputs(self):
        """adaptive_avg_pool2d with a non-square input tensor."""
        self._run((3, 6, 13, 14), (3, 3))

    def test_adaptive_avg_pool2d_nonsquare_outputs(self):
        """adaptive_avg_pool2d with a non-square output size."""
        self._run((3, 6, 14, 14), (5, 3))
| 30.604651 | 82 | 0.668693 | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn.functional as F
from tests.utils import jitVsGlow
import unittest
class TestAdaptiveAvgPool2d(unittest.TestCase):
    """JIT-vs-Glow parity tests for aten::adaptive_avg_pool2d."""
    def test_adaptive_avg_pool2d_basic(self):
        """Basic test of PyTorch adaptive_avg_pool2d Node."""
        def test_f(inputs):
            return F.adaptive_avg_pool2d(inputs, (5, 5))
        inputs = torch.randn(3, 6, 14, 14)
        jitVsGlow(test_f, inputs, expected_fused_ops={
            "aten::adaptive_avg_pool2d"})
    def test_adaptive_avg_pool2d_nonsquare_inputs(self):
        """adaptive_avg_pool2d with a non-square input tensor."""
        def test_f(inputs):
            return F.adaptive_avg_pool2d(inputs, (3, 3))
        inputs = torch.randn(3, 6, 13, 14)
        jitVsGlow(test_f, inputs, expected_fused_ops={
            "aten::adaptive_avg_pool2d"})
    def test_adaptive_avg_pool2d_nonsquare_outputs(self):
        """adaptive_avg_pool2d with a non-square output size."""
        def test_f(inputs):
            return F.adaptive_avg_pool2d(inputs, (5, 3))
        inputs = torch.randn(3, 6, 14, 14)
        jitVsGlow(test_f, inputs, expected_fused_ops={
            "aten::adaptive_avg_pool2d"})
| true | true |
f715bdd5034a351c309d1a984393c7e6094f054e | 274 | py | Python | apigw/typo.py | theztd/flaskapp-prom | e1f5137c319175fe8fc1db0ede8eec020cd2f008 | [
"BSD-2-Clause"
] | 2 | 2021-02-27T21:08:00.000Z | 2021-05-12T13:55:38.000Z | apigw/typo.py | theztd/flaskapp-prom | e1f5137c319175fe8fc1db0ede8eec020cd2f008 | [
"BSD-2-Clause"
] | null | null | null | apigw/typo.py | theztd/flaskapp-prom | e1f5137c319175fe8fc1db0ede8eec020cd2f008 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
def ret_string(name: str) -> str:
    """Return the greeting "Hi <name>".

    Prints the runtime type of *name* first (debugging aid).  Despite the
    ``str`` annotation, any object that supports str-formatting works.
    """
    print(type(name))
    greeting = f"Hi {name}"
    return greeting
# Greet each entry; the int 18 exercises the TypeError handler below.
for n in ["Karel", "Pepa", 18, "Lucie"]:
    try:
        print(type(n))
        print(ret_string(n))
    except TypeError as err:
        # Defensive branch: f-string formatting accepts any object, so
        # this is not expected to trigger for these inputs.
        print(n)
        print(err)
| 17.125 | 40 | 0.547445 |
def ret_string(name: str) -> str:
    """Build and return the greeting string for *name*.

    The argument's runtime type is printed as a debugging aid; the
    annotation notwithstanding, non-string values are formatted too.
    """
    print(type(name))
    return "Hi " + format(name)
# Greet each entry; the int 18 exercises the TypeError handler below.
for n in ["Karel", "Pepa", 18, "Lucie"]:
    try:
        print(type(n))
        print(ret_string(n))
    except TypeError as err:
        # Defensive branch: f-string formatting accepts any object, so
        # this is not expected to trigger for these inputs.
        print(n)
        print(err)
| true | true |
f715bdeeabab1bf9416cdf699d275a46c2adb6d6 | 270 | py | Python | wafw00f/plugins/knownsec.py | aqyoung/scan-wafw00f | a95a94253f138d5ef791232ef4d8371de41622b6 | [
"BSD-3-Clause"
] | 1 | 2019-08-01T11:19:55.000Z | 2019-08-01T11:19:55.000Z | wafw00f/plugins/knownsec.py | aqyoung/scan-wafw00f | a95a94253f138d5ef791232ef4d8371de41622b6 | [
"BSD-3-Clause"
] | null | null | null | wafw00f/plugins/knownsec.py | aqyoung/scan-wafw00f | a95a94253f138d5ef791232ef4d8371de41622b6 | [
"BSD-3-Clause"
] | 2 | 2017-12-27T15:56:15.000Z | 2017-12-27T20:03:09.000Z | #!/usr/bin/env python
NAME = 'KS-WAF (KnownSec)'
def is_waf(self):
    """Detect KS-WAF (KnownSec) from the probe responses.

    Runs each configured attack; returns True as soon as a response page
    contains the KS-WAF error image path, False when no attack matched,
    and None (implicit) when a probe got no response at all.
    Fix vs. original: extraction junk fused onto the final ``return``
    line is removed.
    """
    for attack in self.attacks:
        response = attack(self)
        if response is None:
            # No response at all: inconclusive, bail out (returns None).
            return
        _, page = response
        if b'/ks-waf-error.png' in page:
            return True
    return False
NAME = 'KS-WAF (KnownSec)'
def is_waf(self):
    """Probe the target with each attack; report KS-WAF when the WAF's
    error image path appears in any response body.

    Returns True on a match, False when no probe matched, and None
    (implicit) when a probe got no response.  Fix vs. original:
    extraction junk fused onto the final ``return`` line is removed.
    """
    marker = b'/ks-waf-error.png'
    for attack in self.attacks:
        result = attack(self)
        if result is None:
            # Probe got no response -- inconclusive (implicitly None).
            return
        _, page = result
        if marker in page:
            return True
    return False
f715be36cb847900ba0b72075d63650894204e29 | 16,357 | py | Python | google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py | googleapis/python-bigquery-storage | acc92249013f1b31fdac2aa4bf5a6864730d7422 | [
"Apache-2.0"
] | 44 | 2020-02-12T21:28:37.000Z | 2022-03-31T06:16:30.000Z | google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py | googleapis/python-bigquery-storage | acc92249013f1b31fdac2aa4bf5a6864730d7422 | [
"Apache-2.0"
] | 178 | 2020-02-05T10:49:45.000Z | 2022-03-31T01:48:44.000Z | google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py | googleapis/python-bigquery-storage | acc92249013f1b31fdac2aa4bf5a6864730d7422 | [
"Apache-2.0"
] | 23 | 2020-02-05T23:12:15.000Z | 2022-02-24T08:33:14.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from .base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
from .grpc import BigQueryReadGrpcTransport
class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport):
"""gRPC AsyncIO backend transport for BigQueryRead.
BigQuery Read API.
The Read API can be used to read data from BigQuery.
New code should use the v1 Read API going forward, if they don't
use Write API at the same time.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def create_read_session(
self,
) -> Callable[[storage.CreateReadSessionRequest], Awaitable[stream.ReadSession]]:
r"""Return a callable for the create read session method over gRPC.
Creates a new read session. A read session divides
the contents of a BigQuery table into one or more
streams, which can then be used to read data from the
table. The read session also specifies properties of the
data to be read, such as a list of columns or a push-
down filter describing the rows to be returned.
A particular row can be read by at most one stream. When
the caller has reached the end of each stream in the
session, then all the data in the table has been read.
Data is assigned to each stream such that roughly the
same number of rows can be read from each stream.
Because the server-side unit for assigning data is
collections of rows, the API does not guarantee that
each stream will return the same number or rows.
Additionally, the limits are enforced based on the
number of pre-filtered rows, so some filters can lead to
lopsided assignments.
Read sessions automatically expire 6 hours after they
are created and do not require manual clean-up by the
caller.
Returns:
Callable[[~.CreateReadSessionRequest],
Awaitable[~.ReadSession]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_read_session" not in self._stubs:
self._stubs["create_read_session"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession",
request_serializer=storage.CreateReadSessionRequest.serialize,
response_deserializer=stream.ReadSession.deserialize,
)
return self._stubs["create_read_session"]
@property
def read_rows(
self,
) -> Callable[[storage.ReadRowsRequest], Awaitable[storage.ReadRowsResponse]]:
r"""Return a callable for the read rows method over gRPC.
Reads rows from the stream in the format prescribed
by the ReadSession. Each response contains one or more
table rows, up to a maximum of 100 MiB per response;
read requests which attempt to read individual rows
larger than 100 MiB will fail.
Each request also returns a set of stream statistics
reflecting the current state of the stream.
Returns:
Callable[[~.ReadRowsRequest],
Awaitable[~.ReadRowsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "read_rows" not in self._stubs:
self._stubs["read_rows"] = self.grpc_channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows",
request_serializer=storage.ReadRowsRequest.serialize,
response_deserializer=storage.ReadRowsResponse.deserialize,
)
return self._stubs["read_rows"]
@property
def split_read_stream(
self,
) -> Callable[
[storage.SplitReadStreamRequest], Awaitable[storage.SplitReadStreamResponse]
]:
r"""Return a callable for the split read stream method over gRPC.
Splits a given ``ReadStream`` into two ``ReadStream`` objects.
These ``ReadStream`` objects are referred to as the primary and
the residual streams of the split. The original ``ReadStream``
can still be read from in the same manner as before. Both of the
returned ``ReadStream`` objects can also be read from, and the
rows returned by both child streams will be the same as the rows
read from the original stream.
Moreover, the two child streams will be allocated back-to-back
in the original ``ReadStream``. Concretely, it is guaranteed
that for streams original, primary, and residual, that
original[0-j] = primary[0-j] and original[j-n] = residual[0-m]
once the streams have been read to completion.
Returns:
Callable[[~.SplitReadStreamRequest],
Awaitable[~.SplitReadStreamResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "split_read_stream" not in self._stubs:
self._stubs["split_read_stream"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream",
request_serializer=storage.SplitReadStreamRequest.serialize,
response_deserializer=storage.SplitReadStreamResponse.deserialize,
)
return self._stubs["split_read_stream"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BigQueryReadGrpcAsyncIOTransport",)
| 45.310249 | 88 | 0.645656 |
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.transport.grpc import SslCredentials
import packaging.version
import grpc
from grpc.experimental import aio
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from .base import BigQueryReadTransport, DEFAULT_CLIENT_INFO
from .grpc import BigQueryReadGrpcTransport
class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport):
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "bigquerystorage.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
credentials = False
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
return self._grpc_channel
@property
def create_read_session(
self,
) -> Callable[[storage.CreateReadSessionRequest], Awaitable[stream.ReadSession]]:
if "create_read_session" not in self._stubs:
self._stubs["create_read_session"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession",
request_serializer=storage.CreateReadSessionRequest.serialize,
response_deserializer=stream.ReadSession.deserialize,
)
return self._stubs["create_read_session"]
@property
def read_rows(
self,
) -> Callable[[storage.ReadRowsRequest], Awaitable[storage.ReadRowsResponse]]:
if "read_rows" not in self._stubs:
self._stubs["read_rows"] = self.grpc_channel.unary_stream(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows",
request_serializer=storage.ReadRowsRequest.serialize,
response_deserializer=storage.ReadRowsResponse.deserialize,
)
return self._stubs["read_rows"]
@property
def split_read_stream(
self,
) -> Callable[
[storage.SplitReadStreamRequest], Awaitable[storage.SplitReadStreamResponse]
]:
if "split_read_stream" not in self._stubs:
self._stubs["split_read_stream"] = self.grpc_channel.unary_unary(
"/google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream",
request_serializer=storage.SplitReadStreamRequest.serialize,
response_deserializer=storage.SplitReadStreamResponse.deserialize,
)
return self._stubs["split_read_stream"]
def close(self):
return self.grpc_channel.close()
__all__ = ("BigQueryReadGrpcAsyncIOTransport",)
| true | true |
f715bec4b32bc353255f534021aae397a4a5e309 | 5,398 | py | Python | repair/evaluate.py | h4iku/repairSStuBs | 0caa6269801d13f4743e6b2c8d34c01057f3b4b7 | [
"MIT"
] | 2 | 2021-07-16T04:30:10.000Z | 2022-01-05T01:33:42.000Z | repair/evaluate.py | h4iku/repairSStuBs | 0caa6269801d13f4743e6b2c8d34c01057f3b4b7 | [
"MIT"
] | 1 | 2021-06-17T06:57:49.000Z | 2021-06-18T00:13:14.000Z | repair/evaluate.py | h4iku/repairSStuBs | 0caa6269801d13f4743e6b2c8d34c01057f3b4b7 | [
"MIT"
] | 1 | 2021-05-27T05:50:17.000Z | 2021-05-27T05:50:17.000Z | import csv
import difflib
import shutil
from collections import defaultdict
from statistics import mean
from pytablewriter import MarkdownTableWriter
from tqdm import tqdm
from utils.config import CORRECT_PATCHES, INPUT, REPAIR_OUTPUT, REPAIR_RESULT
class Result:
def __init__(self, buggy_file_line_dir, comparison_result,
fixed_file_line_dir, file_name, project_name, bug_type):
self.buggy_file_line_dir = buggy_file_line_dir
self.comparison_result = eval(comparison_result)
self.fixed_file_line_dir = fixed_file_line_dir
self.file_name = file_name
self.project_name = project_name
self.bug_type = bug_type
self.buggy_file = INPUT / self.buggy_file_line_dir / self.file_name
self.fixed_file = INPUT / self.fixed_file_line_dir / self.file_name
if self.fix_patch_number():
self.genfixed_file = (REPAIR_OUTPUT / self.buggy_file_line_dir /
str(self.fix_patch_number()) / self.file_name)
else:
self.genfixed_file = None
def __eq__(self, other):
return (self.buggy_file_line_dir == other.buggy_file_line_dir
and self.bug_type == other.bug_type)
def fix_patch_number(self):
if True in self.comparison_result:
return self.comparison_result.index(True) + 1
else:
return None
def copy_files(self):
"""Copies result files to a designated directory"""
# Create the destination directory
copy_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
copy_path.mkdir(parents=True, exist_ok=True)
# Copy the buggy file
shutil.copyfile(self.buggy_file, copy_path / 'BuggyFile.java')
# Copy the actual fixed file
shutil.copyfile(self.fixed_file, copy_path / 'FixedFile.java')
# Copy the correctly generated fixed file
shutil.copyfile(self.genfixed_file, copy_path /
'GeneratedFixFile.java')
def generate_diffs(self):
save_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
save_path.mkdir(parents=True, exist_ok=True)
# Diff between buggy file and actual fixed file
with open(self.buggy_file) as buggy_file:
with open(self.fixed_file) as fixed_file:
bugfix_diff = difflib.unified_diff(
buggy_file.readlines(),
fixed_file.readlines(),
fromfile='BuggyFile.java', tofile='FixedFile.java'
)
with open(save_path / 'bugfix.diff', 'w') as bugfix_file:
bugfix_file.writelines(bugfix_diff)
# Diff between buggy file and the generated fixed file
with open(self.buggy_file) as buggy_file:
with open(self.genfixed_file) as genfixed_file:
genfix_diff = difflib.unified_diff(
buggy_file.readlines(),
genfixed_file.readlines(),
fromfile='BuggyFile.java', tofile='GeneratedFixFile.java'
)
with open(save_path / 'genfix.diff', 'w') as genfix_file:
genfix_file.writelines(genfix_diff)
def main():
with open(REPAIR_RESULT, newline='') as file:
reader = csv.reader(file)
all_results = [Result(*line) for line in reader]
# Removing duplicates
results = []
for result in all_results:
if result not in results:
results.append(result)
# Copying results and generating diffs
for res in tqdm(results):
if res.fix_patch_number():
res.copy_files()
res.generate_diffs()
# Evaluating
total_gen_patches = [res.comparison_result for res in results]
num_total_gen_patches = [len(x) for x in total_gen_patches]
print(f'Total generated patches: {sum(num_total_gen_patches)}')
print(f'min: {min(num_total_gen_patches)}, '
f'max: {max(num_total_gen_patches)}, '
f'avg: {mean(num_total_gen_patches)}')
num_fixes = [1 for x in total_gen_patches if any(x)]
print(f'Total bugs: {len(results)}', f'Fixed: {sum(num_fixes)}')
patterns = defaultdict(lambda: [0, 0, []])
for res in results:
gen_patches = res.comparison_result
patterns[res.bug_type][-1].append(len(gen_patches))
if any(gen_patches):
patterns[res.bug_type][0] += 1
patterns[res.bug_type][1] += 1
else:
patterns[res.bug_type][0] += 1
print('Number of min, max, avg generated patches:')
print([(ptn, min(vals[-1]), max(vals[-1]), mean(vals[-1]))
for ptn, vals in patterns.items()])
# Sort by the number of bugs
patterns_list = sorted(patterns.items(),
key=lambda x: x[1][0], reverse=True)
value_matrix = [
[ptn] + vals[:-1] + [f'{(vals[1] / vals[0]) * 100:.2f}%']
for ptn, vals in patterns_list
]
value_matrix.append(
['Total', sstubs := len(results),
corrects := sum(num_fixes),
f'{(corrects / sstubs) * 100:.2f}%']
)
# Configuring the Markdown table
writer = MarkdownTableWriter(
table_name="repair_results",
headers=["Pattern Name", "SStuBs", "Correct Patches", "Ratio"],
value_matrix=value_matrix,
)
writer.write_table()
if __name__ == '__main__':
main()
| 35.051948 | 80 | 0.626343 | import csv
import difflib
import shutil
from collections import defaultdict
from statistics import mean
from pytablewriter import MarkdownTableWriter
from tqdm import tqdm
from utils.config import CORRECT_PATCHES, INPUT, REPAIR_OUTPUT, REPAIR_RESULT
class Result:
def __init__(self, buggy_file_line_dir, comparison_result,
fixed_file_line_dir, file_name, project_name, bug_type):
self.buggy_file_line_dir = buggy_file_line_dir
self.comparison_result = eval(comparison_result)
self.fixed_file_line_dir = fixed_file_line_dir
self.file_name = file_name
self.project_name = project_name
self.bug_type = bug_type
self.buggy_file = INPUT / self.buggy_file_line_dir / self.file_name
self.fixed_file = INPUT / self.fixed_file_line_dir / self.file_name
if self.fix_patch_number():
self.genfixed_file = (REPAIR_OUTPUT / self.buggy_file_line_dir /
str(self.fix_patch_number()) / self.file_name)
else:
self.genfixed_file = None
def __eq__(self, other):
return (self.buggy_file_line_dir == other.buggy_file_line_dir
and self.bug_type == other.bug_type)
def fix_patch_number(self):
if True in self.comparison_result:
return self.comparison_result.index(True) + 1
else:
return None
def copy_files(self):
copy_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
copy_path.mkdir(parents=True, exist_ok=True)
shutil.copyfile(self.buggy_file, copy_path / 'BuggyFile.java')
shutil.copyfile(self.fixed_file, copy_path / 'FixedFile.java')
shutil.copyfile(self.genfixed_file, copy_path /
'GeneratedFixFile.java')
def generate_diffs(self):
save_path = (CORRECT_PATCHES / self.buggy_file_line_dir)
save_path.mkdir(parents=True, exist_ok=True)
with open(self.buggy_file) as buggy_file:
with open(self.fixed_file) as fixed_file:
bugfix_diff = difflib.unified_diff(
buggy_file.readlines(),
fixed_file.readlines(),
fromfile='BuggyFile.java', tofile='FixedFile.java'
)
with open(save_path / 'bugfix.diff', 'w') as bugfix_file:
bugfix_file.writelines(bugfix_diff)
with open(self.buggy_file) as buggy_file:
with open(self.genfixed_file) as genfixed_file:
genfix_diff = difflib.unified_diff(
buggy_file.readlines(),
genfixed_file.readlines(),
fromfile='BuggyFile.java', tofile='GeneratedFixFile.java'
)
with open(save_path / 'genfix.diff', 'w') as genfix_file:
genfix_file.writelines(genfix_diff)
def main():
with open(REPAIR_RESULT, newline='') as file:
reader = csv.reader(file)
all_results = [Result(*line) for line in reader]
results = []
for result in all_results:
if result not in results:
results.append(result)
for res in tqdm(results):
if res.fix_patch_number():
res.copy_files()
res.generate_diffs()
total_gen_patches = [res.comparison_result for res in results]
num_total_gen_patches = [len(x) for x in total_gen_patches]
print(f'Total generated patches: {sum(num_total_gen_patches)}')
print(f'min: {min(num_total_gen_patches)}, '
f'max: {max(num_total_gen_patches)}, '
f'avg: {mean(num_total_gen_patches)}')
num_fixes = [1 for x in total_gen_patches if any(x)]
print(f'Total bugs: {len(results)}', f'Fixed: {sum(num_fixes)}')
patterns = defaultdict(lambda: [0, 0, []])
for res in results:
gen_patches = res.comparison_result
patterns[res.bug_type][-1].append(len(gen_patches))
if any(gen_patches):
patterns[res.bug_type][0] += 1
patterns[res.bug_type][1] += 1
else:
patterns[res.bug_type][0] += 1
print('Number of min, max, avg generated patches:')
print([(ptn, min(vals[-1]), max(vals[-1]), mean(vals[-1]))
for ptn, vals in patterns.items()])
patterns_list = sorted(patterns.items(),
key=lambda x: x[1][0], reverse=True)
value_matrix = [
[ptn] + vals[:-1] + [f'{(vals[1] / vals[0]) * 100:.2f}%']
for ptn, vals in patterns_list
]
value_matrix.append(
['Total', sstubs := len(results),
corrects := sum(num_fixes),
f'{(corrects / sstubs) * 100:.2f}%']
)
writer = MarkdownTableWriter(
table_name="repair_results",
headers=["Pattern Name", "SStuBs", "Correct Patches", "Ratio"],
value_matrix=value_matrix,
)
writer.write_table()
if __name__ == '__main__':
main()
| true | true |
f715bf4feddced17be81d083c4130de44ac9c701 | 1,692 | py | Python | multiple-images/images/migrations/0001_initial.py | mp5maker/django | a2d38e2e9973e755afce1bd0ccb17e58f3db7e33 | [
"MIT"
] | null | null | null | multiple-images/images/migrations/0001_initial.py | mp5maker/django | a2d38e2e9973e755afce1bd0ccb17e58f3db7e33 | [
"MIT"
] | 13 | 2020-02-12T00:14:20.000Z | 2022-02-10T08:46:42.000Z | multiple-images/images/migrations/0001_initial.py | mp5maker/django | a2d38e2e9973e755afce1bd0ccb17e58f3db7e33 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.2 on 2019-06-13 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('description', models.TextField(blank=True)),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('image', models.ImageField(blank=True, upload_to='images/%y/%m/%d')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.Description')),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
]
| 36 | 144 | 0.536643 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Description',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('description', models.TextField(blank=True)),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('slug', models.SlugField(blank=True)),
('created', models.DateTimeField(blank=True)),
('updated', models.DateTimeField(blank=True)),
('image', models.ImageField(blank=True, upload_to='images/%y/%m/%d')),
('description', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.Description')),
],
options={
'ordering': ('created',),
'abstract': False,
},
),
]
| true | true |
f715bf76749dbe6664dac3361f0fd6ab0369fb12 | 1,123 | py | Python | websauna/tests/core/test_views.py | stevepiercy/websauna | 2886b86f7920d75900c634958779d61aa73f011b | [
"CNRI-Python"
] | 286 | 2016-01-17T05:44:02.000Z | 2022-02-07T20:28:49.000Z | websauna/tests/core/test_views.py | stevepiercy/websauna | 2886b86f7920d75900c634958779d61aa73f011b | [
"CNRI-Python"
] | 203 | 2016-03-15T02:00:53.000Z | 2021-09-27T10:48:49.000Z | websauna/tests/core/test_views.py | ooduor/websauna | 2e78cd87eda305fbbb1080d386b8cf96537360e5 | [
"CNRI-Python"
] | 71 | 2016-01-17T11:04:26.000Z | 2021-08-24T08:04:31.000Z | # Standard Library
import os
import pytest
# Websauna
from websauna.system import Initializer
from websauna.system.core.route import add_template_only_view
from websauna.tests.fixtures import get_app
from websauna.tests.webserver import customized_web_server
HERE = os.path.abspath(os.path.dirname(__file__))
def extra_init(init: Initializer):
"""Configure one templated only view."""
config = init.config
config.add_jinja2_search_path(HERE + "/templates", name=".html")
add_template_only_view(config, "/dummy", "dummy", "dummy.html")
@pytest.fixture(scope="module")
def app(request, ini_settings):
"""Construct a WSGI app with tutorial models and admins loaded."""
app = get_app(ini_settings, extra_init=extra_init)
return app
@pytest.fixture(scope="module")
def web_server(request, app):
"""Run a web server
with tutorial installed."""
web_server = customized_web_server(request, app)
return web_server()
def test_template_only_view(browser, web_server):
"""See that we can register and render a template only view."""
browser.visit(web_server + "/dummy")
| 26.738095 | 70 | 0.743544 |
import os
import pytest
from websauna.system import Initializer
from websauna.system.core.route import add_template_only_view
from websauna.tests.fixtures import get_app
from websauna.tests.webserver import customized_web_server
HERE = os.path.abspath(os.path.dirname(__file__))
def extra_init(init: Initializer):
config = init.config
config.add_jinja2_search_path(HERE + "/templates", name=".html")
add_template_only_view(config, "/dummy", "dummy", "dummy.html")
@pytest.fixture(scope="module")
def app(request, ini_settings):
app = get_app(ini_settings, extra_init=extra_init)
return app
@pytest.fixture(scope="module")
def web_server(request, app):
web_server = customized_web_server(request, app)
return web_server()
def test_template_only_view(browser, web_server):
browser.visit(web_server + "/dummy")
| true | true |
f715c03152794cffe7e9e530cbda79e5552a407d | 591 | bzl | Python | Examples/ReactNativeKakaoExample/android/app/build_defs.bzl | namdq97/react-native-kakao-login | 603d4f75c912ecdefcfbc2bb7ace02b530a06083 | [
"MIT"
] | 46 | 2017-05-14T13:01:24.000Z | 2022-01-19T00:35:23.000Z | Examples/ReactNativeKakaoExample/android/app/build_defs.bzl | namdq97/react-native-kakao-login | 603d4f75c912ecdefcfbc2bb7ace02b530a06083 | [
"MIT"
] | 12 | 2018-01-12T08:00:27.000Z | 2019-08-11T03:07:47.000Z | Examples/ReactNativeKakaoExample/android/app/build_defs.bzl | namdq97/react-native-kakao-login | 603d4f75c912ecdefcfbc2bb7ace02b530a06083 | [
"MIT"
] | 16 | 2017-05-14T13:29:53.000Z | 2020-11-26T04:01:46.000Z | """Helper definitions to glob .aar and .jar targets"""
def create_aar_targets(aarfiles):
    """Declare an android_prebuilt_aar target per .aar file and record it in lib_deps."""
    for path in aarfiles:
        # Target name is the file's basename without the .aar extension.
        basename = path[path.rindex("/") + 1:path.rindex(".aar")]
        target_name = "aars__" + basename
        lib_deps.append(":" + target_name)
        android_prebuilt_aar(
            name = target_name,
            aar = path,
        )
def create_jar_targets(jarfiles):
    """Declare a prebuilt_jar target per .jar file and record it in lib_deps."""
    for path in jarfiles:
        # Target name is the file's basename without the .jar extension.
        basename = path[path.rindex("/") + 1:path.rindex(".jar")]
        target_name = "jars__" + basename
        lib_deps.append(":" + target_name)
        prebuilt_jar(
            name = target_name,
            binary_jar = path,
        )
for aarfile in aarfiles:
name = "aars__" + aarfile[aarfile.rindex("/") + 1:aarfile.rindex(".aar")]
lib_deps.append(":" + name)
android_prebuilt_aar(
name = name,
aar = aarfile,
)
def create_jar_targets(jarfiles):
for jarfile in jarfiles:
name = "jars__" + jarfile[jarfile.rindex("/") + 1:jarfile.rindex(".jar")]
lib_deps.append(":" + name)
prebuilt_jar(
name = name,
binary_jar = jarfile,
) | true | true |
f715c0b803bbc30d25555211d20acb2cc3914485 | 21 | py | Python | prm/__init__.py | fz420/prm | 19d8b27a679d6f9e669e019a563c3433025ba0c6 | [
"MIT"
] | null | null | null | prm/__init__.py | fz420/prm | 19d8b27a679d6f9e669e019a563c3433025ba0c6 | [
"MIT"
] | 3 | 2021-03-05T06:42:18.000Z | 2021-04-30T03:34:30.000Z | prm/__init__.py | fz420/prm | 19d8b27a679d6f9e669e019a563c3433025ba0c6 | [
"MIT"
] | 2 | 2021-04-23T03:19:57.000Z | 2021-04-23T03:49:55.000Z | from .prm import main | 21 | 21 | 0.809524 | from .prm import main | true | true |
f715c0cdf7d479cb571c7245fd89a407280d6b17 | 5,913 | py | Python | src/visualisation/arrow.py | sdat2/seager19 | 9c3acbc5332da787de1eda2600a82490ff20fa11 | [
"MIT"
] | 5 | 2021-04-08T19:03:52.000Z | 2021-12-17T14:22:49.000Z | src/visualisation/arrow.py | sdat2/seager19 | 9c3acbc5332da787de1eda2600a82490ff20fa11 | [
"MIT"
] | 25 | 2021-04-08T13:53:11.000Z | 2022-03-17T19:45:15.000Z | src/visualisation/arrow.py | sdat2/seager19 | 9c3acbc5332da787de1eda2600a82490ff20fa11 | [
"MIT"
] | null | null | null | """Arrow plots for mechanism."""
import os
from src.plot_utils import ps_defaults
from src.constants import FIGURE_PATH
from typing import Optional
import matplotlib.pyplot as plt
def plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:
    """
    Plot the arrow plot to show that I have reproduced the paper.

    Draws the 1958-2017 nino3.4 trend for the W, RH, and RH+W
    experiments as upward arrows starting at the ECMWF/ORAS4 baseline,
    each topped with a shaded error band, plus horizontal reference
    lines for the ECMWF/ORAS4 and CMIP5 MMM trends.

    Args:
        save_path (Optional[str], optional): Where to save the plot to.
            Defaults to None. If None will not save.
        show_plots (bool, optional): Whether to show plots. Defaults to False.
    """
    ps_defaults(use_tex=False)
    # One color per model-configuration code (used to shade error bands).
    color_d = {
        "EEEE": "blue",
        "EECE": "green",
        "EEEC": "orange",
        "EECC": "red",
    }
    def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
        # Shaded +/- yerr band with a thin black center line at the arrow tip.
        plt.fill_between(
            [x - 0.2, x + 0.2],
            [y + yerr, y + yerr],
            [y - yerr, y - yerr],
            color=color_d[mem],
            alpha=0.5,
        )
        plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
    xlim = [0.5, 3.5]
    head_length = 0.02
    # Shorten each arrow shaft slightly so the head tip sits on the value.
    decrease_arrow = 0.01
    ax = plt.axes()
    # Baseline trend value from ECMWF/ORAS4 [K].
    ecmwf = 0.411
    # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
    # Arrow 1: W experiment, +0.054 K above the baseline.
    ax.arrow(
        1,
        ecmwf,
        0,
        0.054 - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(1, ecmwf + 0.054, 0.005, "EECE")
    # Arrow 2: RH experiment, +0.31 K above the baseline.
    ax.arrow(
        2,
        ecmwf,
        0,
        0.31 - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(2, ecmwf + 0.31, 0.03, "EEEC")
    # Arrow 3: RH+W experiment, +0.47 K above the baseline.
    ax.arrow(
        3,
        ecmwf,
        0,
        0.47 - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(3, ecmwf + 0.47, 0.04, "EECC")
    # Horizontal reference lines: reanalysis baseline and CMIP5 MMM.
    plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
    plt.plot(
        xlim, [ecmwf + 0.478, ecmwf + 0.478], color="red", label="CMIP5 MMM $= 0.889$ K"
    )
    # plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
    plt.xticks(
        [1, 2, 3],
        [
            "W\n" + r"$+ 0.054 \pm 0.005$ K ",
            "RH\n " + r"$+ 0.31 \pm 0.03$ K",
            "RH+W\n " + r"$+ 0.47 \pm 0.04$ K",
        ],
    )
    plt.xlim(xlim)
    plt.ylabel("1958-2017, Trend in nino3.4 [K]")
    plt.legend(
        bbox_to_anchor=(0.0, 1.02, 1, 0.102),
        loc="lower left",
        mode="expand",
        ncol=2,
    )
    plt.tight_layout()
    if save_path is not None:
        plt.savefig(save_path)
    if show_plots:
        plt.show()
    else:
        # Clear the figure so subsequent plots start clean.
        plt.clf()
def plot_arrow_plot_6(
    save_path: Optional[str] = None, show_plots: bool = False
) -> None:
    """
    Plot the arrow plot to show how it performs in cmip6.

    Same layout as :func:`plot_arrow_plot`, but the experiment increments
    are held in named variables and the red reference line is the
    CMIP6 MMM value instead of CMIP5.

    Args:
        save_path (Optional[str], optional): Where to save the plot to.
            Defaults to None. If None will not save.
        show_plots (bool, optional): Whether to show plots. Defaults to False.
    """
    ps_defaults(use_tex=False)
    # One color per model-configuration code (used to shade error bands).
    color_d = {
        "EEEE": "blue",
        "EECE": "green",
        "EEEC": "orange",
        "EECC": "red",
    }
    def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
        # Shaded +/- yerr band with a thin black center line at the arrow tip.
        plt.fill_between(
            [x - 0.2, x + 0.2],
            [y + yerr, y + yerr],
            [y - yerr, y - yerr],
            color=color_d[mem],
            alpha=0.5,
        )
        plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
    xlim = [0.5, 3.5]
    head_length = 0.02
    # Shorten each arrow shaft slightly so the head tip sits on the value.
    decrease_arrow = 0.01
    ax = plt.axes()
    # Baseline trend value from ECMWF/ORAS4 [K].
    ecmwf = 0.411
    # ax.arrow(0, 0, 0, 1, head_width=0.02, head_length=0.02, fc='k', ec='k')
    # Trend increments [K] and uncertainties for each experiment.
    wind = 0.07
    wind_error = 0.01
    rh = 0.15
    rh_error = 0.02
    cmip6 = 0.772
    rh_and_wind = 0.29
    rh_and_wind_error = 0.04
    # Arrow 1: W (wind) experiment.
    ax.arrow(
        1,
        ecmwf,
        0,
        wind - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(1, ecmwf + wind, wind_error, "EECE")
    # Arrow 2: RH (relative humidity) experiment.
    ax.arrow(
        2,
        ecmwf,
        0,
        rh - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(2, ecmwf + rh, rh_error, "EEEC")
    # Arrow 3: RH+W experiment.
    ax.arrow(
        3,
        ecmwf,
        0,
        rh_and_wind - head_length - decrease_arrow,
        head_width=0.02,
        head_length=head_length,
        fc="k",
        ec="k",
    )
    plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, "EECC")
    # Horizontal reference lines: reanalysis baseline and CMIP6 MMM.
    plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
    plt.plot(
        xlim,
        [cmip6, cmip6],
        color="red",
        label="CMIP6 MMM $= 0.772$ K",
    )
    # plt.xticks([0, 1, 2, 3], ["ECMWF", "W", "RH", "RH+W"])
    # NOTE(review): the RH and RH+W tick labels hard-code the values that
    # the variables above also hold -- keep them in sync if values change.
    plt.xticks(
        [1, 2, 3],
        [
            "W\n"
            + r"$+ $"
            + str(wind)
            + r" $\pm$ "
            + r"$"
            + str(wind_error)
            + r"$"
            + " K ",
            "RH\n " + r"$+ $ $0.15$ $\pm$ $0.02$ K",
            "RH+W\n " + r"$+ $ $0.29$ $\pm$ $0.04$ K",
        ],
    )
    plt.xlim(xlim)
    plt.ylabel("1958-2017, Trend in nino3.4 [K]")
    plt.legend(
        bbox_to_anchor=(0.0, 1.02, 1, 0.102),
        loc="lower left",
        mode="expand",
        ncol=2,
    )
    plt.tight_layout()
    if save_path is not None:
        plt.savefig(save_path)
    if show_plots:
        plt.show()
    else:
        # Clear the figure so subsequent plots start clean.
        plt.clf()
if __name__ == "__main__":
# python src/visualisation.arrow()
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.pdf"))
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.png"))
| 25.161702 | 88 | 0.498393 | import os
from src.plot_utils import ps_defaults
from src.constants import FIGURE_PATH
from typing import Optional
import matplotlib.pyplot as plt
def plot_arrow_plot(save_path: Optional[str] = None, show_plots: bool = False) -> None:
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
ax.arrow(
1,
ecmwf,
0,
0.054 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + 0.054, 0.005, "EECE")
ax.arrow(
2,
ecmwf,
0,
0.31 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + 0.31, 0.03, "EEEC")
ax.arrow(
3,
ecmwf,
0,
0.47 - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + 0.47, 0.04, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim, [ecmwf + 0.478, ecmwf + 0.478], color="red", label="CMIP5 MMM $= 0.889$ K"
)
plt.xticks(
[1, 2, 3],
[
"W\n" + r"$+ 0.054 \pm 0.005$ K ",
"RH\n " + r"$+ 0.31 \pm 0.03$ K",
"RH+W\n " + r"$+ 0.47 \pm 0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
def plot_arrow_plot_6(
save_path: Optional[str] = None, show_plots: bool = False
) -> None:
ps_defaults(use_tex=False)
color_d = {
"EEEE": "blue",
"EECE": "green",
"EEEC": "orange",
"EECC": "red",
}
def plot_error(x: float, y: float, yerr: float, mem: str) -> None:
plt.fill_between(
[x - 0.2, x + 0.2],
[y + yerr, y + yerr],
[y - yerr, y - yerr],
color=color_d[mem],
alpha=0.5,
)
plt.plot([x - 0.2, x + 0.2], [y, y], "black", linewidth=1)
xlim = [0.5, 3.5]
head_length = 0.02
decrease_arrow = 0.01
ax = plt.axes()
ecmwf = 0.411
wind = 0.07
wind_error = 0.01
rh = 0.15
rh_error = 0.02
cmip6 = 0.772
rh_and_wind = 0.29
rh_and_wind_error = 0.04
ax.arrow(
1,
ecmwf,
0,
wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(1, ecmwf + wind, wind_error, "EECE")
ax.arrow(
2,
ecmwf,
0,
rh - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(2, ecmwf + rh, rh_error, "EEEC")
ax.arrow(
3,
ecmwf,
0,
rh_and_wind - head_length - decrease_arrow,
head_width=0.02,
head_length=head_length,
fc="k",
ec="k",
)
plot_error(3, ecmwf + rh_and_wind, rh_and_wind_error, "EECC")
plt.plot(xlim, [ecmwf, ecmwf], color="blue", label="ECMWF/ORAS4 $= 0.411$ K ")
plt.plot(
xlim,
[cmip6, cmip6],
color="red",
label="CMIP6 MMM $= 0.772$ K",
)
plt.xticks(
[1, 2, 3],
[
"W\n"
+ r"$+ $"
+ str(wind)
+ r" $\pm$ "
+ r"$"
+ str(wind_error)
+ r"$"
+ " K ",
"RH\n " + r"$+ $ $0.15$ $\pm$ $0.02$ K",
"RH+W\n " + r"$+ $ $0.29$ $\pm$ $0.04$ K",
],
)
plt.xlim(xlim)
plt.ylabel("1958-2017, Trend in nino3.4 [K]")
plt.legend(
bbox_to_anchor=(0.0, 1.02, 1, 0.102),
loc="lower left",
mode="expand",
ncol=2,
)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
if show_plots:
plt.show()
else:
plt.clf()
if __name__ == "__main__":
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.pdf"))
plot_arrow_plot_6(save_path=os.path.join(FIGURE_PATH, "mech_arrow_cmip6.png"))
| true | true |
f715c25e8a6baf9dd30e5de343c4575f046db6a9 | 11,450 | py | Python | elite/route.py | mEDI-S/mEDI_s-Elite-Tools | c6927c79358a3781bdf9da0db82c8c7d46f70dc6 | [
"BSD-3-Clause"
] | 15 | 2015-08-30T01:53:10.000Z | 2021-02-19T21:35:07.000Z | elite/route.py | mEDI-S/mEDI_s-Elite-Tools | c6927c79358a3781bdf9da0db82c8c7d46f70dc6 | [
"BSD-3-Clause"
] | 2 | 2018-02-21T22:13:37.000Z | 2021-03-06T16:48:26.000Z | elite/route.py | mEDI-S/mEDI_s-Elite-Tools | c6927c79358a3781bdf9da0db82c8c7d46f70dc6 | [
"BSD-3-Clause"
] | 7 | 2015-11-22T15:25:07.000Z | 2020-05-23T01:29:40.000Z | # -*- coding: UTF8
'''
Created on 13.07.2015
@author: mEDI
'''
from elite.system import system as elitesystem
#from elite.rares import rares as eliterares
# from elite.route import route as eliteroute
class route(object):
    """One node in a recursive trade-route search tree.

    Each instance represents visiting one system; ``possibleSystems``
    holds the child nodes reachable from it.  The root node (constructed
    without ``before``) owns the database handle and the search limits,
    which every child shares through ``initSystem``.
    """
    # Search limits; owned by the root, copied onto every child node.
    maxHops = None
    maxJumpDistance = None
    maxDeep = None
    # The shortest route is not the best one, because of the better
    # prices at distances > 150 ly.  (Translated from German.)
    systemID = None
    _before = None  # parent node in the route tree (None on the root)
    initSystem = None  # startsystem (root node of the search tree)
    possibleSystems = []  # replaced by a fresh list in __init__
    _raresInSystem = None
    _availableSystemList = None  # unvisited items shaped like (systemID, starDist)
    _sellDone = None
    starDist = None  # star distance taken from the available-system list
    deep = 1  # tree depth of this node (root = 1)
    _hopsFromBefore = None  # estimated jump count for the leg from the parent
    _dist = None  # distance to before system
    mydb = None
    rares = None
    system = None  # elite.system helper, shared via initSystem
    def __init__(self, mydb, before=None, maxDeep=None, maxJumpDistance=None, maxHops=None):
        """Create a node; pass ``before`` to attach it under a parent node."""
        self.mydb = mydb
        self.possibleSystems = []
        self._before = before
        if before:
            # Child node: share the root's system helper and limits.
            self.initSystem = before.initSystem
            self.system = self.initSystem.system
            self.maxHops = self.initSystem.maxHops
            self.maxDeep = self.initSystem.maxDeep
            self.maxJumpDistance = self.initSystem.maxJumpDistance
        else:
            # Root node: create the system helper and keep the limits.
            # NOTE(review): initSystem stays None here; it looks like the
            # caller must assign root.initSystem before children are added
            # -- verify against the call sites.
            self.system = elitesystem(self.mydb)
            self.maxDeep = maxDeep
            self.maxHops = maxHops
            self.maxJumpDistance = maxJumpDistance
    def addPossibleSystems(self, systemID, dist, startdist, systemList):
        """Append a child node for ``systemID`` reached over distance ``dist``."""
        newroute = route(self.mydb, self)
        newroute._availableSystemList = systemList
        newroute._dist = dist
        newroute.systemID = systemID
        newroute.starDist = startdist
        newroute.deep = self.deep + 1
        # Jumps needed for this leg, rounded up.
        newroute._hopsFromBefore = int(round((dist / self.maxJumpDistance) + 0.5))
        self.possibleSystems.append(newroute)
    def setMaxHops(self, hops):
        """Set the maximum number of jumps allowed per leg."""
        self.maxHops = hops
    def setmaxJumpDistance(self, dist):
        """Set the ship's maximum single-jump distance."""
        self.maxJumpDistance = dist
    def calcRoutingDeep(self):
        """Return the maximum depth found in the subtree rooted here."""
        MaxDeep = self.deep
        for nextsystem in self.possibleSystems:
            nMaxDeep = nextsystem.calcRoutingDeep()
            if nMaxDeep > MaxDeep:
                MaxDeep = nMaxDeep
        return MaxDeep
    def getLongRouting(self, maxdeep, dist, totalStartDist, totalHops, systems=[]):
        """Print every branch of the subtree that reaches depth ``maxdeep``.

        NOTE(review): ``systems`` uses a mutable default; the trailing
        ``pop()`` balances the ``append()``, but the list is still shared
        across separate top-level calls -- verify that is intended.
        """
        systems.append(self.systemID)
        for nextsystem in self.possibleSystems:
            if nextsystem.deep >= maxdeep:
                print("system:%s -> %s deep: %d dist:%d totalStarDist:%d hops:%d" % (systems, self.systemID, nextsystem.deep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops))
            nextsystem.getLongRouting(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, systems)
        systems.pop()
    def getMinHops(self, maxdeep, totalHops=0):
        """Return the smallest accumulated hop count of any branch reaching ``maxdeep``."""
        minHops = None
        for nextsystem in self.possibleSystems:
            if nextsystem.deep >= maxdeep:
                if minHops is None or minHops > nextsystem._hopsFromBefore + totalHops:
                    minHops = nextsystem._hopsFromBefore + totalHops
            ret = nextsystem.getMinHops(maxdeep, nextsystem._hopsFromBefore + totalHops)
            if ret and (minHops is None or minHops > ret):
                minHops = ret
        return minHops
    def calcRouteSum(self):
        """Count the nodes in this subtree, including this node."""
        totalSum = 1
        for nextsystem in self.possibleSystems:
            totalSum += nextsystem.calcRouteSum()
        return totalSum
    def getMinStarDist(self, maxdeep, starDist=0):
        """Return the smallest accumulated star distance of any branch reaching ``maxdeep``."""
        minStartDist = None
        for nextsystem in self.possibleSystems:
            if nextsystem.deep >= maxdeep:
                if minStartDist is None or minStartDist > nextsystem.starDist + starDist:
                    minStartDist = nextsystem.starDist + starDist
            ret = nextsystem.getMinStarDist(maxdeep, nextsystem.starDist + starDist)
            if ret and (minStartDist is None or minStartDist > ret):
                minStartDist = ret
        return minStartDist
    def getMinDistFromBest(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None):
        """Return the minimal travel distance among branches that already
        match the minimal hop count and minimal star distance."""
        # On the outermost call, compute the optimal hop/star-distance data first.
        if minHops is None:
            minHops = self.getMinHops(maxdeep)
        if minStardist is None:
            minStardist = self.getMinStarDist(maxdeep)
        minDist = None
        for nextsystem in self.possibleSystems:
            if nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist:
                if minDist is None or minDist > nextsystem._dist + dist:
                    minDist = nextsystem._dist + dist
            ret = nextsystem.getMinDistFromBest(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist)
            if ret and (minDist is None or minDist > ret):
                minDist = ret
        return minDist
    def getBestRoute(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None, minDist=None):
        """Return the chain of nodes (root to leaf) for the best branch:
        minimal hops first, then minimal star distance, then minimal
        travel distance.  Returns None if no branch matches."""
        # On the outermost call, compute the optimal values to match against.
        if minHops is None:
            minHops = self.getMinHops(maxdeep)
        if minStardist is None:
            minStardist = self.getMinStarDist(maxdeep)
        if minDist is None:
            minDist = self.getMinDistFromBest(maxdeep, 0, 0, 0, minHops, minStardist)
        for nextsystem in self.possibleSystems:
            if nextsystem and nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist and minDist == nextsystem._dist + dist:
                # Walk parent links back to the root and return the chain.
                before = nextsystem
                systems = []
                while before:
                    systems.append(before)
                    before = before._before
                systems.reverse()
                return systems
                # (unreachable: the return above always exits first)
                break
            res = nextsystem.getBestRoute(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist, minDist)
            if res :
                return res
    def getAllRoutes(self, maxdeep):
        """Collect all nodes at exactly depth ``maxdeep``, walking from the root."""
        routesList = []
        def listWorker(curSystem):
            if curSystem.deep == maxdeep:
                routesList.append(curSystem)
                return
            for nextsys in curSystem.possibleSystems:
                listWorker(nextsys)
        listWorker(self.initSystem)
        return routesList
    def getSystemsFromRoute(self):
        """Return the node chain from the root down to this node."""
        before = self
        systems = []
        while before:
            systems.append(before)
            before = before._before
        systems.reverse()
        return systems
    def getStardistanceFromRoute(self):
        """Sum the star distances along the chain from the root to this node."""
        before = self
        distance = 0
        while before:
            if before.starDist:
                distance += before.starDist
            before = before._before
        return distance
    def calcRoutenRecrusion(self, slowMode):
        """Recurse into all children unless the depth limit is reached.

        (Method name kept as-is, including the "Recrusion" typo, because
        external callers may depend on it.)
        """
        if self.deep+1 >= self.maxDeep:
            return
        for nextsystem in self.possibleSystems:
            nextsystem.calcAllRoutesFromSystem( slowMode)
    def testExistRoute(self, system, currentRoute):
        """Check whether ``currentRoute`` extended by ``system`` already
        exists on another branch of the tree, in any order.

        (Translated from the original German comment: test whether all
        systems of the route were already used in another combination.)
        """
        # Recursive walk over the whole tree, starting at the root.
        count = len(currentRoute)+1
        def listWorker(curSystem, count):
            if curSystem.systemID in currentRoute:
                count -= 1
            elif curSystem.systemID == system:
                count -= 1
            if count == 0:
                # Only count it as a duplicate when the branch ends exactly
                # on ``system``; other routes may still pass through here.
                if curSystem.systemID == system:
                    return True
                return
            for nextsys in curSystem.possibleSystems:
                if listWorker(nextsys, count) == True:
                    return True
        return listWorker(self.initSystem, count)
    def calcAllRoutesFromSystem(self, slowMode=False):
        """Expand this node: add a child for every reachable system.

        With ``slowMode=True`` every candidate is added; otherwise
        candidates whose system combination already exists on another
        branch are skipped via :meth:`testExistRoute`.
        """
        if len(self._availableSystemList) == 0: return
        maxDistance = self.maxHops * self.maxJumpDistance
        systems = self.system.getSystemsInDistance(self.systemID, maxDistance, self._availableSystemList)
        #=======================================================================
        # reverse=True: long routes first, sell more items
        # reverse=False: short routes first, sell not all items
        # only in slow mode there is no difference
        #=======================================================================
        currentRoute = []
        if slowMode != True:
            systems = sorted(systems, key=lambda system: system["dist"], reverse=True)
            # Build the list of systems already on the current branch.
            currentRoute.append(self.systemID)
            before = self._before
            while before:
                currentRoute.append(before.systemID)
                before = before._before
        for system in systems:
            nextSystemlist = self._availableSystemList[:]
            for listitem in nextSystemlist:
                if listitem[0] == system["System"]:
                    stardist = listitem[1]
                    nextSystemlist.remove(listitem)
                    break
            # NOTE(review): ``stardist`` is only assigned when a matching
            # listitem is found; on a miss the value of the previous loop
            # iteration leaks through (NameError on a first-iteration miss)
            # -- verify this cannot happen or reset it per iteration.
            if stardist == None:
                stardist = 0
            if slowMode == True:
                self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
            else:
                if self.testExistRoute(system["System"], currentRoute) != True:
                    self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
        # Drop working data before recursing to keep memory usage down.
        currentRoute = []
        self._availableSystemList = []
        nextSystemlist = []
        systems = []
        self.calcRoutenRecrusion(slowMode)
| 38.294314 | 230 | 0.585415 |
from elite.system import system as elitesystem
class route(object):
maxHops = None
maxJumpDistance = None
maxDeep = None
systemID = None
_before = None
m = None
possibleSystems = []
_raresInSystem = None
_availableSystemList = None
_sellDone = None
starDist = None
deep = 1
_hopsFromBefore = None
_dist = None
mydb = None
rares = None
system = None
def __init__(self, mydb, before=None, maxDeep=None, maxJumpDistance=None, maxHops=None):
self.mydb = mydb
self.possibleSystems = []
self._before = before
if before:
self.initSystem = before.initSystem
self.system = self.initSystem.system
self.maxHops = self.initSystem.maxHops
self.maxDeep = self.initSystem.maxDeep
self.maxJumpDistance = self.initSystem.maxJumpDistance
else:
self.system = elitesystem(self.mydb)
self.maxDeep = maxDeep
self.maxHops = maxHops
self.maxJumpDistance = maxJumpDistance
def addPossibleSystems(self, systemID, dist, startdist, systemList):
newroute = route(self.mydb, self)
newroute._availableSystemList = systemList
newroute._dist = dist
newroute.systemID = systemID
newroute.starDist = startdist
newroute.deep = self.deep + 1
newroute._hopsFromBefore = int(round((dist / self.maxJumpDistance) + 0.5))
self.possibleSystems.append(newroute)
def setMaxHops(self, hops):
self.maxHops = hops
def setmaxJumpDistance(self, dist):
self.maxJumpDistance = dist
def calcRoutingDeep(self):
MaxDeep = self.deep
for nextsystem in self.possibleSystems:
nMaxDeep = nextsystem.calcRoutingDeep()
if nMaxDeep > MaxDeep:
MaxDeep = nMaxDeep
return MaxDeep
def getLongRouting(self, maxdeep, dist, totalStartDist, totalHops, systems=[]):
systems.append(self.systemID)
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
print("system:%s -> %s deep: %d dist:%d totalStarDist:%d hops:%d" % (systems, self.systemID, nextsystem.deep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops))
nextsystem.getLongRouting(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, systems)
systems.pop()
def getMinHops(self, maxdeep, totalHops=0):
minHops = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minHops is None or minHops > nextsystem._hopsFromBefore + totalHops:
minHops = nextsystem._hopsFromBefore + totalHops
ret = nextsystem.getMinHops(maxdeep, nextsystem._hopsFromBefore + totalHops)
if ret and (minHops is None or minHops > ret):
minHops = ret
return minHops
def calcRouteSum(self):
totalSum = 1
for nextsystem in self.possibleSystems:
totalSum += nextsystem.calcRouteSum()
return totalSum
def getMinStarDist(self, maxdeep, starDist=0):
minStartDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep >= maxdeep:
if minStartDist is None or minStartDist > nextsystem.starDist + starDist:
minStartDist = nextsystem.starDist + starDist
ret = nextsystem.getMinStarDist(maxdeep, nextsystem.starDist + starDist)
if ret and (minStartDist is None or minStartDist > ret):
minStartDist = ret
return minStartDist
def getMinDistFromBest(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None):
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
minDist = None
for nextsystem in self.possibleSystems:
if nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist:
if minDist is None or minDist > nextsystem._dist + dist:
minDist = nextsystem._dist + dist
ret = nextsystem.getMinDistFromBest(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist)
if ret and (minDist is None or minDist > ret):
minDist = ret
return minDist
def getBestRoute(self, maxdeep, dist=0, totalStartDist=0, totalHops=0, minHops=None, minStardist=None, minDist=None):
if minHops is None:
minHops = self.getMinHops(maxdeep)
if minStardist is None:
minStardist = self.getMinStarDist(maxdeep)
if minDist is None:
minDist = self.getMinDistFromBest(maxdeep, 0, 0, 0, minHops, minStardist)
for nextsystem in self.possibleSystems:
if nextsystem and nextsystem.deep == maxdeep and nextsystem._hopsFromBefore + totalHops == minHops and nextsystem.starDist + totalStartDist == minStardist and minDist == nextsystem._dist + dist:
before = nextsystem
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
break
res = nextsystem.getBestRoute(maxdeep, nextsystem._dist + dist, nextsystem.starDist + totalStartDist, nextsystem._hopsFromBefore + totalHops, minHops, minStardist, minDist)
if res :
return res
def getAllRoutes(self, maxdeep):
routesList = []
def listWorker(curSystem):
if curSystem.deep == maxdeep:
routesList.append(curSystem)
return
for nextsys in curSystem.possibleSystems:
listWorker(nextsys)
listWorker(self.initSystem)
return routesList
def getSystemsFromRoute(self):
before = self
systems = []
while before:
systems.append(before)
before = before._before
systems.reverse()
return systems
def getStardistanceFromRoute(self):
before = self
distance = 0
while before:
if before.starDist:
distance += before.starDist
before = before._before
return distance
def calcRoutenRecrusion(self, slowMode):
if self.deep+1 >= self.maxDeep:
return
for nextsystem in self.possibleSystems:
nextsystem.calcAllRoutesFromSystem( slowMode)
def testExistRoute(self, system, currentRoute):
count = len(currentRoute)+1
def listWorker(curSystem, count):
if curSystem.systemID in currentRoute:
count -= 1
elif curSystem.systemID == system:
count -= 1
if count == 0:
if curSystem.systemID == system:
return True
return
for nextsys in curSystem.possibleSystems:
if listWorker(nextsys, count) == True:
return True
return listWorker(self.initSystem, count)
def calcAllRoutesFromSystem(self, slowMode=False):
if len(self._availableSystemList) == 0: return
maxDistance = self.maxHops * self.maxJumpDistance
systems = self.system.getSystemsInDistance(self.systemID, maxDistance, self._availableSystemList)
currentRoute = []
if slowMode != True:
systems = sorted(systems, key=lambda system: system["dist"], reverse=True)
currentRoute.append(self.systemID)
before = self._before
while before:
currentRoute.append(before.systemID)
before = before._before
for system in systems:
nextSystemlist = self._availableSystemList[:]
for listitem in nextSystemlist:
if listitem[0] == system["System"]:
stardist = listitem[1]
nextSystemlist.remove(listitem)
break
if stardist == None:
stardist = 0
if slowMode == True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
else:
if self.testExistRoute(system["System"], currentRoute) != True:
self.addPossibleSystems(system["System"], system["dist"], stardist, nextSystemlist)
currentRoute = []
self._availableSystemList = []
nextSystemlist = []
systems = []
self.calcRoutenRecrusion(slowMode)
| true | true |
f715c277513bc3a3aa82c20df1b2e8276d462a27 | 9,895 | py | Python | pcdsdevices/tests/test_ccm.py | vespos/pcdsdevices | 7c4728df62ea58b6491d1cb36bb39d27d6dd9fca | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-06-17T20:08:54.000Z | 2022-01-11T17:55:21.000Z | pcdsdevices/tests/test_ccm.py | vespos/pcdsdevices | 7c4728df62ea58b6491d1cb36bb39d27d6dd9fca | [
"BSD-3-Clause-LBNL"
] | 757 | 2017-12-21T23:16:41.000Z | 2022-03-31T22:56:06.000Z | pcdsdevices/tests/test_ccm.py | vespos/pcdsdevices | 7c4728df62ea58b6491d1cb36bb39d27d6dd9fca | [
"BSD-3-Clause-LBNL"
] | 38 | 2018-01-26T00:01:35.000Z | 2022-02-17T00:48:55.000Z | import logging
import time
import numpy as np
import pytest
from ophyd.sim import fake_device_cache, make_fake_device
from .. import ccm
from ..sim import FastMotor
logger = logging.getLogger(__name__)
# Reference values used across the inversion tests below.
SAMPLE_ALIO = 4.575  # Current value as of writing this file
SAMPLE_THETA = 1.2  # Modest angle
SAMPLE_WAVELENGTH = 1.5  # hard xray wavelength (units per the ccm module)
# Make sure the calcs are properly inverted
def test_theta_alio_inversion():
    """Round-trip alio -> theta -> alio and check we land back on the start."""
    logger.debug('test_theta_alio_inversion')
    geometry = (ccm.default_theta0, ccm.default_gr, ccm.default_gd)
    theta = ccm.alio_to_theta(SAMPLE_ALIO, *geometry)
    recovered_alio = ccm.theta_to_alio(theta, *geometry)
    # Unlike the other inversions, the alio <-> theta pair is only an
    # approximation, so compare with isclose rather than equality.
    assert np.isclose(recovered_alio, SAMPLE_ALIO)
def test_wavelength_theta_inversion():
    """Round-trip wavelength <-> theta starting from each side."""
    logger.debug('test_wavelength_theta_inversion')
    # Start from theta: theta -> wavelength -> theta.
    wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)
    theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)
    logger.debug('%s, %s', wavelength, theta)
    assert np.isclose(theta, SAMPLE_THETA)
    # Start from wavelength: wavelength -> theta -> wavelength.
    theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)
    wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)
    logger.debug('%s, %s', wavelength, theta)
    assert np.isclose(wavelength, SAMPLE_WAVELENGTH)
def test_energy_wavelength_inversion():
    """energy_to_wavelength must exactly invert wavelength_to_energy."""
    logger.debug('test_energy_wavelength_inversion')
    round_trip = ccm.energy_to_wavelength(
        ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)
    )
    assert round_trip == SAMPLE_WAVELENGTH
@pytest.fixture(scope='function')
def fake_ccm():
    """Provide a fresh fully-simulated CCM device for each test."""
    return make_fake_ccm()
class FakeAlio(FastMotor):
    """Fast simulated alio motor with the real device's extra commands stubbed out."""
    # Disable the kill/home components so the fake device does not need them.
    kill = None
    home = None
def make_fake_ccm():
    """
    Build a fully simulated CCM instance for testing.

    Swaps the CCM motor classes for fast simulated ones, instantiates a
    fake CCM with dummy PV prefixes (in position = 8, out position = 0),
    seeds the x/y motor signals, and presets the alio axes to
    ``SAMPLE_ALIO``.

    Returns:
        The fake CCM device, usable without a live EPICS environment.
    """
    # Substitute simulated motors so moves complete instantly.
    fake_device_cache[ccm.CCMMotor] = FastMotor
    fake_device_cache[ccm.CCMAlio] = FakeAlio
    FakeCCM = make_fake_device(ccm.CCM)
    fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
                       theta2coarse_prefix='THTA', chi2_prefix='CHI',
                       x_down_prefix='X:DOWN', x_up_prefix='X:UP',
                       y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
                       y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
                       name='fake_ccm')
    def init_pos(mot, pos=0):
        # Seed the simulated signals so reads and moves behave sensibly.
        # Fix: honor the ``pos`` argument -- it was previously accepted but
        # ignored, with the readback/setpoint always seeded as 0.  All
        # current callers use the default, so behavior is unchanged.
        mot.user_readback.sim_put(pos)
        mot.user_setpoint.sim_put(pos)
        # (0, 0) limits -- presumably disables limit checking; verify
        # against the EpicsMotor simulation conventions.
        mot.user_setpoint.sim_set_limits((0, 0))
        mot.motor_spg.sim_put(2)  # NOTE(review): 2 looks like the "Go" state
        mot.part_number.sim_put('tasdf')
    init_pos(fake_ccm.x.down)
    init_pos(fake_ccm.x.up)
    init_pos(fake_ccm.y.down)
    init_pos(fake_ccm.y.up_north)
    init_pos(fake_ccm.y.up_south)
    # Start the alio axes at the known sample position.
    fake_ccm.alio.set(SAMPLE_ALIO)
    fake_ccm.energy.alio.set(SAMPLE_ALIO)
    fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)
    fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)
    return fake_ccm
def test_fake_ccm(fake_ccm):
    """Smoke test: reading every signal on the fake CCM must not raise."""
    logger.debug('test_fake_ccm')
    fake_ccm.get()
# Make sure we set up the forward/inverse to use the right methods
def test_ccm_calc(fake_ccm):
    """Check the energy pseudo-motor's forward/inverse calculations.

    Recomputes the alio -> theta -> wavelength -> energy chain by hand
    and compares it to the device's reported energy, then verifies that
    moving back to that energy restores the sample alio position.
    """
    logger.debug('test_ccm_calc')
    calc = fake_ccm.energy
    logger.debug('physics pos is %s', calc.position)
    logger.debug('real pos is %s', calc.real_position)
    logger.debug('sample alio is %s', SAMPLE_ALIO)
    # Manual recomputation of the inverse transform from SAMPLE_ALIO.
    theta_func = ccm.alio_to_theta(
        SAMPLE_ALIO,
        calc.theta0_rad_val,
        calc.gr_val,
        calc.gd_val,
    )
    wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)
    energy_func = ccm.wavelength_to_energy(wavelength_func)
    energy = calc.energy.position
    assert energy == energy_func
    # Forward transform: moving to that energy must restore the alio position.
    calc.alio.move(0)
    calc.move(energy, wait=False)
    assert np.isclose(calc.alio.position, SAMPLE_ALIO)
    # Moving to the current position again must be a no-op for alio.
    calc.alio.move(calc.alio.position)
    calc.move(energy=calc.energy.position, wait=False)
    assert np.isclose(calc.alio.position, SAMPLE_ALIO)
# Make sure sync'd axes work and that unk/in/out states work
@pytest.mark.timeout(5)
def test_ccm_main(fake_ccm):
    """Check the synchronized y axes and the unknown/in/out state logic.

    The fixture configures in_pos=8 and out_pos=0, so x readbacks of 8
    mean inserted, 0 means removed, and anything else is unknown.
    """
    logger.debug('test_ccm_main')
    # A single y move must fan out to all three y setpoints.
    fake_ccm.y.move(5, wait=False)
    assert fake_ccm.y.down.user_setpoint.get() == 5
    assert fake_ccm.y.up_north.user_setpoint.get() == 5
    assert fake_ccm.y.up_south.user_setpoint.get() == 5
    # x starts at 0 (= out_pos): removed.
    assert fake_ccm.removed
    assert not fake_ccm.inserted
    # x at 8 (= in_pos): inserted.
    fake_ccm.x.down.user_readback.sim_put(8)
    fake_ccm.x.up.user_readback.sim_put(8)
    assert not fake_ccm.removed
    assert fake_ccm.inserted
    # x at 4 (neither position): unknown state.
    fake_ccm.x.down.user_readback.sim_put(4)
    fake_ccm.x.up.user_readback.sim_put(4)
    assert not fake_ccm.removed
    assert not fake_ccm.inserted
    # insert()/remove() must request the configured in/out positions.
    fake_ccm.insert(wait=False)
    assert fake_ccm.x.down.user_setpoint.get() == 8
    assert fake_ccm.x.up.user_setpoint.get() == 8
    fake_ccm.remove(wait=False)
    assert fake_ccm.x.down.user_setpoint.get() == 0
    assert fake_ccm.x.up.user_setpoint.get() == 0
@pytest.mark.timeout(5)
def test_vernier(fake_ccm):
    """Check that energy_with_vernier keeps the vernier request in sync.

    The vernier setpoint tracks the energy pseudo-position times 1000
    (the 7 -> 7000 pattern below), and requests that change the energy
    by less than 30 eV are skipped unless skip_small_moves is disabled.
    """
    logger.debug('test_vernier')
    pseudopos = fake_ccm.energy_with_vernier
    # Moving with vernier should move the energy request motor too
    pseudopos.move(7, wait=False)
    assert np.isclose(pseudopos.energy.position, 7)
    assert pseudopos.vernier.position == 7000
    pseudopos.move(8, wait=False)
    assert np.isclose(pseudopos.energy.position, 8)
    assert pseudopos.vernier.position == 8000
    pseudopos.move(9, wait=False)
    assert np.isclose(pseudopos.energy.position, 9)
    assert pseudopos.vernier.position == 9000
    # Small moves (less than 30eV) should be skipped on the energy request
    pseudopos.move(9.001, wait=False)
    assert np.isclose(pseudopos.energy.position, 9.001)
    assert pseudopos.vernier.position == 9000
    # Unless we set the option for not skipping them
    pseudopos.vernier.skip_small_moves = False
    pseudopos.move(9.002, wait=False)
    assert np.isclose(pseudopos.energy.position, 9.002)
    assert pseudopos.vernier.position == 9002
@pytest.mark.timeout(5)
def test_set_current_position(fake_ccm):
    """set_current_position should redefine the readback to the new value."""
    logger.debug('test_set_current_position')
    motor = fake_ccm.energy.energy
    for target in (6, 7, 8, 9, 10, 11, 12, 13):
        motor.set_current_position(target)
        assert np.isclose(motor.position, target)
@pytest.mark.timeout(5)
def test_check_valid_constant(fake_ccm):
    """Walk _check_valid_constant through every CCMConstantWarning value."""
    logger.debug('test_check_valid_constant')

    def set_conn(sig, connected):
        sig._metadata['connected'] = connected

    def set_valid(sig, valid):
        # The first put here sends the signal's first monitor update
        sig.put(1 if valid else 0)

    def check(sig):
        return fake_ccm._check_valid_constant(sig, sig.get())

    dspacing = fake_ccm.dspacing
    # Never connected, never updated
    set_conn(dspacing, False)
    assert check(dspacing) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT
    # Connected while holding an invalid value
    set_conn(dspacing, True)
    set_valid(dspacing, False)
    assert check(dspacing) == ccm.CCMConstantWarning.INVALID_CONNECT
    # Disconnected after an invalid update
    set_conn(dspacing, False)
    assert check(dspacing) == ccm.CCMConstantWarning.INVALID_DISCONNECT
    # Connected with a valid value
    set_conn(dspacing, True)
    set_valid(dspacing, True)
    assert check(dspacing) == ccm.CCMConstantWarning.NO_WARNING
    # Disconnected after a valid update
    set_conn(dspacing, False)
    assert check(dspacing) == ccm.CCMConstantWarning.VALID_DISCONNECT
    # theta0_deg is allowed to be zero, unlike the others
    theta0 = fake_ccm.theta0_deg
    set_conn(theta0, True)
    set_valid(theta0, False)
    assert check(theta0) == ccm.CCMConstantWarning.NO_WARNING
@pytest.mark.timeout(5)
def test_show_constant_warning(fake_ccm, caplog):
    """Every warning except NO_WARNING must emit exactly one log record."""
    logger.debug('test_show_constant_warning')
    all_warnings = (
        ccm.CCMConstantWarning.NO_WARNING,
        ccm.CCMConstantWarning.ALWAYS_DISCONNECT,
        ccm.CCMConstantWarning.VALID_DISCONNECT,
        ccm.CCMConstantWarning.INVALID_DISCONNECT,
        ccm.CCMConstantWarning.INVALID_CONNECT,
    )
    for warning in all_warnings:
        caplog.clear()
        with caplog.at_level(logging.WARNING):
            fake_ccm._show_constant_warning(
                warning,
                fake_ccm.dspacing,
                0.111111,
                0.222222,
            )
        expected_records = 0 if warning == ccm.CCMConstantWarning.NO_WARNING else 1
        assert len(caplog.records) == expected_records
@pytest.mark.timeout(5)
def test_warn_invalid_constants(fake_ccm, caplog):
    """Invalid-constant warnings fire once per bad PV and respect only_new."""
    logger.debug('test_warn_invalid_constants')
    # Trick the warning into thinking we've been initialized for a while
    fake_ccm._init_time = time.monotonic() - 1000
    # Zero is an invalid value for all of these except theta0_deg
    fake_ccm.theta0_deg.put(0)
    fake_ccm.dspacing.put(0)
    fake_ccm.gr.put(0)
    fake_ccm.gd.put(0)
    # We expect three warnings from the fake PVs that start at 0
    caplog.clear()
    with caplog.at_level(logging.WARNING):
        fake_ccm.warn_invalid_constants(only_new=False)
    assert len(caplog.records) == 3
    # We expect the warnings to not repeat
    caplog.clear()
    fake_ccm.warn_invalid_constants(only_new=True)
    assert len(caplog.records) == 0
    # Unless we ask them to
    caplog.clear()
    fake_ccm.warn_invalid_constants(only_new=False)
    assert len(caplog.records) == 3
    # Let's fix the issue and make sure no warnings are shown
    fake_ccm.reset_calc_constant_defaults(confirm=False)
    caplog.clear()
    fake_ccm.warn_invalid_constants(only_new=False)
    assert len(caplog.records) == 0
@pytest.mark.timeout(5)
def test_disconnected_ccm():
    """Instantiating a CCM with no live PVs must not raise."""
    prefixes = dict(
        alio_prefix='ALIO',
        theta2fine_prefix='THETA',
        theta2coarse_prefix='THTA',
        chi2_prefix='CHI',
        x_down_prefix='X:DOWN',
        x_up_prefix='X:UP',
        y_down_prefix='Y:DOWN',
        y_up_north_prefix='Y:UP:NORTH',
        y_up_south_prefix='Y:UP:SOUTH',
    )
    ccm.CCM(in_pos=8, out_pos=0, name='ccm', **prefixes)
| 33.316498 | 78 | 0.700455 | import logging
import time
import numpy as np
import pytest
from ophyd.sim import fake_device_cache, make_fake_device
from .. import ccm
from ..sim import FastMotor
logger = logging.getLogger(__name__)
SAMPLE_ALIO = 4.575
SAMPLE_THETA = 1.2
SAMPLE_WAVELENGTH = 1.5
def test_theta_alio_inversion():
    """alio -> theta -> alio round trip reproduces SAMPLE_ALIO."""
    logger.debug('test_theta_alio_inversion')
    theta = ccm.alio_to_theta(SAMPLE_ALIO, ccm.default_theta0, ccm.default_gr,
                              ccm.default_gd)
    alio_calc = ccm.theta_to_alio(theta, ccm.default_theta0, ccm.default_gr,
                                  ccm.default_gd)
    assert np.isclose(alio_calc, SAMPLE_ALIO)
def test_wavelength_theta_inversion():
    """wavelength <-> theta conversions invert each other in both directions."""
    logger.debug('test_wavelength_theta_inversion')
    # Start from theta, convert out and back
    wavelength = ccm.theta_to_wavelength(SAMPLE_THETA, ccm.default_dspacing)
    theta = ccm.wavelength_to_theta(wavelength, ccm.default_dspacing)
    logger.debug('%s, %s', wavelength, theta)
    assert np.isclose(theta, SAMPLE_THETA)
    # Start from wavelength, convert out and back
    theta = ccm.wavelength_to_theta(SAMPLE_WAVELENGTH, ccm.default_dspacing)
    wavelength = ccm.theta_to_wavelength(theta, ccm.default_dspacing)
    logger.debug('%s, %s', wavelength, theta)
    assert np.isclose(wavelength, SAMPLE_WAVELENGTH)
def test_energy_wavelength_inversion():
    """energy <-> wavelength conversions invert each other."""
    logger.debug('test_energy_wavelength_inversion')
    energy = ccm.wavelength_to_energy(SAMPLE_WAVELENGTH)
    wavelength_calc = ccm.energy_to_wavelength(energy)
    # Use np.isclose like the other round-trip tests in this file: the
    # conversion pair is floating-point arithmetic, so exact == equality
    # is fragile and inconsistent with its siblings.
    assert np.isclose(wavelength_calc, SAMPLE_WAVELENGTH)
@pytest.fixture(scope='function')
def fake_ccm():
    """Provide a fresh fully-faked CCM for each test (see make_fake_ccm)."""
    return make_fake_ccm()
class FakeAlio(FastMotor):
    """FastMotor stand-in for CCMAlio with the kill/home components disabled."""
    kill = None
    home = None
def make_fake_ccm():
    """
    Construct a fully faked CCM for testing.

    Real motor classes are swapped for sim motors, all x/y motors are
    initialized at position 0, and the alio/calc axes are preset to
    SAMPLE_ALIO so the energy calculations start from a known state.
    """
    # Substitute sim motors for the real CCM motor classes
    fake_device_cache[ccm.CCMMotor] = FastMotor
    fake_device_cache[ccm.CCMAlio] = FakeAlio
    FakeCCM = make_fake_device(ccm.CCM)
    fake_ccm = FakeCCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
                       theta2coarse_prefix='THTA', chi2_prefix='CHI',
                       x_down_prefix='X:DOWN', x_up_prefix='X:UP',
                       y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
                       y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
                       name='fake_ccm')

    def init_pos(mot, pos=0):
        # Fix: honor the requested position. Previously ``pos`` was a dead
        # parameter and the readback/setpoint were always forced to 0.
        # All current callers use the default, so behavior is unchanged.
        mot.user_readback.sim_put(pos)
        mot.user_setpoint.sim_put(pos)
        mot.user_setpoint.sim_set_limits((0, 0))
        mot.motor_spg.sim_put(2)
        mot.part_number.sim_put('tasdf')

    init_pos(fake_ccm.x.down)
    init_pos(fake_ccm.x.up)
    init_pos(fake_ccm.y.down)
    init_pos(fake_ccm.y.up_north)
    init_pos(fake_ccm.y.up_south)
    # Start the alio and calc engines at the known sample position
    fake_ccm.alio.set(SAMPLE_ALIO)
    fake_ccm.energy.alio.set(SAMPLE_ALIO)
    fake_ccm.energy_with_vernier.alio.set(SAMPLE_ALIO)
    fake_ccm.energy_with_vernier.vernier.setpoint.sim_put(0)
    return fake_ccm
def test_fake_ccm(fake_ccm):
    """Smoke test: the fake CCM can be instantiated and read in full."""
    logger.debug('test_fake_ccm')
    fake_ccm.get()
def test_ccm_calc(fake_ccm):
    """The energy pseudomotor's forward/inverse calcs match the raw formulas."""
    logger.debug('test_ccm_calc')
    calc = fake_ccm.energy
    logger.debug('physics pos is %s', calc.position)
    logger.debug('real pos is %s', calc.real_position)
    logger.debug('sample alio is %s', SAMPLE_ALIO)
    # Recompute the expected energy by chaining the module-level formulas
    theta_func = ccm.alio_to_theta(
        SAMPLE_ALIO,
        calc.theta0_rad_val,
        calc.gr_val,
        calc.gd_val,
    )
    wavelength_func = ccm.theta_to_wavelength(theta_func, calc.dspacing_val)
    energy_func = ccm.wavelength_to_energy(wavelength_func)
    energy = calc.energy.position
    assert energy == energy_func
    # Moving to that energy must drive alio back to the sample position
    calc.alio.move(0)
    calc.move(energy, wait=False)
    assert np.isclose(calc.alio.position, SAMPLE_ALIO)
    # Re-requesting the current energy should hold the alio position
    calc.alio.move(calc.alio.position)
    calc.move(energy=calc.energy.position, wait=False)
    assert np.isclose(calc.alio.position, SAMPLE_ALIO)
@pytest.mark.timeout(5)
def test_ccm_main(fake_ccm):
logger.debug('test_ccm_main')
fake_ccm.y.move(5, wait=False)
assert fake_ccm.y.down.user_setpoint.get() == 5
assert fake_ccm.y.up_north.user_setpoint.get() == 5
assert fake_ccm.y.up_south.user_setpoint.get() == 5
assert fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(8)
fake_ccm.x.up.user_readback.sim_put(8)
assert not fake_ccm.removed
assert fake_ccm.inserted
fake_ccm.x.down.user_readback.sim_put(4)
fake_ccm.x.up.user_readback.sim_put(4)
assert not fake_ccm.removed
assert not fake_ccm.inserted
fake_ccm.insert(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 8
assert fake_ccm.x.up.user_setpoint.get() == 8
fake_ccm.remove(wait=False)
assert fake_ccm.x.down.user_setpoint.get() == 0
assert fake_ccm.x.up.user_setpoint.get() == 0
@pytest.mark.timeout(5)
def test_vernier(fake_ccm):
logger.debug('test_vernier')
pseudopos = fake_ccm.energy_with_vernier
# Moving with vernier should move the energy request motor too
pseudopos.move(7, wait=False)
assert np.isclose(pseudopos.energy.position, 7)
assert pseudopos.vernier.position == 7000
pseudopos.move(8, wait=False)
assert np.isclose(pseudopos.energy.position, 8)
assert pseudopos.vernier.position == 8000
pseudopos.move(9, wait=False)
assert np.isclose(pseudopos.energy.position, 9)
assert pseudopos.vernier.position == 9000
# Small moves (less than 30eV) should be skipped on the energy request
pseudopos.move(9.001, wait=False)
assert np.isclose(pseudopos.energy.position, 9.001)
assert pseudopos.vernier.position == 9000
# Unless we set the option for not skipping them
pseudopos.vernier.skip_small_moves = False
pseudopos.move(9.002, wait=False)
assert np.isclose(pseudopos.energy.position, 9.002)
assert pseudopos.vernier.position == 9002
@pytest.mark.timeout(5)
def test_set_current_position(fake_ccm):
logger.debug('test_set_current_position')
mot = fake_ccm.energy.energy
for energy in range(6, 14):
mot.set_current_position(energy)
assert np.isclose(mot.position, energy)
@pytest.mark.timeout(5)
def test_check_valid_constant(fake_ccm):
logger.debug('test_check_valid_constant')
# First call to make_valid sends the first monitor update
def make_valid(sig, valid):
if valid:
sig.put(1)
else:
sig.put(0)
def make_conn(sig, conn):
sig._metadata['connected'] = conn
def output(sig):
return fake_ccm._check_valid_constant(sig, sig.get())
test_sig = fake_ccm.dspacing
# Can we get to all the enum values?
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.ALWAYS_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_CONNECT
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.INVALID_DISCONNECT
make_conn(test_sig, True)
make_valid(test_sig, True)
assert output(test_sig) == ccm.CCMConstantWarning.NO_WARNING
make_conn(test_sig, False)
assert output(test_sig) == ccm.CCMConstantWarning.VALID_DISCONNECT
# theta0_deg is allowed to be zero, unlike the others
test_sig2 = fake_ccm.theta0_deg
make_conn(test_sig2, True)
make_valid(test_sig2, False)
assert output(test_sig2) == ccm.CCMConstantWarning.NO_WARNING
@pytest.mark.timeout(5)
def test_show_constant_warning(fake_ccm, caplog):
logger.debug('test_show_constant_warning')
for warning in (
ccm.CCMConstantWarning.NO_WARNING,
ccm.CCMConstantWarning.ALWAYS_DISCONNECT,
ccm.CCMConstantWarning.VALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_DISCONNECT,
ccm.CCMConstantWarning.INVALID_CONNECT,
):
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm._show_constant_warning(
warning,
fake_ccm.dspacing,
0.111111,
0.222222,
)
if warning == ccm.CCMConstantWarning.NO_WARNING:
assert len(caplog.records) == 0
else:
assert len(caplog.records) == 1
@pytest.mark.timeout(5)
def test_warn_invalid_constants(fake_ccm, caplog):
logger.debug('test_warn_invalid_constants')
# Trick the warning into thinking we've be initialized for a while
fake_ccm._init_time = time.monotonic() - 1000
fake_ccm.theta0_deg.put(0)
fake_ccm.dspacing.put(0)
fake_ccm.gr.put(0)
fake_ccm.gd.put(0)
caplog.clear()
with caplog.at_level(logging.WARNING):
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=True)
assert len(caplog.records) == 0
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 3
fake_ccm.reset_calc_constant_defaults(confirm=False)
caplog.clear()
fake_ccm.warn_invalid_constants(only_new=False)
assert len(caplog.records) == 0
@pytest.mark.timeout(5)
def test_disconnected_ccm():
ccm.CCM(alio_prefix='ALIO', theta2fine_prefix='THETA',
theta2coarse_prefix='THTA', chi2_prefix='CHI',
x_down_prefix='X:DOWN', x_up_prefix='X:UP',
y_down_prefix='Y:DOWN', y_up_north_prefix='Y:UP:NORTH',
y_up_south_prefix='Y:UP:SOUTH', in_pos=8, out_pos=0,
name='ccm')
| true | true |
f715c27add3916da3a4ba06ed5e3227bf392db8f | 4,303 | py | Python | spectree/models.py | loonateam/spectree | 71b2d34993e01b36a8de18c2a3d6856d0c9e45c3 | [
"Apache-2.0"
] | 183 | 2019-12-29T00:37:09.000Z | 2022-03-15T20:37:53.000Z | spectree/models.py | 0b01001001/spectree | 35f17fe9694031a335223111d7fb38175d7e6e25 | [
"Apache-2.0"
] | 102 | 2019-12-13T09:10:53.000Z | 2022-03-15T06:21:29.000Z | spectree/models.py | loonateam/spectree | 71b2d34993e01b36a8de18c2a3d6856d0c9e45c3 | [
"Apache-2.0"
] | 51 | 2020-01-06T21:06:07.000Z | 2022-03-19T16:10:58.000Z | import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
# OpenAPI names validation regexp
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
    """OpenAPI externalDocs object: a required URL plus optional description."""
    description: str = ""
    url: str
class Tag(BaseModel):
    """OpenAPI tag object: name, optional description and external docs."""
    name: str
    description: str = ""
    externalDocs: ExternalDocs = None
    def __str__(self):
        # Tags are referenced by name when rendered into the spec
        return self.name
class UnprocessableEntityElement(BaseModel):
    """One pydantic validation error: location, message, type and context."""
    # Path to the offending field within the request payload
    loc: Sequence[str] = Field(
        ...,
        title="Missing field name",
    )
    msg: str = Field(
        ...,
        title="Error message",
    )
    type: str = Field(  # noqa: WPS125
        ...,
        title="Error type",
    )
    # Optional extra values associated with the error
    ctx: Dict[str, Any] = Field(
        None,
        title="Error context",
    )
class UnprocessableEntity(BaseModel):
    """Model of 422 Unprocessable Entity error."""
    # Custom root type: the response body is a bare JSON array of elements
    __root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
    """OpenAPI security scheme ``type`` values."""
    HTTP = "http"
    API_KEY = "apiKey"
    OAUTH_TWO = "oauth2"
    OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
    """Locations where an API key may be passed (``in`` field values)."""
    HEADER = "header"
    QUERY = "query"
    COOKIE = "cookie"
# Required SecuritySchemeData fields for each security scheme type,
# enforced by SecuritySchemeData.check_type_required_fields below.
type_req_fields = {
    SecureType.HTTP: ["scheme"],
    SecureType.API_KEY: ["name", "field_in"],
    SecureType.OAUTH_TWO: ["flows"],
    SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
    """
    Security scheme data
    https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.3.md#securitySchemeObject
    """
    type: SecureType = Field(..., description="Secure scheme type")
    description: str = Field(
        None,
        description="A short description for security scheme.",
    )
    name: str = Field(
        None,
        description="The name of the header, query or cookie parameter to be used.",
    )
    # Serialized as "in" (reserved keyword in Python, hence the alias)
    field_in: InType = Field(
        None, alias="in", description="The location of the API key."
    )
    scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
    bearerFormat: str = Field(
        None,
        description=(
            "A hint to the client to identify how the bearer token is formatted."
        ),
    )
    flows: dict = Field(
        None,
        description=(
            "Containing configuration information for the flow types supported."
        ),
    )
    openIdConnectUrl: str = Field(
        None, description="OpenId Connect URL to discover OAuth2 configuration values."
    )
    @root_validator()
    def check_type_required_fields(cls, values: dict):
        """Require the scheme-type-specific fields listed in type_req_fields."""
        # Only fields that were actually given a truthy value count as present
        exist_fields = {key for key in values.keys() if values[key]}
        if not values.get("type"):
            raise ValueError("Type field is required")
        if not set(type_req_fields[values["type"]]).issubset(exist_fields):
            raise ValueError(
                f"For `{values['type']}` type "
                f"`{', '.join(type_req_fields[values['type']])}` field(s) is required."
            )
        return values
    class Config:
        # Re-run validators on attribute assignment, not just construction
        validate_assignment = True
class SecurityScheme(BaseModel):
    """
    Named security scheme
    """
    name: str = Field(
        ...,
        description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
    )
    data: SecuritySchemeData = Field(..., description="Security scheme data")
    @validator("name")
    def check_name(cls, value: str):
        """Reject names containing characters outside [A-Za-z0-9-._]."""
        # fullmatch so the whole name must satisfy the pattern, not a prefix
        if not OpenAPI_NAME_RE.fullmatch(value):
            raise ValueError("Name not match OpenAPI rules")
        return value
    class Config:
        # Re-run validators on attribute assignment, not just construction
        validate_assignment = True
class Server(BaseModel):
    """
    Servers section of OAS
    """
    url: str = Field(
        ...,
        description="""URL or path of API server
        (may be parametrized with using \"variables\" section - for more information,
        see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
    )
    description: str = Field(
        None,
        description="Custom server description for server URL",
    )
    variables: dict = Field(
        None,
        description="Variables for customizing server URL",
    )
    class Config:
        # Re-run validators on attribute assignment, not just construction
        validate_assignment = True
| 25.163743 | 97 | 0.614455 | import re
from enum import Enum
from typing import Any, Dict, Sequence
from pydantic import BaseModel, Field, root_validator, validator
OpenAPI_NAME_RE = re.compile(r"^[A-Za-z0-9-._]+")
class ExternalDocs(BaseModel):
description: str = ""
url: str
class Tag(BaseModel):
name: str
description: str = ""
externalDocs: ExternalDocs = None
def __str__(self):
return self.name
class UnprocessableEntityElement(BaseModel):
loc: Sequence[str] = Field(
...,
title="Missing field name",
)
msg: str = Field(
...,
title="Error message",
)
type: str = Field(
...,
title="Error type",
)
ctx: Dict[str, Any] = Field(
None,
title="Error context",
)
class UnprocessableEntity(BaseModel):
__root__: Sequence[UnprocessableEntityElement]
class SecureType(str, Enum):
HTTP = "http"
API_KEY = "apiKey"
OAUTH_TWO = "oauth2"
OPEN_ID_CONNECT = "openIdConnect"
class InType(str, Enum):
HEADER = "header"
QUERY = "query"
COOKIE = "cookie"
type_req_fields = {
SecureType.HTTP: ["scheme"],
SecureType.API_KEY: ["name", "field_in"],
SecureType.OAUTH_TWO: ["flows"],
SecureType.OPEN_ID_CONNECT: ["openIdConnectUrl"],
}
class SecuritySchemeData(BaseModel):
type: SecureType = Field(..., description="Secure scheme type")
description: str = Field(
None,
description="A short description for security scheme.",
)
name: str = Field(
None,
description="The name of the header, query or cookie parameter to be used.",
)
field_in: InType = Field(
None, alias="in", description="The location of the API key."
)
scheme: str = Field(None, description="The name of the HTTP Authorization scheme.")
bearerFormat: str = Field(
None,
description=(
"A hint to the client to identify how the bearer token is formatted."
),
)
flows: dict = Field(
None,
description=(
"Containing configuration information for the flow types supported."
),
)
openIdConnectUrl: str = Field(
None, description="OpenId Connect URL to discover OAuth2 configuration values."
)
@root_validator()
def check_type_required_fields(cls, values: dict):
exist_fields = {key for key in values.keys() if values[key]}
if not values.get("type"):
raise ValueError("Type field is required")
if not set(type_req_fields[values["type"]]).issubset(exist_fields):
raise ValueError(
f"For `{values['type']}` type "
f"`{', '.join(type_req_fields[values['type']])}` field(s) is required."
)
return values
class Config:
validate_assignment = True
class SecurityScheme(BaseModel):
name: str = Field(
...,
description="Custom security scheme name. Can only contain - [A-Za-z0-9-._]",
)
data: SecuritySchemeData = Field(..., description="Security scheme data")
@validator("name")
def check_name(cls, value: str):
if not OpenAPI_NAME_RE.fullmatch(value):
raise ValueError("Name not match OpenAPI rules")
return value
class Config:
validate_assignment = True
class Server(BaseModel):
url: str = Field(
...,
description="""URL or path of API server
(may be parametrized with using \"variables\" section - for more information,
see: https://swagger.io/docs/specification/api-host-and-base-path/ )""",
)
description: str = Field(
None,
description="Custom server description for server URL",
)
variables: dict = Field(
None,
description="Variables for customizing server URL",
)
class Config:
validate_assignment = True
| true | true |
f715c2f9b4de2c046a801fa47e5dbf73f975953d | 820 | py | Python | phone_iso3166/network.py | foxkirov/phone-iso3166 | 8419091e906c439f9362690d7d2d02186098e5c4 | [
"MIT"
] | 19 | 2017-03-28T10:35:22.000Z | 2022-03-14T04:39:03.000Z | phone_iso3166/network.py | foxkirov/phone-iso3166 | 8419091e906c439f9362690d7d2d02186098e5c4 | [
"MIT"
] | 17 | 2016-11-11T11:50:57.000Z | 2021-06-22T09:32:17.000Z | phone_iso3166/network.py | foxkirov/phone-iso3166 | 8419091e906c439f9362690d7d2d02186098e5c4 | [
"MIT"
] | 5 | 2015-09-28T18:25:38.000Z | 2021-07-05T11:57:58.000Z | from .e212_names import operators, countries
from .errors import InvalidNetwork, InvalidCountry
def network(mcc, mnc):
    '''
    Returns a tuple (country, network_name), with country specified as
    ISO-3166-1 alpha-2 code.

    Raises InvalidNetwork if the MCC/MNC pair is unknown.
    '''
    mcc = int(mcc)
    mnc = int(mnc)
    try:
        return operators[mcc][mnc]
    except KeyError:
        # Only a missing MCC/MNC key means the network is unknown; the
        # previous bare except also swallowed KeyboardInterrupt/SystemExit.
        raise InvalidNetwork('Invalid MCC {} MNC {}'.format(mcc, mnc))
def country_networks(country):
    '''
    Returns a list of tuples (mcc, mnc, network_name) with all the networks
    belonging to the specified country.
    The country must be specified as an ISO-3166-1 alpha-2 code.

    Raises InvalidCountry if the country code is unknown.
    '''
    try:
        return [(m[0], m[1], operators[m[0]][m[1]][1])
                for m in countries[country]]
    except KeyError:
        # A missing country (or operator) key means the code is unknown;
        # the previous bare except also swallowed KeyboardInterrupt/SystemExit.
        raise InvalidCountry('Invalid country {}'.format(country))
| 28.275862 | 75 | 0.636585 | from .e212_names import operators, countries
from .errors import InvalidNetwork, InvalidCountry
def network(mcc, mnc):
mcc = int(mcc)
mnc = int(mnc)
try:
return operators[mcc][mnc]
except:
raise InvalidNetwork('Invalid MCC {} MNC {}'.format(mcc, mnc))
def country_networks(country):
try:
return [(m[0], m[1], operators[m[0]][m[1]][1])
for m in countries[country]]
except:
raise InvalidCountry('Invalid country {}'.format(country))
| true | true |
f715c364022f5d19e2b6087341499850fd3d9b4c | 501 | py | Python | polidoro_terminal/__init__.py | heitorpolidoro/py-terminal | 3ef04d12aa48ef6d214598df34ddf932518f4614 | [
"MIT"
] | null | null | null | polidoro_terminal/__init__.py | heitorpolidoro/py-terminal | 3ef04d12aa48ef6d214598df34ddf932518f4614 | [
"MIT"
] | null | null | null | polidoro_terminal/__init__.py | heitorpolidoro/py-terminal | 3ef04d12aa48ef6d214598df34ddf932518f4614 | [
"MIT"
] | null | null | null | from polidoro_terminal.size import size, columns, rows
from polidoro_terminal.manipulation import erase_lines, up_lines, clear_to_end_of_line
from polidoro_terminal import cursor
from polidoro_terminal.color import Color
from polidoro_terminal.format import Format
from polidoro_terminal.question import question
# Package metadata
NAME = 'polidoro_terminal'
VERSION = '0.0.2'
# Public API re-exported from the submodules imported above
__all__ = ['size', 'columns', 'rows', 'erase_lines', 'up_lines', 'clear_to_end_of_line', 'cursor', 'Color',
           'Format', 'question']
| 38.538462 | 107 | 0.784431 | from polidoro_terminal.size import size, columns, rows
from polidoro_terminal.manipulation import erase_lines, up_lines, clear_to_end_of_line
from polidoro_terminal import cursor
from polidoro_terminal.color import Color
from polidoro_terminal.format import Format
from polidoro_terminal.question import question
NAME = 'polidoro_terminal'
VERSION = '0.0.2'
__all__ = ['size', 'columns', 'rows', 'erase_lines', 'up_lines', 'clear_to_end_of_line', 'cursor', 'Color',
'Format', 'question']
| true | true |
f715c430f48dcf933c9fde5179a7cfbfd6339883 | 8,381 | py | Python | hubspot/crm/deals/models/batch_response_simple_public_object.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/deals/models/batch_response_simple_public_object.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | hubspot/crm/deals/models/batch_response_simple_public_object.py | cclauss/hubspot-api-python | 7c60c0f572b98c73e1f1816bf5981396a42735f6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Deals
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.deals.configuration import Configuration
class BatchResponseSimplePublicObject(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'status': 'str',
        'results': 'list[SimplePublicObject]',
        'requested_at': 'datetime',
        'started_at': 'datetime',
        'completed_at': 'datetime',
        'links': 'dict(str, str)'
    }

    attribute_map = {
        'status': 'status',
        'results': 'results',
        'requested_at': 'requestedAt',
        'started_at': 'startedAt',
        'completed_at': 'completedAt',
        'links': 'links'
    }

    def __init__(self, status=None, results=None, requested_at=None, started_at=None, completed_at=None, links=None, local_vars_configuration=None):  # noqa: E501
        """BatchResponseSimplePublicObject - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._status = None
        self._results = None
        self._requested_at = None
        self._started_at = None
        self._completed_at = None
        self._links = None
        self.discriminator = None

        # status, results, started_at and completed_at are required;
        # requested_at and links are optional and only set when provided.
        self.status = status
        self.results = results
        if requested_at is not None:
            self.requested_at = requested_at
        self.started_at = started_at
        self.completed_at = completed_at
        if links is not None:
            self.links = links

    @property
    def status(self):
        """Gets the status of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The status of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """Sets the status of this BatchResponseSimplePublicObject.


        :param status: The status of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and status is None:  # noqa: E501
            raise ValueError("Invalid value for `status`, must not be `None`")  # noqa: E501
        # status is an enum-like string restricted to these batch states
        allowed_values = ["PENDING", "PROCESSING", "CANCELED", "COMPLETE"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and status not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"  # noqa: E501
                .format(status, allowed_values)
            )

        self._status = status

    @property
    def results(self):
        """Gets the results of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The results of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: list[SimplePublicObject]
        """
        return self._results

    @results.setter
    def results(self, results):
        """Sets the results of this BatchResponseSimplePublicObject.


        :param results: The results of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: list[SimplePublicObject]
        """
        if self.local_vars_configuration.client_side_validation and results is None:  # noqa: E501
            raise ValueError("Invalid value for `results`, must not be `None`")  # noqa: E501

        self._results = results

    @property
    def requested_at(self):
        """Gets the requested_at of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The requested_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: datetime
        """
        return self._requested_at

    @requested_at.setter
    def requested_at(self, requested_at):
        """Sets the requested_at of this BatchResponseSimplePublicObject.


        :param requested_at: The requested_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: datetime
        """

        self._requested_at = requested_at

    @property
    def started_at(self):
        """Gets the started_at of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The started_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: datetime
        """
        return self._started_at

    @started_at.setter
    def started_at(self, started_at):
        """Sets the started_at of this BatchResponseSimplePublicObject.


        :param started_at: The started_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: datetime
        """
        if self.local_vars_configuration.client_side_validation and started_at is None:  # noqa: E501
            raise ValueError("Invalid value for `started_at`, must not be `None`")  # noqa: E501

        self._started_at = started_at

    @property
    def completed_at(self):
        """Gets the completed_at of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The completed_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: datetime
        """
        return self._completed_at

    @completed_at.setter
    def completed_at(self, completed_at):
        """Sets the completed_at of this BatchResponseSimplePublicObject.


        :param completed_at: The completed_at of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: datetime
        """
        if self.local_vars_configuration.client_side_validation and completed_at is None:  # noqa: E501
            raise ValueError("Invalid value for `completed_at`, must not be `None`")  # noqa: E501

        self._completed_at = completed_at

    @property
    def links(self):
        """Gets the links of this BatchResponseSimplePublicObject.  # noqa: E501


        :return: The links of this BatchResponseSimplePublicObject.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this BatchResponseSimplePublicObject.


        :param links: The links of this BatchResponseSimplePublicObject.  # noqa: E501
        :type: dict(str, str)
        """

        self._links = links

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchResponseSimplePublicObject):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, BatchResponseSimplePublicObject):
            return True

        return self.to_dict() != other.to_dict()
import pprint
import re
import six
from hubspot.crm.deals.configuration import Configuration
class BatchResponseSimplePublicObject(object):
openapi_types = {
'status': 'str',
'results': 'list[SimplePublicObject]',
'requested_at': 'datetime',
'started_at': 'datetime',
'completed_at': 'datetime',
'links': 'dict(str, str)'
}
attribute_map = {
'status': 'status',
'results': 'results',
'requested_at': 'requestedAt',
'started_at': 'startedAt',
'completed_at': 'completedAt',
'links': 'links'
}
def __init__(self, status=None, results=None, requested_at=None, started_at=None, completed_at=None, links=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._results = None
self._requested_at = None
self._started_at = None
self._completed_at = None
self._links = None
self.discriminator = None
self.status = status
self.results = results
if requested_at is not None:
self.requested_at = requested_at
self.started_at = started_at
self.completed_at = completed_at
if links is not None:
self.links = links
@property
def status(self):
return self._status
@status.setter
def status(self, status):
if self.local_vars_configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
allowed_values = ["PENDING", "PROCESSING", "CANCELED", "COMPLETE"]
if self.local_vars_configuration.client_side_validation and status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}"
.format(status, allowed_values)
)
self._status = status
@property
def results(self):
return self._results
@results.setter
def results(self, results):
if self.local_vars_configuration.client_side_validation and results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
@property
def requested_at(self):
return self._requested_at
@requested_at.setter
def requested_at(self, requested_at):
self._requested_at = requested_at
@property
def started_at(self):
return self._started_at
@started_at.setter
def started_at(self, started_at):
if self.local_vars_configuration.client_side_validation and started_at is None:
raise ValueError("Invalid value for `started_at`, must not be `None`")
self._started_at = started_at
@property
def completed_at(self):
return self._completed_at
@completed_at.setter
def completed_at(self, completed_at):
if self.local_vars_configuration.client_side_validation and completed_at is None:
raise ValueError("Invalid value for `completed_at`, must not be `None`")
self._completed_at = completed_at
    @property
    def links(self):
        """Related-resource links for this batch response (optional field)."""
        return self._links
    @links.setter
    def links(self, links):
        """Store ``links``; optional, so no validation is applied."""
        self._links = links
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`; delegates to :meth:`to_str`."""
        return self.to_str()
def __eq__(self, other):
if not isinstance(other, BatchResponseSimplePublicObject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, BatchResponseSimplePublicObject):
return True
return self.to_dict() != other.to_dict()
| true | true |
f715c44f3f9b2781b37fa1bf5d47e32a81a7c1be | 8,332 | py | Python | SimPEG/electromagnetics/analytics/FDEM.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | 1 | 2022-02-18T16:31:27.000Z | 2022-02-18T16:31:27.000Z | SimPEG/electromagnetics/analytics/FDEM.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | SimPEG/electromagnetics/analytics/FDEM.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
    """Vertical magnetic field of a harmonic magnetic dipole in a wholespace.

    Implements Equation 4.56 of Ward and Hohmann (1988), "Electromagnetic
    theory for geophysical applications", SEG Investigations in Geophysics
    No. 3 (DOI: 10.1190/1.9781560802631.ch4).

    :param r: source-receiver offset (m); the absolute value is used
    :param freq: frequency or array of frequencies (Hz)
    :param sigma: wholespace conductivity (S/m)
    :param secondary: when True, subtract the free-space (primary) field
    :param mu: magnetic permeability (H/m), defaults to ``mu_0``
    :return: complex Hz (A/m); 1-D results are promoted to a column vector
    """
    offset = np.abs(r)
    wavenumber = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)
    moment = 1  # unit dipole moment

    leading = moment / (2.0 * np.pi * (wavenumber**2) * (offset**5))
    trailing = 9 - (
        9 + 9j * wavenumber * offset - 4 * (wavenumber**2) * (offset**2) - 1j * (wavenumber**3) * (offset**3)
    ) * np.exp(-1j * wavenumber * offset)
    hz = leading * trailing

    if secondary:
        # Remove the static free-space dipole field, leaving the secondary part.
        primary = -1 / (4 * np.pi * offset**3)
        hz = hz - primary

    if hz.ndim == 1:
        hz = utils.mkvc(hz, 2)

    return hz
def MagneticDipoleWholeSpace(
    XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
    """Analytical field of a harmonic magnetic dipole in a homogeneous wholespace.

    The expression is Equation 2.57 in Ward and Hohmann, 1988,
    "Electromagnetic theory for geophysical applications", SEG
    Investigations in Geophysics No. 3 (DOI: 10.1190/1.9781560802631.ch4).

    :param XYZ: observation locations, (n, 3) array-like
    :param srcLoc: dipole location (x, y, z)
    :param sig: wholespace conductivity (S/m)
    :param f: frequency or array of frequencies (Hz)
    :param moment: 'X', 'Y' or 'Z' (case-insensitive, unit moment along that
        axis) or an explicit 3-vector moment
    :param fieldType: 'b', 'h' or 'e' — which field to return
    :param mu_r: relative magnetic permeability
    :param eps_r: relative dielectric permittivity
    :return: tuple (Fx, Fy, Fz) of complex field components at each location
    :raises TypeError: if the removed ``orientation`` or ``mu`` kwargs are passed
    """
    # Deprecated keyword arguments are rejected outright.  The unreachable
    # fall-through assignments that used to follow these raises were removed.
    if kwargs.pop("orientation", None) is not None:
        raise TypeError(
            "orientation kwarg has been removed, please use the moment argument",
        )
    if kwargs.pop("mu", None) is not None:
        raise TypeError("mu kwarg has been removed, please use the mu_r argument.")

    magnitude = 1

    mu = mu_0 * mu_r
    eps = epsilon_0 * eps_r
    w = 2 * np.pi * f

    # Resolve the moment into cartesian components.  String moments are
    # case-insensitive, matching ElectricDipoleWholeSpace.
    if isinstance(moment, str):
        if moment.upper() == "X":
            mx, my, mz = 1.0, 0.0, 0.0
        elif moment.upper() == "Y":
            mx, my, mz = 0.0, 1.0, 0.0
        elif moment.upper() == "Z":
            mx, my, mz = 0.0, 0.0, 1.0
        else:
            raise NotImplementedError("String type for moment not recognized")
        mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
    else:
        mx, my, mz = moment[0], moment[1], moment[2]

    XYZ = utils.asArray_N_x_Dim(XYZ, 3)

    # Source-receiver offsets and the complex wavenumber
    # (conduction + displacement-current terms).
    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]

    r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
    k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
    kr = k * r

    if fieldType in ["h", "b"]:
        front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
        mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0

        Fx = front * (
            mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
            + my * ((dy * dx / r**2.0) * mid)
            + mz * ((dx * dz / r**2.0) * mid)
        )

        Fy = front * (
            mx * ((dx * dy / r**2.0) * mid)
            + my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
            + mz * ((dy * dz / r**2.0) * mid)
        )

        Fz = front * (
            mx * ((dx * dz / r**2.0) * mid)
            + my * ((dy * dz / r**2.0) * mid)
            + mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
        )

        if fieldType == "b":
            # B = mu * H
            Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz

    elif fieldType == "e":
        front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)

        Fx = front * (my * (dz / r) + mz * (-dy / r))
        Fy = front * (mx * (-dz / r) + mz * (dx / r))
        Fz = front * (mx * (dy / r) + my * (-dx / r))

    return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
    XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
    """Analytical field of a harmonic electric dipole in a homogeneous wholespace.

    Companion to :func:`MagneticDipoleWholeSpace`; expressions follow
    Ward and Hohmann, 1988, "Electromagnetic theory for geophysical
    applications", SEG Investigations in Geophysics No. 3.

    :param XYZ: observation locations, (n, 3) array-like
    :param srcLoc: dipole location (x, y, z)
    :param sig: wholespace conductivity (S/m)
    :param f: frequency or array of frequencies (Hz)
    :param moment: 'X', 'Y' or 'Z' (case-insensitive, unit moment along that
        axis) or an explicit 3-vector moment
    :param fieldType: 'e', 'h' or 'b' — which field to return
    :param mu_r: relative magnetic permeability
    :param eps_r: relative dielectric permittivity
    :return: tuple (Fx, Fy, Fz) of complex field components at each location
    :raises TypeError: if a removed kwarg (``orientation``, ``mu``,
        ``current``, ``length``) is passed
    """
    # Deprecated keyword arguments are rejected outright.
    if kwargs.pop("orientation", None) is not None:
        raise TypeError(
            "orientation kwarg has been removed, please use the moment argument."
        )
    if kwargs.pop("mu", None) is not None:
        raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
    if kwargs.pop("current", None) is not None:
        raise TypeError(
            "current kwarg has been removed, please use the moment argument.",
        )
    if kwargs.pop("length", None) is not None:
        raise TypeError(
            "length kwarg has been removed, please use the moment argument."
        )

    magnitude = 1

    mu = mu_0 * mu_r
    eps = epsilon_0 * eps_r
    w = 2 * np.pi * f

    # Resolve the moment into cartesian components.
    if isinstance(moment, str):
        if moment.upper() == "X":
            mx, my, mz = 1.0, 0.0, 0.0
        elif moment.upper() == "Y":
            mx, my, mz = 0.0, 1.0, 0.0
        elif moment.upper() == "Z":
            mx, my, mz = 0.0, 0.0, 1.0
        else:
            raise NotImplementedError("String type for moment not recognized")
        mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
    else:
        mx, my, mz = moment[0], moment[1], moment[2]

    XYZ = utils.asArray_N_x_Dim(XYZ, 3)

    # Source-receiver offsets and the complex wavenumber
    # (conduction + displacement-current terms).
    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]

    r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
    k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
    kr = k * r

    if fieldType == "e":
        front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)
        mid = -(k**2) * r**2 + 3 * 1j * k * r + 3

        Fx = front * (
            mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
            + my * (dy * dx / r**2) * mid
            + mz * (dz * dx / r**2) * mid
        )

        Fy = front * (
            mx * (dx * dy / r**2) * mid
            + my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
            + mz * (dz * dy / r**2) * mid
        )

        Fz = front * (
            mx * (dx * dz / r**2) * mid
            + my * (dy * dz / r**2) * mid
            + mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
        )

    elif fieldType in ["h", "b"]:
        front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)

        Fx = front * (my * (dz / r) + mz * (-dy / r))
        Fy = front * (mx * (-dz / r) + mz * (dx / r))
        Fz = front * (mx * (dy / r) + my * (-dx / r))

        if fieldType == "b":
            # B = mu * H
            Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz

    return Fx, Fy, Fz
| 30.079422 | 84 | 0.491959 | from __future__ import division
import numpy as np
from scipy.constants import mu_0, pi, epsilon_0
from scipy.special import erf
from SimPEG import utils
import warnings
def hzAnalyticDipoleF(r, freq, sigma, secondary=True, mu=mu_0):
r = np.abs(r)
k = np.sqrt(-1j * 2.0 * np.pi * freq * mu * sigma)
m = 1
front = m / (2.0 * np.pi * (k**2) * (r**5))
back = 9 - (
9 + 9j * k * r - 4 * (k**2) * (r**2) - 1j * (k**3) * (r**3)
) * np.exp(-1j * k * r)
hz = front * back
if secondary:
hp = -1 / (4 * np.pi * r**3)
hz = hz - hp
if hz.ndim == 1:
hz = utils.mkvc(hz, 2)
return hz
def MagneticDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment, fieldType="b", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument",
)
magnitude = moment
moment = orient
else:
magnitude = 1
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
mu_r = mu / mu_0
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType in ["h", "b"]:
front = 1 / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
mid = -(kr**2.0) + 3.0 * 1j * kr + 3.0
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ my * ((dy * dx / r**2.0) * mid)
+ mz * ((dx * dz / r**2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r**2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
+ mz * ((dy * dz / r**2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r**2.0) * mid)
+ my * ((dy * dz / r**2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + (kr**2.0 - 1j * kr - 1.0))
)
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
elif fieldType == "e":
front = 1j * w * mu * (1 + 1j * kr) / (4.0 * pi * r**3.0) * np.exp(-1j * kr)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
return Fx, Fy, Fz
def ElectricDipoleWholeSpace(
XYZ, srcLoc, sig, f, moment="X", fieldType="e", mu_r=1, eps_r=1, **kwargs
):
orient = kwargs.pop("orientation", None)
if orient is not None:
raise TypeError(
"orientation kwarg has been removed, please use the moment argument."
)
mu = kwargs.pop("mu", None)
if mu is not None:
raise TypeError("mu kwarg has been removed, please use the mu_r argument.")
cur = kwargs.pop("current", None)
if cur is not None:
raise TypeError(
"current kwarg has been removed, please use the moment argument.",
)
else:
magnitude = 1
length = kwargs.pop("length", None)
if length is not None:
raise TypeError(
"length kwarg has been removed, please use the moment argument."
)
mu = mu_0 * mu_r
eps = epsilon_0 * eps_r
w = 2 * np.pi * f
if isinstance(moment, str):
if moment.upper() == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment.upper() == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment.upper() == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
mx, my, mz = mx * magnitude, my * magnitude, mz * magnitude
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.asArray_N_x_Dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(-1j * w * mu * sig + w**2 * mu * eps)
kr = k * r
if fieldType == "e":
front = 1 / (4.0 * np.pi * sig * r**3) * np.exp(-1j * k * r)
mid = -(k**2) * r**2 + 3 * 1j * k * r + 3
Fx = front * (
mx * ((dx**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ my * (dy * dx / r**2) * mid
+ mz * (dz * dx / r**2) * mid
)
Fy = front * (
mx * (dx * dy / r**2) * mid
+ my * ((dy**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
+ mz * (dz * dy / r**2) * mid
)
Fz = front * (
mx * (dx * dz / r**2) * mid
+ my * (dy * dz / r**2) * mid
+ mz * ((dz**2 / r**2) * mid + (k**2 * r**2 - 1j * k * r - 1.0))
)
elif fieldType in ["h", "b"]:
front = (1 + 1j * kr) / (4.0 * np.pi * r**2) * np.exp(-1j * k * r)
Fx = front * (my * (dz / r) + mz * (-dy / r))
Fy = front * (mx * (-dz / r) + mz * (dx / r))
Fz = front * (mx * (dy / r) + my * (-dx / r))
if fieldType == "b":
Fx, Fy, Fz = mu * Fx, mu * Fy, mu * Fz
return Fx, Fy, Fz
| true | true |
f715c4faf3c9fe1f421e85c3edcd776dc7e1569d | 5,188 | py | Python | test/functional/nulldummy.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | 5 | 2018-07-21T15:58:30.000Z | 2019-04-25T01:45:36.000Z | test/functional/nulldummy.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | null | null | null | test/functional/nulldummy.py | chx381/platopia | 563c616db768f813aa4482d39d8ed1d8aacaad4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
    """Replace the NULLDUMMY element of ``tx``'s first input script with OP_TRUE (0x51)."""
    rewritten = []
    for element in CScript(tx.vin[0].scriptSig):
        if not rewritten:
            # The first stack element is the CHECKMULTISIG dummy; it must
            # currently be the empty push.
            assert len(element) == 0
            rewritten.append(b'\x51')
        else:
            rewritten.append(element)
    tx.vin[0].scriptSig = CScript(rewritten)
    tx.rehash()
'''
This test is meant to exercise NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
'''
class NULLDUMMYTest(BitcoinTestFramework):
    """Exercise the NULLDUMMY softfork rules against a single node."""
    def __init__(self):
        super().__init__()
        # Single node on a clean chain; whitelist localhost so our raw
        # submissions are always relayed/accepted for inspection.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
    def run_test(self):
        """Mine to height 429, then check mempool/block policy around NULLDUMMY."""
        self.address = self.nodes[0].getnewaddress()
        # 1-of-1 multisig: spends from it carry the extra (dummy) stack element.
        self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
        NetworkThread().start()  # Start up network handling in another thread
        self.coinbase_blocks = self.nodes[0].generate(2)  # Block 2
        coinbase_txid = []
        for i in self.coinbase_blocks:
            coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
        self.nodes[0].generate(427)  # Block 429
        # Track the chain tip by hand so block_submit can build on it.
        self.lastblockhash = self.nodes[0].getbestblockhash()
        self.tip = int("0x" + self.lastblockhash, 0)
        self.lastblockheight = 429
        self.lastblocktime = int(time.time()) + 429
        self.log.info(
            "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
        test1txs = [self.create_transaction(
            self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
        txid1 = self.tx_submit(self.nodes[0], test1txs[0])
        test1txs.append(self.create_transaction(
            self.nodes[0], txid1, self.ms_address, 48))
        txid2 = self.tx_submit(self.nodes[0], test1txs[1])
        self.block_submit(self.nodes[0], test1txs, False, True)
        self.log.info(
            "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
        test2tx = self.create_transaction(
            self.nodes[0], txid2, self.ms_address, 48)
        # Corrupt the dummy element; mempool policy must reject with NULLDUMMY_ERROR.
        trueDummy(test2tx)
        txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
        self.log.info(
            "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
        self.block_submit(self.nodes[0], [test2tx], False, True)
    def create_transaction(self, node, txid, to_address, amount):
        """Build and sign a 1-in/1-out transaction spending ``txid``:0."""
        inputs = [{"txid": txid, "vout": 0}]
        outputs = {to_address: amount}
        rawtx = node.createrawtransaction(inputs, outputs)
        signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID")
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx
    def tx_submit(self, node, tx, msg=""):
        """Submit ``tx``; expect success when ``msg`` is "", else the given RPC error."""
        tx.rehash()
        try:
            node.sendrawtransaction(
                bytes_to_hex_str(tx.serialize_with_witness()), True)
        except JSONRPCException as exp:
            assert_equal(exp.error["message"], msg)
        else:
            # No exception: only valid if no error message was expected.
            assert_equal('', msg)
        return tx.hash
    def block_submit(self, node, txs, witness=False, accept=False):
        """Mine ``txs`` into a block on our tracked tip and assert (non-)acceptance.

        ``witness`` is currently unused here; retained for caller compatibility.
        """
        block = create_block(self.tip, create_coinbase(
            self.lastblockheight + 1), self.lastblocktime + 1)
        block.nVersion = 4
        for tx in txs:
            tx.rehash()
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        node.submitblock(bytes_to_hex_str(block.serialize(True)))
        if (accept):
            assert_equal(node.getbestblockhash(), block.hash)
            # Advance our local view of the tip.
            self.tip = block.sha256
            self.lastblockhash = block.hash
            self.lastblocktime += 1
            self.lastblockheight += 1
        else:
            assert_equal(node.getbestblockhash(), self.lastblockhash)
# Standard functional-test entry point: run the scenario when executed directly.
if __name__ == '__main__':
    NULLDUMMYTest().main()
| 40.53125 | 125 | 0.644372 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])
NetworkThread().start()
self.coinbase_blocks = self.nodes[0].generate(2)
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427)
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info(
"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(
self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
test1txs.append(self.create_transaction(
self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info(
"Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(
self.nodes[0], txid2, self.ms_address, 48)
trueDummy(test2tx)
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
self.log.info(
"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{"txid": txid, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID")
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def tx_submit(self, node, tx, msg=""):
tx.rehash()
try:
node.sendrawtransaction(
bytes_to_hex_str(tx.serialize_with_witness()), True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], msg)
else:
assert_equal('', msg)
return tx.hash
def block_submit(self, node, txs, witness=False, accept=False):
block = create_block(self.tip, create_coinbase(
self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| true | true |
f715c501c1f5d7c019455fce3fc6397536a093ce | 288 | py | Python | universal/items.py | universalscraper/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | 2 | 2017-01-14T20:09:24.000Z | 2019-09-23T09:26:23.000Z | universal/items.py | scraperize/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | null | null | null | universal/items.py | scraperize/universal-spider | 0b6d82ee0c749cf32dcf501e6d84f518ee2e8437 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class UniversalItem(scrapy.Item):
    """Generic Scrapy item; scraped fields are declared as ``scrapy.Field()`` attributes."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.2 | 51 | 0.6875 |
import scrapy
class UniversalItem(scrapy.Item):
pass
| true | true |
f715c602904311d44b8bf950698fcd77ad53a6a8 | 3,800 | py | Python | img2pose/utils/renderer.py | jiacheng1gujiaxin/poseface | 316924e224477f881240712a13a925bdd27adf4c | [
"MIT"
] | null | null | null | img2pose/utils/renderer.py | jiacheng1gujiaxin/poseface | 316924e224477f881240712a13a925bdd27adf4c | [
"MIT"
] | null | null | null | img2pose/utils/renderer.py | jiacheng1gujiaxin/poseface | 316924e224477f881240712a13a925bdd27adf4c | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from Sim3DR import RenderPipeline
from .pose_operations import plot_3d_landmark
def _to_ctype(arr):
    """Return ``arr`` as a C-contiguous array, copying only when necessary."""
    return arr if arr.flags.c_contiguous else arr.copy(order="C")
def get_colors(img, ver):
    """Sample per-vertex RGB colors (floats in [0, 1]) from ``img``.

    ``ver`` holds x coordinates in row 0 and y coordinates in row 1; those
    rows are clamped *in place* to the image bounds before sampling, which
    callers such as ``Renderer.save_to_obj`` rely on.
    """
    h, w, _ = img.shape
    ver[0, :] = np.clip(ver[0, :], 0, w - 1)  # x
    ver[1, :] = np.clip(ver[1, :], 0, h - 1)  # y
    idx = np.round(ver).astype(np.int32)
    sampled = img[idx[1, :], idx[0, :], :] / 255.0  # n x 3
    return sampled.copy()
class Renderer:
    """Render 3D face-reference vertices over images via the Sim3DR pipeline."""
    def __init__(
        self,
        vertices_path="../pose_references/vertices_trans.npy",
        triangles_path="../pose_references/triangles.npy",
    ):
        """Load the reference mesh and configure the lighting pipeline."""
        self.vertices = np.load(vertices_path)
        self.triangles = _to_ctype(np.load(triangles_path).T)
        # Mirror the reference mesh along x.
        self.vertices[:, 0] *= -1
        # Lighting configuration passed straight to Sim3DR's RenderPipeline.
        self.cfg = {
            "intensity_ambient": 0.3,
            "color_ambient": (1, 1, 1),
            "intensity_directional": 0.6,
            "color_directional": (1, 1, 1),
            "intensity_specular": 0.1,
            "specular_exp": 5,
            "light_pos": (0, 0, 5),
            "view_pos": (0, 0, 5),
        }
        self.render_app = RenderPipeline(**self.cfg)
    def transform_vertices(self, img, poses, global_intrinsics=None):
        """Project the reference vertices into image space for each pose.

        ``img`` is a PIL image (``.size`` is used); returns one (n, 3) array
        of projected landmarks per pose.
        """
        (w, h) = img.size
        if global_intrinsics is None:
            # Default pinhole intrinsics centered on the image.
            global_intrinsics = np.array(
                [[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]
            )
        transformed_vertices = []
        for pose in poses:
            projected_lms = np.zeros_like(self.vertices)
            projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(
                self.vertices, pose, global_intrinsics
            )
            # Flip depth sign, then rescale/offset it so the mesh renders in
            # front of the image plane.
            projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1
            range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])
            range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])
            # pose[5] is used as a scale term here — presumably translation z;
            # TODO confirm against the pose convention used by callers.
            s = (h + w) / pose[5]
            projected_lms[:, 2] *= s
            projected_lms[:, 2] += (range_x + range_y) * 3
            transformed_vertices.append(projected_lms)
        return transformed_vertices
    def render(self, img, transformed_vertices, alpha=0.9, save_path=None):
        """Blend the rendered meshes onto ``img`` and optionally save the result."""
        img = np.asarray(img)
        overlap = img.copy()
        for vertices in transformed_vertices:
            vertices = _to_ctype(vertices)  # Sim3DR requires C-contiguous input
            overlap = self.render_app(vertices, self.triangles, overlap)
        res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)
        if save_path is not None:
            cv2.imwrite(save_path, res)
            print(f"Save visualization result to {save_path}")
        return res
    def save_to_obj(self, img, ver_lst, height, save_path):
        """Write the meshes as a Wavefront .obj with per-vertex colors from ``img``."""
        n_obj = len(ver_lst)  # count obj
        if n_obj <= 0:
            return
        n_vertex = ver_lst[0].T.shape[1]
        n_face = self.triangles.shape[0]
        with open(save_path, "w") as f:
            # Vertex records: position (y flipped to .obj convention) + BGR->RGB color.
            for i in range(n_obj):
                ver = ver_lst[i].T
                colors = get_colors(img, ver)
                for j in range(n_vertex):
                    x, y, z = ver[:, j]
                    f.write(
                        f"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} "
                        f"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\n"
                    )
            # Face records, offset per object; winding reversed (idx3, idx2, idx1).
            for i in range(n_obj):
                offset = i * n_vertex
                for j in range(n_face):
                    idx1, idx2, idx3 = self.triangles[j]  # m x 3
                    f.write(
                        f"f {idx3 + 1 + offset} {idx2 + 1 + offset} "
                        f"{idx1 + 1 + offset}\n"
                    )
        print(f"Dump tp {save_path}")
| 31.666667 | 81 | 0.517105 | import cv2
import numpy as np
from Sim3DR import RenderPipeline
from .pose_operations import plot_3d_landmark
def _to_ctype(arr):
if not arr.flags.c_contiguous:
return arr.copy(order="C")
return arr
def get_colors(img, ver):
h, w, _ = img.shape
ver[0, :] = np.minimum(np.maximum(ver[0, :], 0), w - 1)
ver[1, :] = np.minimum(np.maximum(ver[1, :], 0), h - 1)
ind = np.round(ver).astype(np.int32)
colors = img[ind[1, :], ind[0, :], :] / 255.0
return colors.copy()
class Renderer:
def __init__(
self,
vertices_path="../pose_references/vertices_trans.npy",
triangles_path="../pose_references/triangles.npy",
):
self.vertices = np.load(vertices_path)
self.triangles = _to_ctype(np.load(triangles_path).T)
self.vertices[:, 0] *= -1
self.cfg = {
"intensity_ambient": 0.3,
"color_ambient": (1, 1, 1),
"intensity_directional": 0.6,
"color_directional": (1, 1, 1),
"intensity_specular": 0.1,
"specular_exp": 5,
"light_pos": (0, 0, 5),
"view_pos": (0, 0, 5),
}
self.render_app = RenderPipeline(**self.cfg)
def transform_vertices(self, img, poses, global_intrinsics=None):
(w, h) = img.size
if global_intrinsics is None:
global_intrinsics = np.array(
[[w + h, 0, w // 2], [0, w + h, h // 2], [0, 0, 1]]
)
transformed_vertices = []
for pose in poses:
projected_lms = np.zeros_like(self.vertices)
projected_lms[:, :2], lms_3d_trans_proj = plot_3d_landmark(
self.vertices, pose, global_intrinsics
)
projected_lms[:, 2] = lms_3d_trans_proj[:, 2] * -1
range_x = np.max(projected_lms[:, 0]) - np.min(projected_lms[:, 0])
range_y = np.max(projected_lms[:, 1]) - np.min(projected_lms[:, 1])
s = (h + w) / pose[5]
projected_lms[:, 2] *= s
projected_lms[:, 2] += (range_x + range_y) * 3
transformed_vertices.append(projected_lms)
return transformed_vertices
def render(self, img, transformed_vertices, alpha=0.9, save_path=None):
img = np.asarray(img)
overlap = img.copy()
for vertices in transformed_vertices:
vertices = _to_ctype(vertices)
overlap = self.render_app(vertices, self.triangles, overlap)
res = cv2.addWeighted(img, 1 - alpha, overlap, alpha, 0)
if save_path is not None:
cv2.imwrite(save_path, res)
print(f"Save visualization result to {save_path}")
return res
def save_to_obj(self, img, ver_lst, height, save_path):
n_obj = len(ver_lst)
if n_obj <= 0:
return
n_vertex = ver_lst[0].T.shape[1]
n_face = self.triangles.shape[0]
with open(save_path, "w") as f:
for i in range(n_obj):
ver = ver_lst[i].T
colors = get_colors(img, ver)
for j in range(n_vertex):
x, y, z = ver[:, j]
f.write(
f"v {x:.2f} {height - y:.2f} {z:.2f} {colors[j, 2]:.2f} "
f"{colors[j, 1]:.2f} {colors[j, 0]:.2f}\n"
)
for i in range(n_obj):
offset = i * n_vertex
for j in range(n_face):
idx1, idx2, idx3 = self.triangles[j]
f.write(
f"f {idx3 + 1 + offset} {idx2 + 1 + offset} "
f"{idx1 + 1 + offset}\n"
)
print(f"Dump tp {save_path}")
| true | true |
f715c633d888342e2bcb33e9b3f302a45f208031 | 6,067 | py | Python | servers/Thot/schema.py | DiegoCorrea/bottleOfMessages | 1281d3f82ce4d44a31e426aa8862c3c9b294cf03 | [
"MIT"
] | null | null | null | servers/Thot/schema.py | DiegoCorrea/bottleOfMessages | 1281d3f82ce4d44a31e426aa8862c3c9b294cf03 | [
"MIT"
] | null | null | null | servers/Thot/schema.py | DiegoCorrea/bottleOfMessages | 1281d3f82ce4d44a31e426aa8862c3c9b294cf03 | [
"MIT"
] | null | null | null | import sqlite3
import sys
import os
import inspect
from time import gmtime, strftime
from config.server import APP_DB_PATH, SERVER_DB_PATH, WHO_AM_I
sys.path.append('..')  # NOTE(review): appended *after* the config.server import above — confirm it is still needed

# --- Application database -------------------------------------------------
# Connect to the sqlite file located relative to this script's directory.
conn = sqlite3.connect(
    os.path.dirname(
        os.path.abspath(
            inspect.getfile(
                inspect.currentframe()
            )
        )
    ) + APP_DB_PATH[1:]
)
cursor = conn.cursor()

# Show which server identity this schema is being built for.
print(' -'*30)
print(' + name: ', WHO_AM_I['name'])
print(' + db-name: ', WHO_AM_I['db-name'])
print(' + ip: ', WHO_AM_I['ip'])
print(' + port: ', WHO_AM_I['port'])
print(' + position: ', WHO_AM_I['position'])
print(' + succession_order: ', WHO_AM_I['succession_order'])
print(' -'*30)

# Drop every application table so the script can be re-run from scratch.
print('Deletando Tabelas se Existe')
cursor.execute("""
DROP TABLE IF EXISTS users;
""")
cursor.execute("""
DROP TABLE IF EXISTS contacts;
""")
cursor.execute("""
DROP TABLE IF EXISTS chats;
""")
cursor.execute("""
DROP TABLE IF EXISTS chat_messages;
""")
cursor.execute("""
DROP TABLE IF EXISTS groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS user_groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS group_messages;
""")
print('...Ok!')

# Recreate the application schema (users, contacts, chats and groups).
print('Users')
cursor.execute("""
CREATE TABLE IF NOT EXISTS users (
        email  CHAR(64) NOT NULL PRIMARY KEY,
        name VARCHAR(45) NOT NULL,
        created_at TEXT NOT NULL
);
""")
print('...Ok!')
print('Contacts')
cursor.execute("""
CREATE TABLE IF NOT EXISTS contacts (
        id CHAR(32) NOT NULL PRIMARY KEY,
        user_id CHAR(64) NOT NULL,
        contact_id CHAR(32) NOT NULL,
        created_at TEXT NOT NULL,
        FOREIGN KEY(user_id) REFERENCES users(email),
        FOREIGN KEY(contact_id) REFERENCES users(email)
);
""")
print('...Ok!')
print('Chats')
cursor.execute("""
CREATE TABLE IF NOT EXISTS chats (
        id CHAR(32) NOT NULL PRIMARY KEY,
        user_id CHAR(64) NOT NULL,
        contact_id CHAR(64) NOT NULL,
        created_at TEXT NOT NULL,
        FOREIGN KEY(user_id) REFERENCES users(email),
        FOREIGN KEY(contact_id) REFERENCES users(email)
);
""")
print('...Ok!')
print('Chat Message')
cursor.execute("""
CREATE TABLE IF NOT EXISTS chat_messages (
        id CHAR(32) NOT NULL PRIMARY KEY,
        chat_id CHAR(32) NOT NULL,
        sender_id CHAR(64) NOT NULL,
        message TEXT NOT NULL,
        created_at TEXT NOT NULL,
        FOREIGN KEY(sender_id) REFERENCES users(email),
        FOREIGN KEY(chat_id) REFERENCES chats(id)
);
""")
print('...Ok!')
print('Groups ')
cursor.execute("""
CREATE TABLE IF NOT EXISTS groups (
        id CHAR(32) NOT NULL PRIMARY KEY,
        name CHAR(32) NOT NULL,
        created_at TEXT NOT NULL
);
""")
print('...Ok!')
print('Users Groups ')
cursor.execute("""
CREATE TABLE IF NOT EXISTS user_groups (
        id CHAR(32) NOT NULL PRIMARY KEY,
        user_id CHAR(64)NOT NULL,
        group_id CHAR(32) NOT NULL,
        created_at TEXT NOT NULL,
        FOREIGN KEY(user_id) REFERENCES users(id),
        FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Group Messages')
cursor.execute("""
CREATE TABLE IF NOT EXISTS group_messages (
        id CHAR(32) NOT NULL PRIMARY KEY,
        sender_id CHAR(64) NOT NULL,
        group_id CHAR(32) NOT NULL,
        created_at TEXT NOT NULL,
        message TEXT NOT NULL,
        FOREIGN KEY(sender_id) REFERENCES users(id),
        FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Tabelas criadas com sucesso.')
# Commit explicitly for consistency with the server-DB section below
# (DDL is usually autocommitted by sqlite3, but being explicit is safer).
conn.commit()
conn.close()
# ##################################################################### #
print('\n\n')

# --- Server-coordination database -----------------------------------------
conn = sqlite3.connect(
    os.path.dirname(
        os.path.abspath(
            inspect.getfile(
                inspect.currentframe()
            )
        )
    ) + SERVER_DB_PATH[1:]
)
cursor = conn.cursor()

# Drop the server tables so the script can be re-run from scratch.
# BUGFIX: the drops previously targeted 'worker_servers_list' and
# 'suspect_servers_list', but the tables are created below as
# 'workers_servers_list' / 'suspects_servers_list', so re-runs never
# actually reset them.  The DROP names now match the CREATE names.
print('Deletando Tabelas de Servers se Existe')
cursor.execute("""
DROP TABLE IF EXISTS default_servers_list;
""")
cursor.execute("""
DROP TABLE IF EXISTS workers_servers_list;
""")
cursor.execute("""
DROP TABLE IF EXISTS suspects_servers_list;
""")
cursor.execute("""
DROP TABLE IF EXISTS round_times;
""")
print('...Ok!')

# Known/default servers, pre-seeded with the fixed cluster members.
print('Default Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS default_servers_list (
        name  CHAR(64) NOT NULL,
        ip VARCHAR(32) NOT NULL,
        port INTEGER NOT NULL,
        succession_order INTEGER NOT NULL
);
""")
conn.commit()
cursor.execute("""
    INSERT INTO default_servers_list
    (ip, name, port, succession_order)
    VALUES ('192.168.0.16', 'Hermes', 27001, 1);
""")
conn.commit()
cursor.execute("""
    INSERT INTO default_servers_list
    (ip, name, port, succession_order)
    VALUES ('192.168.0.17', 'Thot', 27002, 2);
""")
conn.commit()
cursor.execute("""
    INSERT INTO default_servers_list
    (ip, name, port, succession_order)
    VALUES ('192.168.0.10', 'Exu', 27000, 0);
""")
conn.commit()
print('...OK!')

# Currently-active worker servers (populated at runtime).
print('Worker Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS workers_servers_list (
        name  CHAR(64) NOT NULL,
        ip VARCHAR(32) NOT NULL,
        port INTEGER NOT NULL,
        succession_order INTEGER NOT NULL
);
""")
conn.commit()
print('...OK!')

# Servers suspected of having failed (populated at runtime).
print('Suspect Server List')
cursor.execute("""
CREATE TABLE IF NOT EXISTS suspects_servers_list (
        name  CHAR(64) NOT NULL,
        ip VARCHAR(32) NOT NULL,
        port INTEGER NOT NULL
);
""")
conn.commit()
print('...OK!')

# Round bookkeeping, seeded with round 0 at the current UTC time.
print('Round Times')
cursor.execute("""
CREATE TABLE IF NOT EXISTS round_times (
        _round  INTEGER NOT NULL PRIMARY KEY,
        created_at TEXT NOT NULL
);
""")
conn.commit()
cursor.execute("""
    INSERT INTO round_times
    (_round, created_at)
    VALUES (?, ?);
    """, (
        0,
        strftime(
            "%Y-%m-%d %H:%M:%S",
            gmtime()
        )
    )
)
conn.commit()
print('...OK!')
conn.close()
| 23.885827 | 73 | 0.606725 | import sqlite3
import sys
import os
import inspect
from time import gmtime, strftime
from config.server import APP_DB_PATH, SERVER_DB_PATH, WHO_AM_I
sys.path.append('..')
conn = sqlite3.connect(
os.path.dirname(
os.path.abspath(
inspect.getfile(
inspect.currentframe()
)
)
) + APP_DB_PATH[1:]
)
cursor = conn.cursor()
print(' -'*30)
print(' + name: ', WHO_AM_I['name'])
print(' + db-name: ', WHO_AM_I['db-name'])
print(' + ip: ', WHO_AM_I['ip'])
print(' + port: ', WHO_AM_I['port'])
print(' + position: ', WHO_AM_I['position'])
print(' + succession_order: ', WHO_AM_I['succession_order'])
print(' -'*30)
print('Deletando Tabelas se Existe')
cursor.execute("""
DROP TABLE IF EXISTS users;
""")
cursor.execute("""
DROP TABLE IF EXISTS contacts;
""")
cursor.execute("""
DROP TABLE IF EXISTS chats;
""")
cursor.execute("""
DROP TABLE IF EXISTS chat_messages;
""")
cursor.execute("""
DROP TABLE IF EXISTS groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS user_groups;
""")
cursor.execute("""
DROP TABLE IF EXISTS group_messages;
""")
print('...Ok!')
REFERENCES groups(id)
);
""")
print('...OK!')
print('Group Messages')
cursor.execute("""
CREATE TABLE IF NOT EXISTS group_messages (
id CHAR(32) NOT NULL PRIMARY KEY,
sender_id CHAR(64) NOT NULL,
group_id CHAR(32) NOT NULL,
created_at TEXT NOT NULL,
message TEXT NOT NULL,
FOREIGN KEY(sender_id) REFERENCES users(id),
FOREIGN KEY(group_id) REFERENCES groups(id)
);
""")
print('...OK!')
print('Tabelas criadas com sucesso.')
conn.close()
| true | true |
f715c70e4981ec385e1f2070cf75f75007655155 | 293 | py | Python | sololearn/NewDriverLicense/DL.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | 3 | 2020-01-08T18:33:11.000Z | 2022-02-08T00:38:26.000Z | sololearn/NewDriverLicense/DL.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | null | null | null | sololearn/NewDriverLicense/DL.py | SneakyWizards/HackerRankSolutions | daf494e7775bb0de5afcfdcfd45aa73e6a950e0e | [
"RSA-MD"
] | 4 | 2020-08-08T22:02:23.000Z | 2022-02-07T17:40:15.000Z | #!/usr/bin/python
# Read the new driver's name, the number of license agents on duty, and the
# names of the drivers already waiting (space-separated on one line).
name = input()
num_agents = int(input())
drivers = input().split()
drivers.append(name)
drivers.sort()
# 1-based position of the new driver once everyone is served alphabetically.
index = drivers.index(name) + 1
# Never use more agents than there are drivers ahead of (and including) us.
if num_agents > index:
    num_agents = index
# Each agent handles one driver per 20-minute slot; rem/div split the queue
# position across the agents to get the slot in which `name` is served
# (formula mirrors the original problem statement -- assumed correct as-is).
rem = index % num_agents
div = index // num_agents
time = (rem + div) * 20
print(time)
name = input()
num_agents = int(input())
drivers = input().split()
drivers.append(name)
drivers.sort()
index = drivers.index(name) + 1
if num_agents > index:
num_agents = index
rem = index % num_agents
div = index // num_agents
time = (rem + div) * 20
print(time) | true | true |
f715c76c0e7bc0f285f65f27afbb7bee42da3afb | 805 | py | Python | server/urls.py | w769076810/myhome | 38e39b15c84f8c60fe3f02b46053a8971e081b9a | [
"MIT"
] | null | null | null | server/urls.py | w769076810/myhome | 38e39b15c84f8c60fe3f02b46053a8971e081b9a | [
"MIT"
] | null | null | null | server/urls.py | w769076810/myhome | 38e39b15c84f8c60fe3f02b46053a8971e081b9a | [
"MIT"
] | null | null | null | """server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from server import views
# URL routing table; only the /test/ endpoint is currently exposed
# (the admin site is deliberately disabled below).
urlpatterns = [
    # path('admin/', admin.site.urls),
    path('test/', views.test)
]
| 33.541667 | 77 | 0.70559 | from django.contrib import admin
from django.urls import path
from server import views
urlpatterns = [
path('test/', views.test)
]
| true | true |
f715c84088f7c8d2c89e008f545880f78639ed19 | 17,970 | py | Python | ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/storage/netapp/netapp_e_volume_copy.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
- Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The the id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The the id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The the id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
state:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
choices: [True, False]
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
# Common HTTP headers sent with every request to the Web Services proxy.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """Issue an HTTP request to the Web Services proxy and return (status_code, parsed_body).

    The response body is JSON-decoded when non-empty.  Unless ignore_errors
    is True, an Exception is raised for unparsable bodies and for any status
    code >= 400.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # Keep the error response object so its status code and JSON body
        # can still be inspected below instead of bubbling the HTTPError up.
        err = get_exception()
        r = err.fp
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        # NOTE(review): if r.read() itself raises, raw_data is unbound here
        # and the raise below becomes a NameError -- worth confirming upstream.
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)
    resp_code = r.getcode()
    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
    """Look up an existing copy pair matching the configured source and destination.

    Queries /storage-systems/{ssid}/volume-copy-jobs and returns the id of a
    pair whose sourceVolume equals params['source_volume_id'] and whose
    targetVolume equals params['destination_volume_id'], or None when no such
    pair exists.
    """
    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
    url = params['api_url'] + get_status
    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
                         url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    volume_copy_pair_id = None
    for potential_copy_pair in resp:
        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            # Bug fix: the original repeated the sourceVolume comparison here,
            # so any pair sharing the source volume matched regardless of its
            # destination.  Match the destination (target) volume as intended.
            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
                volume_copy_pair_id = potential_copy_pair['id']
    return volume_copy_pair_id
def create_copy_pair(params):
    """POST a new volume-copy job for the configured source/target volumes.

    Returns (success, (status_code, response_body)); success is True only
    when the proxy answers with HTTP 200.
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
    payload = {
        "sourceId": params['source_volume_id'],
        "targetId": params['destination_volume_id']
    }
    status_code, body = request(params['api_url'] + endpoint,
                                data=json.dumps(payload),
                                ignore_errors=True, method='POST',
                                url_username=params['api_username'],
                                url_password=params['api_password'],
                                headers=HEADERS,
                                validate_certs=params['validate_certs'])
    return status_code == 200, (status_code, body)
def delete_copy_pair_by_copy_pair_id(params):
    """DELETE the copy pair identified by params['volume_copy_pair_id'].

    Returns (success, (status_code, response_body)); the proxy signals a
    successful delete with HTTP 204.
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
        params['ssid'], params['volume_copy_pair_id'])
    status_code, body = request(params['api_url'] + endpoint,
                                ignore_errors=True, method='DELETE',
                                url_username=params['api_username'],
                                url_password=params['api_password'],
                                headers=HEADERS,
                                validate_certs=params['validate_certs'])
    return status_code == 204, (status_code, body)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
    """Check that the copy pair identified by volume_copy_pair_id exists.

    Returns (exists, (status_code, response_body)).

    Bug fix: the original issued a DELETE against
    /volume-copy-jobs/{id}?retainRepositories=false, so merely *checking*
    for a pair destroyed it (main() calls this function "to verify that it
    exists").  An existence check must be a plain GET on the job resource.
    """
    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status
    (rc, resp) = request(url, ignore_errors=True, method='GET',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    if rc != 200:
        return False, (rc, resp)
    else:
        return True, (rc, resp)
def start_stop_copy(params):
    """Start or stop the copy job named by volume_copy_pair_id.

    The desired action ('start' or 'stop') comes from
    params['start_stop_copy'].  Returns (True, percent_complete) on a 200
    response, otherwise (False, response_body).
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
        params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
    status_code, body = request(params['api_url'] + endpoint,
                                ignore_errors=True, method='POST',
                                url_username=params['api_username'],
                                url_password=params['api_password'],
                                headers=HEADERS,
                                validate_certs=params['validate_certs'])
    if status_code == 200:
        return True, body[0]['percentComplete']
    return False, body
def check_copy_status(params):
    """Query the copy-job controller for the pair's current progress.

    Returns (running, detail): (True, percent_complete) while a copy is in
    flight, (False, -1) when no copy is running, and (False, response_body)
    when the request itself fails.
    """
    endpoint = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    status_code, body = request(params['api_url'] + endpoint,
                                ignore_errors=True, method='GET',
                                url_username=params['api_username'],
                                url_password=params['api_password'],
                                headers=HEADERS,
                                validate_certs=params['validate_certs'])
    if status_code != 200:
        return False, body
    progress = body['percentComplete']
    # The controller reports -1 when no copy operation is currently running.
    return progress != -1, progress
def find_valid_copy_pair_targets_and_sources(params):
    """List volumes usable as copy targets or sources for search_volume_id.

    Every volume other than the search volume that is not already part of a
    copy pair is classified by capacity: strictly larger volumes are valid
    targets, all others are valid sources.  Returns
    (potential_targets, potential_sources) as lists of volume ids.
    Raises Exception when the volume listing request does not return 200.
    """
    endpoint = 'storage-systems/%s/volumes' % params['ssid']
    status_code, volumes = request(params['api_url'] + endpoint,
                                   ignore_errors=True, method='GET',
                                   url_username=params['api_username'],
                                   url_password=params['api_password'],
                                   headers=HEADERS,
                                   validate_certs=params['validate_certs'])
    if status_code != 200:
        raise Exception("Response [%s]" % status_code)
    source_capacity = None
    candidates = []
    for volume in volumes:
        if volume['id'] == params['search_volume_id']:
            source_capacity = volume['capacity']
        else:
            candidates.append(volume)
    potential_targets = []
    potential_sources = []
    for volume in candidates:
        # Volumes already participating in a copy pair are never eligible.
        if volume['volumeCopyTarget'] is not False or volume['volumeCopySource'] is not False:
            continue
        if volume['capacity'] > source_capacity:
            potential_targets.append(volume['id'])
        else:
            potential_sources.append(volume['id'])
    return potential_targets, potential_sources
def main():
    """Ansible entry point: ensure a volume copy pair is present/absent.

    Flow: parse module arguments, then (1) if search_volume_id is given,
    report candidate source/target volumes and exit; (2) if start_stop_copy
    is given, start or stop the named pair's copy job and exit; otherwise
    (3) create/verify the pair for status=present or delete it for
    status=absent.  Every path terminates via module.exit_json/fail_json.
    """
    module = AnsibleModule(argument_spec=dict(
        source_volume_id=dict(type='str'),
        destination_volume_id=dict(type='str'),
        copy_priority=dict(required=False, default=0, type='int'),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
        validate_certs=dict(required=False, default=True),
        targetWriteProtected=dict(required=False, default=True, type='bool'),
        onlineCopy=dict(required=False, default=False, type='bool'),
        volume_copy_pair_id=dict(type='str'),
        status=dict(required=True, choices=['present', 'absent'], type='str'),
        create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
        start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
        search_volume_id=dict(type='str'),
    ),
        mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
                            ['volume_copy_pair_id', 'source_volume_id'],
                            ['volume_copy_pair_id', 'search_volume_id'],
                            ['search_volume_id', 'destination_volume_id'],
                            ['search_volume_id', 'source_volume_id'],
                            ],
        required_together=[['source_volume_id', 'destination_volume_id'],
                           ],
        required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
                     ["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
                     ["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
                     ]
    )
    params = module.params
    # All endpoint strings below are joined onto api_url without a slash.
    if not params['api_url'].endswith('/'):
        params['api_url'] += '/'

    # Check if we want to search
    if params['search_volume_id'] is not None:
        try:
            potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
        except:
            e = get_exception()
            module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
        module.exit_json(changed=False,
                         msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
                         search_volume_id=params['search_volume_id'],
                         valid_targets=potential_targets,
                         valid_sources=potential_sources)

    # Check if we want to start or stop a copy operation
    if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
        # Get the current status info
        currenty_running, status_info = check_copy_status(params)
        # If we want to start
        if params['start_stop_copy'] == 'start':
            # If we have already started
            if currenty_running is True:
                module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
            # If we need to start
            else:
                start_status, info = start_stop_copy(params)
                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
                else:
                    module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
        # If we want to stop
        else:
            # If it has already stopped
            if currenty_running is False:
                module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'])
            # If we need to stop it
            else:
                start_status, info = start_stop_copy(params)
                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)

    # If we want the copy pair to exist we do this stuff
    if params['status'] == 'present':
        # We need to check if it exists first
        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)
        # If no volume copy pair is found we need need to make it.
        if params['volume_copy_pair_id'] is None:
            # In order to create we can not do so with just a volume_copy_pair_id
            copy_began_status, (rc, resp) = create_copy_pair(params)
            if copy_began_status is True:
                module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
            else:
                module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
        # If it does exist we do nothing
        else:
            # We verify that it exists
            exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
                params)
            if exist_status:
                module.exit_json(changed=False,
                                 msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
            else:
                if exist_status_code == 404:
                    module.fail_json(
                        msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
                            params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
                        exist_status_code, exist_status_data))
        # NOTE(review): unreachable -- every branch above already called
        # exit_json/fail_json, which raise SystemExit.
        module.fail_json(msg="Done")

    # If we want it to not exist we do this
    else:
        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)
        # We delete it by the volume_copy_pair_id
        delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
        if delete_status is True:
            module.exit_json(changed=True,
                             msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
        else:
            if delete_status_code == 404:
                module.exit_json(changed=False,
                                 msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
            else:
                module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
                    delete_status_code, delete_status_data))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| 40.840909 | 145 | 0.617641 |
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
- Create and delete snapshots images on volume groups for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
example:
- https://prod-1.wahoo.acme.com/devmgr/v2
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
source_volume_id:
description:
- The the id of the volume copy source.
- If used, must be paired with destination_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
destination_volume_id:
description:
- The the id of the volume copy destination.
- If used, must be paired with source_volume_id
- Mutually exclusive with volume_copy_pair_id, and search_volume_id
volume_copy_pair_id:
description:
- The the id of a given volume copy pair
- Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
- Can use to delete or check presence of volume pairs
- Must specify this or (destination_volume_id and source_volume_id)
state:
description:
- Whether the specified volume copy pair should exist or not.
required: True
choices: ['present', 'absent']
create_copy_pair_if_does_not_exist:
description:
- Defines if a copy pair will be created if it does not exist.
- If set to True destination_volume_id and source_volume_id are required.
choices: [True, False]
default: True
start_stop_copy:
description:
- starts a re-copy or stops a copy in progress
- "Note: If you stop the initial file copy before it it done the copy pair will be destroyed"
- Requires volume_copy_pair_id
search_volume_id:
description:
- Searches for all valid potential target and source volumes that could be used in a copy_pair
- Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
RESULTS = """
"""
EXAMPLES = """
---
msg:
description: Success message
returned: success
type: string
sample: Json facts for the volume copy that was created.
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
sample: Created Volume Copy Pair with ID
"""
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
(rc, resp) = request(url, method='GET', url_username=params['api_username'],
url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
volume_copy_pair_id = None
for potential_copy_pair in resp:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
volume_copy_pair_id = potential_copy_pair['id']
return volume_copy_pair_id
def create_copy_pair(params):
get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
url = params['api_url'] + get_status
rData = {
"sourceId": params['source_volume_id'],
"targetId": params['destination_volume_id']
}
(rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def delete_copy_pair_by_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 204:
return False, (rc, resp)
else:
return True, (rc, resp)
def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(rc, resp) = request(url, ignore_errors=True, method='DELETE',
url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
validate_certs=params['validate_certs'])
if rc != 200:
return False, (rc, resp)
else:
return True, (rc, resp)
def start_stop_copy(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='POST',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
return True, response_data[0]['percentComplete']
else:
return False, response_data
def check_copy_status(params):
get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
params['ssid'], params['volume_copy_pair_id'])
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
if response_data['percentComplete'] != -1:
return True, response_data['percentComplete']
else:
return False, response_data['percentComplete']
else:
return False, response_data
def find_valid_copy_pair_targets_and_sources(params):
get_status = 'storage-systems/%s/volumes' % params['ssid']
url = params['api_url'] + get_status
(response_code, response_data) = request(url, ignore_errors=True, method='GET',
url_username=params['api_username'], url_password=params['api_password'],
headers=HEADERS,
validate_certs=params['validate_certs'])
if response_code == 200:
source_capacity = None
candidates = []
for volume in response_data:
if volume['id'] == params['search_volume_id']:
source_capacity = volume['capacity']
else:
candidates.append(volume)
potential_sources = []
potential_targets = []
for volume in candidates:
if volume['capacity'] > source_capacity:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_targets.append(volume['id'])
else:
if volume['volumeCopyTarget'] is False:
if volume['volumeCopySource'] is False:
potential_sources.append(volume['id'])
return potential_targets, potential_sources
else:
raise Exception("Response [%s]" % response_code)
def main():
module = AnsibleModule(argument_spec=dict(
source_volume_id=dict(type='str'),
destination_volume_id=dict(type='str'),
copy_priority=dict(required=False, default=0, type='int'),
ssid=dict(required=True, type='str'),
api_url=dict(required=True),
api_username=dict(required=False),
api_password=dict(required=False, no_log=True),
validate_certs=dict(required=False, default=True),
targetWriteProtected=dict(required=False, default=True, type='bool'),
onlineCopy=dict(required=False, default=False, type='bool'),
volume_copy_pair_id=dict(type='str'),
status=dict(required=True, choices=['present', 'absent'], type='str'),
create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
search_volume_id=dict(type='str'),
),
mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
['volume_copy_pair_id', 'source_volume_id'],
['volume_copy_pair_id', 'search_volume_id'],
['search_volume_id', 'destination_volume_id'],
['search_volume_id', 'source_volume_id'],
],
required_together=[['source_volume_id', 'destination_volume_id'],
],
required_if=[["create_copy_pair_if_does_not_exist", True, ['source_volume_id', 'destination_volume_id'], ],
["start_stop_copy", 'stop', ['volume_copy_pair_id'], ],
["start_stop_copy", 'start', ['volume_copy_pair_id'], ],
]
)
params = module.params
if not params['api_url'].endswith('/'):
params['api_url'] += '/'
if params['search_volume_id'] is not None:
try:
potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
except:
e = get_exception()
module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))
module.exit_json(changed=False,
msg=' Valid source devices found: %s Valid target devices found: %s' % (len(potential_sources), len(potential_targets)),
search_volume_id=params['search_volume_id'],
valid_targets=potential_targets,
valid_sources=potential_sources)
if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':
currenty_running, status_info = check_copy_status(params)
if params['start_stop_copy'] == 'start':
if currenty_running is True:
module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
else:
module.fail_json(msg="Could not start volume copy pair Error: %s" % info)
else:
if currenty_running is False:
module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
start_status, info = start_stop_copy(params)
if start_status is True:
module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
volume_copy_pair_id=params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not stop volume copy pair Error: %s" % info)
if params['status'] == 'present':
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
if params['volume_copy_pair_id'] is None:
copy_began_status, (rc, resp) = create_copy_pair(params)
if copy_began_status is True:
module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
else:
module.fail_json(msg="Could not create volume copy pair Code: %s Error: %s" % (rc, resp))
else:
exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
params)
if exist_status:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
else:
if exist_status_code == 404:
module.fail_json(
msg=' Volume Copy Pair with ID: %s does not exist. Can not create without source_volume_id and destination_volume_id' %
params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not find volume copy pair Code: %s Error: %s" % (
exist_status_code, exist_status_data))
module.fail_json(msg="Done")
else:
if params['volume_copy_pair_id'] is None:
params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
params)
delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)
if delete_status is True:
module.exit_json(changed=True,
msg=' Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
else:
if delete_status_code == 404:
module.exit_json(changed=False,
msg=' Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
else:
module.fail_json(msg="Could not delete volume copy pair Code: %s Error: %s" % (
delete_status_code, delete_status_data))
if __name__ == '__main__':
main()
| true | true |
f715ca8ab55b1a3d741b6e97e2473b7911154537 | 708 | py | Python | scripts/basic_support/robot_patrol_test.py | liminglong/micros_mars_task_alloc | 9b216e5494dbff6abd7b4c74eb72fc35eb392ca3 | [
"BSD-3-Clause"
] | 4 | 2016-06-15T02:44:43.000Z | 2021-12-20T15:43:32.000Z | scripts/basic_support/robot_patrol_test.py | liminglong/micros_mars_task_alloc | 9b216e5494dbff6abd7b4c74eb72fc35eb392ca3 | [
"BSD-3-Clause"
] | null | null | null | scripts/basic_support/robot_patrol_test.py | liminglong/micros_mars_task_alloc | 9b216e5494dbff6abd7b4c74eb72fc35eb392ca3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Smoke-test driver for the multi-robot patrol behaviors: instantiates the
# patrol behaviors for areas 1 and 2 and starts them. Area 0, the
# motivational behavior, and the switch node are left disabled (commented
# out), presumably for a reduced two-area test run -- TODO confirm.
__author__ = 'Minglong Li'
#import sys
#sys.path.append("~/catkin_ws/src/multi_robot_patrol/scripts/basic_support")
from robot_patrol_area_0 import RobotPatrolArea0
from robot_patrol_area_1 import RobotPatrolArea1
from robot_patrol_area_2 import RobotPatrolArea2
from motivational_behavior import MotivationalBehavior
from switch import Switch
from std_msgs.msg import Bool
#ob1 = RobotPatrolArea0()
ob2 = RobotPatrolArea1()  # patrol behavior for area 1
ob3 = RobotPatrolArea2()  # patrol behavior for area 2
#ob1.start()
ob2.start()
ob3.start()
#ob4 = MotivationalBehavior('mb0',0,0,'switch0/activate')#nodename,robotid,behaviorid
#ob4.start()
#ob5 = Switch('switch0','topic01',Bool,'topic01s')#nodename,subtopic,type,pubtopic
#ob5.start()
| 28.32 | 85 | 0.80226 |
__author__ = 'Minglong Li'
from robot_patrol_area_0 import RobotPatrolArea0
from robot_patrol_area_1 import RobotPatrolArea1
from robot_patrol_area_2 import RobotPatrolArea2
from motivational_behavior import MotivationalBehavior
from switch import Switch
from std_msgs.msg import Bool
ob2 = RobotPatrolArea1()
ob3 = RobotPatrolArea2()
ob2.start()
ob3.start()
| true | true |
f715cb6225840f9ec494e8f8b22c82e88df7a2f3 | 45,794 | py | Python | flair.py | MustafaElshani/flair | ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c | [
"BSD-3-Clause"
] | null | null | null | flair.py | MustafaElshani/flair | ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c | [
"BSD-3-Clause"
] | null | null | null | flair.py | MustafaElshani/flair | ea058f3cc056e92b6f8a9ec7f7790dd6bed5766c | [
"BSD-3-Clause"
] | null | null | null | """ ADT, CMS """
import sys, argparse, subprocess, os, tempfile, glob
def align():
    """flair align: map raw reads to the genome with minimap2, filter by MAPQ,
    sort/index the BAM, and convert alignments to bed12.

    Reads its options from sys.argv (after the 'align' subcommand token).
    Returns the path of the output bed file on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(description='flair-align parse options',
        usage='python flair.py align -g genome.fa -r <reads.fq>|<reads.fa> [options]')
    parser.add_argument('align')  # consumes the subcommand token itself
    required = parser.add_argument_group('required named arguments')
    required.add_argument('-r', '--reads', action='store', dest='r',
        nargs='+', type=str, required=True, help='FastA/FastQ files of raw reads')
    required.add_argument('-g', '--genome', action='store', dest='g',
        type=str, required=True, help='FastA of reference genome, can be minimap2 indexed')
    parser.add_argument('-m', '--minimap2', type=str, default='minimap2',
        action='store', dest='m', help='path to minimap2 if not in $PATH')
    parser.add_argument('-o', '--output',
        action='store', dest='o', default='flair.aligned',
        help='output file name base (default: flair.aligned)')
    parser.add_argument('-t', '--threads', type=str,
        action='store', dest='t', default='4', help='minimap2 number of threads (4)')
    parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools',
        help='samtools executable path if not in $PATH')
    parser.add_argument('-c', '--chromsizes', type=str, action='store', dest='c', default='',
        help='''chromosome sizes tab-separated file, used for converting sam to genome-browser
        compatible psl file''')
    parser.add_argument('--nvrna', action='store_true', dest='n', default=False,
        help='specify this flag to use native-RNA specific alignment parameters for minimap2')
    parser.add_argument('--psl', action='store_true', dest='p',
        help='also output sam-converted psl')
    parser.add_argument('-v1.3', '--version1.3', action='store_true', dest='v',
        help='specify if samtools version 1.3+')
    parser.add_argument('--quality', type=int, action='store', dest='quality', default=1,
        help='minimum MAPQ of read alignment to the genome (1)')
    parser.add_argument('--quiet', default=False, action='store_true', dest='quiet',
        help='''Suppress progress statements from being printed''')
    args, unknown = parser.parse_known_args()
    if unknown and not args.quiet:
        sys.stderr.write('Align unrecognized arguments: {}\n'.format(' '.join(unknown)))
    # Allow -m to be either the minimap2 binary or its containing directory.
    if args.m[-8:] != 'minimap2':
        if args.m[-1] == '/':
            args.m += 'minimap2'
        else:
            args.m += '/minimap2'
    try:
        mm2_command = [args.m, '-ax', 'splice', '-t', args.t, '--secondary=no', args.g] + args.r
        if args.n:
            # native-RNA parameters, inserted before --secondary=no
            mm2_command[5:5] = ['-uf', '-k14']
        if args.quiet:
            if subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w'),
                stderr=open(args.o+'.mm2_stderr', 'w')):
                return 1
        elif subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w')):
            return 1
    except Exception:  # was a bare except: do not swallow SystemExit/KeyboardInterrupt
        sys.stderr.write('Possible minimap2 error, specify executable path with -m\n')
        return 1
    # MAPQ filter (skipped entirely when --quality 0).
    if args.quality != 0:
        if subprocess.call([args.sam, 'view', '-q', str(args.quality), '-h', '-S', args.o+'.sam'],
            stdout=open(args.o+'.q.sam', 'w'), stderr=open(args.o+'.samtools_stderr', 'w')):
            sys.stderr.write('Possible issue with samtools, see {}\n'.format(args.o+'.samtools_stderr'))
            return 1
        subprocess.call(['mv', args.o+'.q.sam', args.o+'.sam'])
    if args.p and subprocess.call([sys.executable, path+'bin/sam_to_psl.py', args.o+'.sam',
        args.o+'.psl', args.c]):
        return 1
    if subprocess.call([args.sam, 'view', '-h', '-Sb', '-@', args.t, args.o+'.sam'],
        stdout=open(args.o+'.unsorted.bam', 'w')):  # exit if samtools view fails
        sys.stderr.write('Possible issue with samtools executable\n')
        return 1
    if not args.v:  # samtools version < 1.3 or unspecified --> detect version from usage text
        ver = subprocess.Popen([args.sam], stderr=subprocess.PIPE, universal_newlines=True)
        for line in ver.stderr:
            if 'Version:' in line:
                v = line.rstrip()[line.find('Version:')+9:line.find('Version:')+12]
                try:
                    if float(v) >= 1.3:
                        if not args.quiet: sys.stderr.write('Samtools version >= 1.3 detected\n')
                        args.v = True
                        break
                except ValueError:  # was a bare except; float() parse failure is the only expected error
                    if not args.quiet: sys.stderr.write('Could not detect samtools version, assuming < 1.3\n')
    # samtools sort CLI changed at 1.3 (-o out.bam vs positional prefix).
    if args.v:
        subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', '-o', args.o+'.bam'],
            stderr=open(args.o+'.unsorted.bam.stderr', 'w'))
    elif subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', args.o],
        stderr=open(args.o+'.unsorted.bam.stderr', 'w')):
        sys.stderr.write('If using samtools v1.3+, please specify -v1.3 argument\n')
        return 1
    subprocess.call([args.sam, 'index', args.o+'.bam'])
    subprocess.call([sys.executable, path+'bin/bam2Bed12.py', '-i', args.o+'.bam'],
        stdout=open(args.o+'.bed', 'w'))
    # NOTE(review): .samtools_stderr only exists when --quality != 0; rm will
    # print a harmless error otherwise.
    subprocess.call(['rm', args.o+'.unsorted.bam', args.o+'.unsorted.bam.stderr', args.o+'.samtools_stderr'])
    return args.o+'.bed'
def correct(aligned_reads=''):
    """flair correct: fix misaligned splice sites in a bed12 of aligned reads
    using short-read junctions (-j) and/or a GTF annotation (-f).

    aligned_reads -- when non-empty (called from the 'align correct' pipeline),
                     used as the query instead of the required -q option.
    Returns the corrected bed file path on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(description='flair-correct parse options',
        usage='python flair.py correct -q query.bed12 [-f annotation.gtf]|[-j introns.tab] -g genome.fa [options]')
    parser.add_argument('correct')  # consumes the subcommand token itself
    required = parser.add_argument_group('required named arguments')
    atleastone = parser.add_argument_group('at least one of the following arguments is required')
    if not aligned_reads:
        required.add_argument('-q', '--query', type=str, default='', required=True,
            action='store', dest='q', help='uncorrected bed12 file')
    required.add_argument('-g', '--genome', action='store', dest='g',
        type=str, required=True, help='FastA of reference genome')
    atleastone.add_argument('-j', '--shortread', action='store', dest='j', type=str, default='',
        help='bed format splice junctions from short-read sequencing')
    atleastone.add_argument('-f', '--gtf', default='',
        action='store', dest='f', help='GTF annotation file')
    parser.add_argument('-c', '--chromsizes', type=str,
        action='store', dest='c', default='', help='chromosome sizes tab-separated file')
    parser.add_argument('--nvrna', action='store_true', dest='n', default=False,
        help='specify this flag to keep the strand of a read consistent after correction')
    parser.add_argument('-t', '--threads', type=str, action='store', dest='t', default='4',
        help='splice site correction script number of threads (4)')
    parser.add_argument('-w', '--ss_window', action='store', dest='w', default='10',
        help='window size for correcting splice sites (W=10)')
    parser.add_argument('-o', '--output',
        action='store', dest='o', default='flair', help='output name base (default: flair)')
    parser.add_argument('--print_check',
        action='store_true', dest='p', default=False, help='Print err.txt with step checking.')
    args, unknown = parser.parse_known_args()
    if unknown:
        sys.stderr.write('Correct unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if aligned_reads:
        args.q = aligned_reads
    if not args.j and not args.f:
        sys.stderr.write('Please specify at least one of the -f or -j arguments for correction\n')
        return 1
    # Build the ssCorrect.py invocation; optional flags appended below.
    correction_cmd = [sys.executable, path+'bin/ssCorrect.py', '-i', args.q,
        '-w', args.w, '-p', args.t, '-o', args.o, '--progress', '-f', args.g]
    if not args.n:
        correction_cmd += ['--correctStrand']
    if args.j:
        correction_cmd += ['-j', args.j]
    if args.f:
        correction_cmd += ['-g', args.f]
    if args.p:
        correction_cmd += ['--print_check']
    if subprocess.call(correction_cmd):
        sys.stderr.write('Correction command did not exit with success status\n')
        # Fix: propagate the failure like every other error path; previously
        # execution continued against a missing corrected-reads file.
        return 1
    if args.c and subprocess.call([sys.executable, path+'bin/bed_to_psl.py', args.c,
        args.o+'_all_corrected.bed', args.o+'_all_corrected.psl']):
        return 1
    return args.o+'_all_corrected.bed'
def collapse_range(corrected_reads='', aligned_reads=''):
    """flair collapse-range: partition corrected reads into independent genomic
    regions (bedPartition), run collapse() on each region in parallel via a
    multiprocessing Pool, then concatenate the per-region isoform outputs.

    corrected_reads/aligned_reads -- when set (pipeline mode), override -q and
        -r; aligned_reads must be the '<base>.bed' of an existing '<base>.bam'.
    Returns (isoforms.bed path, isoforms.fa path) on success, 1 on failure.
    Relies on module globals `path`, `run_id`, and `Pool` (multiprocessing),
    and on external executables: sort, bedPartition, bgzip, tabix, cat, rm.
    """
    parser = argparse.ArgumentParser(description='flair-collapse parse options',
        usage='python flair.py collapse-range -g genome.fa -r reads.bam -q <query.psl>|<query.bed> [options]')
    parser.add_argument('collapse')  # consumes the subcommand token itself
    required = parser.add_argument_group('required named arguments')
    required.add_argument('-r', '--reads', action='store', dest='r', nargs='+',
        type=str, required=True, help='bam file(s) of the aligned reads')
    if not corrected_reads:
        required.add_argument('-q', '--query', type=str, default='', required=True,
            action='store', dest='q', help='bed or psl file of aligned/corrected reads')
    required.add_argument('-g', '--genome', action='store', dest='g',
        type=str, required=True, help='FastA of reference genome')
    parser.add_argument('-f', '--gtf', default='', action='store', dest='f',
        help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
    parser.add_argument('-m', '--minimap2', type=str, default='minimap2',
        action='store', dest='m', help='path to minimap2 if not in $PATH')
    parser.add_argument('-t', '--threads', type=int,
        action='store', dest='t', default=4, help='minimap2 number of threads (4)')
    parser.add_argument('-p', '--promoters', action='store', dest='p', default='',
        help='promoter regions bed file to identify full-length reads')
    parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools',
        help='bedtools executable path, provide if promoter regions specified and bedtools is not in $PATH')
    parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools',
        help='samtools executable path if not in $PATH')
    parser.add_argument('-w', '--end_window', default='100', action='store', dest='w',
        help='window size for comparing TSS/TES (100)')
    parser.add_argument('-s', '--support', default='3', action='store', dest='s',
        help='minimum number of supporting reads for an isoform (3)')
    parser.add_argument('--stringent', default=False, action='store_true', dest='stringent',
        help='''specify if all supporting reads need to be full-length \
        (80%% coverage and spanning 25 bp of the first and last exons)''')
    parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n',
        help='''For each unique splice junction chain, report options include:
        none--best TSSs/TESs chosen for each unique set of splice junctions;
        longest--single TSS/TES chosen to maximize length;
        best_only--single most supported TSS/TES used in conjunction chosen (none)''')
    parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i',
        help='when specified, TSS/TES for each isoform will be determined from supporting reads \
        for individual isoforms (default: not specified, determined at the gene level)')
    parser.add_argument('--max_ends', default=2, action='store', dest='max_ends',
        help='maximum number of TSS/TES picked per isoform (2)')
    parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends',
        help='specify if reads are generated from a long read method with minimal fragmentation')
    parser.add_argument('--filter', default='default', action='store', dest='filter',
        help='''Report options include:
        nosubset--any isoforms that are a proper set of another isoform are removed;
        default--subset isoforms are removed based on support;
        comprehensive--default set + all subset isoforms;
        ginormous--comprehensive set + single exon subset isoforms''')
    parser.add_argument('--quality', type=int, action='store', dest='quality', default=1,
        help='minimum MAPQ of read assignment to an isoform (1)')
    parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate',
        help='''specify if intermediate and temporary files are to be kept for debugging.
        Intermediate files include: promoter-supported reads file,
        read assignments to firstpass isoforms''')
    parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map',
        help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
        note: only works if the quantification method is not using salmon (default: not specified)''')
    parser.add_argument('--quiet', default=False, action='store_true', dest='quiet',
        help='''Suppress progress statements from being printed''')
    parser.add_argument('--salmon', type=str, action='store', dest='salmon',
        default='', help='Path to salmon executable, specify if salmon quantification is desired')
    parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir',
        help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
    parser.add_argument('-o', '--output', default='flair.collapse',
        action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
    args, unknown = parser.parse_known_args()
    if unknown and not args.quiet:
        sys.stderr.write('Collapse-range unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if corrected_reads:
        # Pipeline mode: derive the BAM path from the aligned-reads bed path.
        args.q = corrected_reads
        args.r = [aligned_reads[:-3]+'bam']
    if args.r[0][-3:] != 'bam':
        sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
        return 1
    if args.temp_dir == '':
        args.temp_dir = tempfile.NamedTemporaryFile().name+'/'
    if not os.path.isdir(args.temp_dir):  # make temporary directory
        if subprocess.call(['mkdir', args.temp_dir]):
            sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
            return 1
    if args.temp_dir[-1] != '/':
        args.temp_dir += '/'
    # convert query to bed
    if args.q[-3:].lower() == 'psl':
        subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.q, args.q+'.bed'])
        args.q = args.q+'.bed'
    # partition the bed file into independent regions
    subprocess.call(['sort','-k1,1', '-k2,2n', '--parallel='+str(args.t), args.q],
        stdout=open(args.temp_dir+run_id+'.sorted.bed', 'w'))
    if subprocess.call(['bedPartition', '-parallel='+str(args.t),
        args.temp_dir+run_id+'.sorted.bed', args.o+'.ranges.bed']):
        sys.stderr.write('''Make sure bedPartition (http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/)
        is an executable in your $PATH\n''')
        return 1
    # collect each partition as a "chrom:start-end" string for collapse()
    ranges = []
    for line in open(args.o+'.ranges.bed'):
        line = line.rstrip().split('\t')
        ranges += [line[0]+':'+line[1]+'-'+line[2]]
    # index the bed file so collapse() workers can tabix-subset it per range
    subprocess.call(['bgzip', args.temp_dir+run_id+'.sorted.bed'])
    if subprocess.call(['tabix', '-f', '--preset', 'bed', '--zero-based',
        args.temp_dir+run_id+'.sorted.bed.gz']):
        return 1
    # call collapse on all the ranges; each worker returns 1 on failure
    p = Pool(args.t)
    if 1 in p.map(collapse, ranges):  # if a process failed
        return 1
    p.terminate()
    # consolidate all the isoforms from all the ranges
    subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.bed'),
        stdout=open(args.o+'.isoforms.bed', 'w'))
    subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.fa'),
        stdout=open(args.o+'.isoforms.fa', 'w'))
    if args.f:
        subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.gtf'),
            stdout=open(args.o+'.isoforms.gtf', 'w'))
    subprocess.call(['rm']+glob.glob(args.temp_dir+run_id+'*'))
    return args.o+'.isoforms.bed', args.o+'.isoforms.fa'
def collapse(genomic_range='', corrected_reads=''):
    """flair collapse: define a first-pass isoform set from corrected reads,
    realign the raw reads against it, and keep isoforms with enough support.

    genomic_range  -- 'chrom:start-end'; when set (called from collapse_range
                      as a Pool worker), operate only on that region.
    corrected_reads -- pipeline-mode override for -q.
    Returns (isoforms.bed path, isoforms.fa path) on success, 1 on failure.
    Relies on module globals `path` and `run_id`, and on external executables
    (minimap2, samtools, bedtools, tabix, salmon, mv, rm, cat, mkdir).
    """
    parser = argparse.ArgumentParser(description='flair-collapse parse options',
        usage='python flair.py collapse -g genome.fa -q <query.psl>|<query.bed> \
        -r <reads.fq>/<reads.fa> [options]')
    parser.add_argument('collapse')  # consumes the subcommand token itself
    required = parser.add_argument_group('required named arguments')
    if not corrected_reads:
        required.add_argument('-q', '--query', type=str, default='', required=True,
            action='store', dest='q', help='bed or psl file of aligned/corrected reads')
    required.add_argument('-g', '--genome', action='store', dest='g',
        type=str, required=True, help='FastA of reference genome')
    required.add_argument('-r', '--reads', action='store', dest='r', nargs='+',
        type=str, required=True, help='FastA/FastQ files of raw reads')
    parser.add_argument('-f', '--gtf', default='', action='store', dest='f',
        help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
    parser.add_argument('-m', '--minimap2', type=str, default='minimap2',
        action='store', dest='m', help='path to minimap2 if not in $PATH')
    parser.add_argument('-t', '--threads', type=int,
        action='store', dest='t', default=4, help='minimap2 number of threads (4)')
    parser.add_argument('-p', '--promoters', action='store', dest='p', default='',
        help='promoter regions bed file to identify full-length reads')
    parser.add_argument('--3prime_regions', action='store', dest='threeprime', default='',
        help='TES regions bed file to identify full-length reads')
    parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools',
        help='bedtools executable path, provide if TSS/TES regions specified and bedtools is not in $PATH')
    parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools',
        help='samtools executable path if not in $PATH')
    parser.add_argument('-w', '--end_window', default='100', action='store', dest='w',
        help='window size for comparing TSS/TES (100)')
    parser.add_argument('-s', '--support', default='3', action='store', dest='s',
        help='minimum number of supporting reads for an isoform (3)')
    parser.add_argument('--stringent', default=False, action='store_true', dest='stringent',
        help='''specify if all supporting reads need to be full-length \
        (80%% coverage and spanning 25 bp of the first and last exons)''')
    parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n',
        help='''For each unique splice junction chain, report options include:
        none--best TSSs/TESs chosen for each unique set of splice junctions;
        longest--single TSS/TES chosen to maximize length;
        best_only--single most supported TSS/TES used in conjunction chosen (none)''')
    parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i',
        help='when specified, TSS/TES for each isoform will be determined from supporting reads \
        for individual isoforms (default: not specified, determined at the gene level)')
    parser.add_argument('--no_end_adjustment', default=False, action='store_true', dest='no_end_adjustment',
        help='''when specified, TSS/TES from the gtf provided with -f will not be used to adjust isoform
        TSSs/TESs each isoform will be determined from supporting reads''')
    parser.add_argument('--max_ends', default=2, action='store', dest='max_ends',
        help='maximum number of TSS/TES picked per isoform (2)')
    parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends',
        help='specify if reads are generated from a long read method with minimal fragmentation')
    parser.add_argument('--filter', default='default', action='store', dest='filter',
        help='''Report options include:
        nosubset--any isoforms that are a proper set of another isoform are removed;
        default--subset isoforms are removed based on support;
        comprehensive--default set + all subset isoforms;
        ginormous--comprehensive set + single exon subset isoforms''')
    parser.add_argument('--quality', type=int, action='store', dest='quality', default=1,
        help='minimum MAPQ of read assignment to an isoform (1)')
    parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate',
        help='''specify if intermediate and temporary files are to be kept for debugging.
        Intermediate files include: promoter-supported reads file,
        read assignments to firstpass isoforms''')
    parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map',
        help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
        note: only works if the quantification method is not using salmon (default: not specified)''')
    parser.add_argument('--quiet', default=False, action='store_true', dest='quiet',
        help='''Suppress progress statements from being printed''')
    parser.add_argument('--range', default='', action='store', dest='range',
        help='''interval for which to collapse isoforms for, formatted chromosome:coord1-coord2 or tab-delimited;
        if a range is specified, then the aligned reads bam must be specified with -r
        and the query must be a sorted, bgzip-ed bed file''')
    parser.add_argument('--salmon', type=str, action='store', dest='salmon',
        default='', help='Path to salmon executable, specify if salmon quantification is desired')
    parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir',
        help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
    parser.add_argument('-o', '--output', default='flair.collapse',
        action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
    args, unknown = parser.parse_known_args()
    if unknown and not args.quiet:
        sys.stderr.write('Collapse unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if corrected_reads:
        args.q = corrected_reads
    # housekeeping stuff
    tempfile_dir = tempfile.NamedTemporaryFile().name
    tempfile_name = tempfile_dir[tempfile_dir.rfind('/')+1:]+'.'
    if args.temp_dir == '':
        args.temp_dir = tempfile_dir+'/'
    if not os.path.isdir(args.temp_dir):  # make temporary directory
        if subprocess.call(['mkdir', args.temp_dir]):
            sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
            return 1
    if args.temp_dir[-1] != '/':
        args.temp_dir += '/'
    if genomic_range:  # this module was called internally from collapse_range
        args.range = genomic_range
        args.o = args.temp_dir+run_id
        args.q = args.temp_dir+run_id+'.sorted.bed.gz'
        args.quiet = True
    # Allow -m to be either the minimap2 binary or its containing directory.
    if args.m[-8:] != 'minimap2':
        if args.m[-1] == '/':
            args.m += 'minimap2'
        else:
            args.m += '/minimap2'
    args.t, args.quality = str(args.t), str(args.quality)  # convert from int to str
    args.o += '.'
    if not os.path.exists(args.q):
        sys.stderr.write('Query file path does not exist\n')
        return 1
    if os.stat(args.q).st_size == 0:
        sys.stderr.write('Query file is empty\n')
        return 1
    # separate out the read sequences and corrected reads corresponding to the specified range
    if args.range:
        if '\t' in args.range:
            args.range = args.range.split('\t')
            args.range = args.range[0]+':'+args.range[1]+'-'+args.range[2]
        ext = '.bed'  # query file extension will be 'bed'
        args.o += args.range+'.'
        if args.r[0][-3:] != 'bam':
            sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
            return 1
        bams = []
        for i in range(len(args.r)):  # subset bam file for alignments within range
            bams += [args.temp_dir+tempfile_name+args.range+str(i)+'.bam']
            if subprocess.call([args.sam, 'view', '-h', args.r[i], args.range],
                stdout=open(bams[-1], 'w')):
                return 1
        args.r = []
        for i in range(len(bams)):  # read sequences of the alignments within range
            args.r += [bams[i][:-3]+'fasta']
            subprocess.call([args.sam, 'fasta', bams[i]],
                stdout=open(args.r[-1], 'w'),
                stderr=open(args.temp_dir+tempfile_name+'bam2fq_stderr', 'w'))
        subprocess.call(['rm'] + bams)
        chrom = args.range[:args.range.find(':')]
        coord1 = args.range[args.range.find(':')+1:args.range.find('-')]
        coord2 = args.range[args.range.find('-')+1:]
        precollapse = args.temp_dir+tempfile_name+args.range+'.bed'  # name of subsetted query file
        coordfile = open(args.temp_dir+tempfile_name+args.range+'.range.bed', 'wt')  # write range to a bed file
        coordfile.write('\t'.join([chrom, coord1, coord2]))
        coordfile.close()
        if subprocess.call(['tabix', '-R', args.temp_dir+tempfile_name+args.range+'.range.bed', args.q],
            stdout=open(precollapse, 'w')):
            sys.stderr.write('Query file needs to be a sorted, bgzip-ed, tabix-indexed bed file if range is specified\n')
            return 1
    else:
        ext = '.'+args.q[-3:]  # query file extension (bed or psl)
        precollapse = args.q  # query file unchanged
        args.r = args.r[0].split(',') if ',' in args.r[0] else args.r  # read sequences
    # filter out the reads with TSSs without promoter support
    intermediate = []
    if args.p:
        if not args.quiet: sys.stderr.write('Filtering out reads without promoter-supported TSS\n')
        if subprocess.call([sys.executable, path+'bin/pull_starts.py', args.q,
            args.temp_dir+tempfile_name+'tss.bed']):
            return 1
        if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tss.bed', '-b', args.p],
            stdout=open(args.temp_dir+tempfile_name+'promoter_intersect.bed', 'w')):
            return 1
        precollapse = args.o+'promoter_supported'+ext  # filename of promoter-supported, corrected reads
        subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py',
            args.temp_dir+tempfile_name+'promoter_intersect.bed', args.q, precollapse])
        intermediate += [args.temp_dir+tempfile_name+'tss.bed', precollapse]
    # filter out the reads whose TESs lack 3' end support
    if args.threeprime:
        if not args.quiet: sys.stderr.write('Filtering out reads without TES support\n')
        if subprocess.call([sys.executable, path+'bin/pull_starts.py', precollapse,
            args.temp_dir+tempfile_name+'tes.bed', 'reverse']):
            return 1
        if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tes.bed', '-b', args.threeprime],
            stdout=open(args.temp_dir+tempfile_name+'tes_intersect.bed', 'w')):
            return 1
        precollapse = args.o+'tes_supported'+ext  # filename of 3' end-supported, corrected reads
        subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py',
            args.temp_dir+tempfile_name+'tes_intersect.bed', args.q, precollapse])
        intermediate += [args.temp_dir+tempfile_name+'tes.bed', precollapse]
    # first-pass isoform definition from the (filtered) corrected reads
    collapse_cmd = [sys.executable, path+'bin/collapse_isoforms_precise.py', '-q', precollapse,
        '-m', str(args.max_ends), '-w', args.w, '-n', args.n, '-o', args.o+'firstpass.unfiltered'+ext]
    if args.f and not args.no_end_adjustment:
        collapse_cmd += ['-f', args.f]
    if args.i:
        collapse_cmd += ['-i']
    if args.quiet:
        collapse_cmd += ['--quiet']
    if subprocess.call(collapse_cmd):
        return 1
    # filtering out subset isoforms with insufficient support
    if subprocess.call([sys.executable, path+'bin/filter_collapsed_isoforms.py',
        args.o+'firstpass.unfiltered'+ext, args.filter, args.o+'firstpass'+ext, args.w]):
        return 1
    intermediate += [args.o+'firstpass.unfiltered'+ext]
    # rename first-pass isoforms to annotated transcript IDs if they match
    if args.f:
        if not args.quiet: sys.stderr.write('Renaming isoforms\n')
        if subprocess.call([sys.executable, path+'bin/identify_gene_isoform.py',
            args.o+'firstpass'+ext, args.f, args.o+'firstpass.named'+ext]):
            # Fix: was sys.exit(1). collapse() runs as a multiprocessing.Pool
            # worker from collapse_range(); raising SystemExit in a worker
            # breaks the pool, and every other error path here returns 1.
            return 1
        subprocess.call(['mv', args.o+'firstpass.named'+ext, args.o+'firstpass'+ext])
    if subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'firstpass'+ext,
        args.g, args.o+'firstpass.fa']):
        return 1
    # reassign reads to first-pass isoforms
    if not args.quiet: sys.stderr.write('Aligning reads to first-pass isoform reference\n')
    align_files = []  # removed unused local count_files
    alignout = args.temp_dir + tempfile_name +'firstpass.'
    try:
        if subprocess.call([args.m, '-a', '-t', args.t, '-N', '4', args.o+'firstpass.fa'] + args.r,
            stdout=open(alignout+'sam', 'w'), stderr=open(alignout+'mm2_stderr', 'w')):
            return 1
    except Exception as e:
        sys.stderr.write(str(e)+'\n\n\nMinimap2 error, please check that all file, directory, and executable paths exist\n')
        return 1
    # count the number of supporting reads for each first-pass isoform
    if args.salmon:  # use salmon to count
        if subprocess.call([args.sam, 'view', '-F', '4', '-h', '-S', alignout+'sam'],
            stdout=open(alignout+'mapped.sam', 'w')):
            return 1
        subprocess.call(['mv', alignout+'mapped.sam', alignout+'sam'])
        subprocess.call([args.salmon, 'quant', '-t', args.o+'firstpass.fa', '-o', alignout+'salmon',
            '-p', args.t, '-l', 'U', '-a', alignout+'sam'], stderr=open(alignout+'salmon_stderr.txt', 'w'))
        count_file = alignout+'salmon/quant.sf'
        align_files += [alignout+'sam', alignout+'salmon/quant.sf']
    else:
        # --trust_ends implies no MAPQ filtering
        args.quality = '0' if args.trust_ends else args.quality
        if args.quality != '0':
            subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', alignout+'sam'],
                stdout=open(alignout+'q.sam', 'w'), stderr=open(alignout+'q.samtools_stderr', 'w'))
            align_files += [alignout+'sam']
        else:
            subprocess.call(['mv', alignout+'sam', alignout+'q.sam'])
        count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', alignout+'q.sam',
            '-o', alignout+'q.counts', '-t', args.t, '--quality', args.quality]
        if args.stringent:
            count_cmd += ['--stringent', '-i', args.o+'firstpass'+ext]
        if args.trust_ends:
            count_cmd += ['--trust_ends']
        if args.generate_map:
            count_cmd += ['--generate_map', args.o+'isoform.read.map.txt']
        if subprocess.call(count_cmd):
            sys.stderr.write('Failed at counting step for isoform read support\n')
            return 1
        count_file = alignout+'q.counts'
        align_files += [alignout+'q.sam']
    subprocess.call([sys.executable, path+'bin/combine_counts.py', count_file, args.o+'firstpass.q.counts'])
    # keep only isoforms with at least -s supporting reads, then emit sequences
    if not args.quiet: sys.stderr.write('Filtering isoforms by read coverage\n')
    subprocess.call([sys.executable, path+'bin/match_counts.py', args.o+'firstpass.q.counts',
        args.o+'firstpass'+ext, args.s, args.o+'isoforms'+ext])
    subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'isoforms'+ext,
        args.g, args.o+'isoforms.fa'])
    if args.f:
        subprocess.call([sys.executable, path+'bin/psl_to_gtf.py', args.o+'isoforms'+ext],
            stdout=open(args.o+'isoforms.gtf', 'w'))
    # cleanup of temporary/intermediate files
    subprocess.call(['rm', '-rf', args.o+'firstpass.fa', alignout+'q.counts'])
    if not args.keep_intermediate:
        subprocess.call(['rm', args.o+'firstpass.q.counts', args.o+'firstpass'+ext])
        subprocess.call(['rm', '-rf'] + glob.glob(args.temp_dir+'*'+tempfile_name+'*') + align_files + intermediate)
    return args.o+'isoforms.bed', args.o+'isoforms.fa'
def quantify(isoform_sequences=''):
parser = argparse.ArgumentParser(description='flair-quantify parse options', \
usage='python flair.py quantify -r reads_manifest.tsv -i isoforms.fa [options]')
parser.add_argument('quantify')
required = parser.add_argument_group('required named arguments')
if not isoform_sequences:
required.add_argument('-r', '--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
required.add_argument('-i', '--isoforms', action='store', dest='i', \
type=str, required=True, help='FastA of FLAIR collapsed isoforms')
else:
required.add_argument('--reads_manifest', action='store', dest='r', type=str, \
required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
action='store', dest='m', help='path to minimap2 if not in $PATH')
parser.add_argument('-t', '--threads', type=int, \
action='store', dest='t', default=4, help='minimap2 number of threads (4)')
parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
help='specify a samtools executable path if not in $PATH if --quality is also used')
parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
help='''minimum MAPQ of read assignment to an isoform. If using salmon, all alignments are
used (1)''')
parser.add_argument('-o', '--output', type=str, action='store', dest='o', \
default='counts_matrix.tsv', help='Counts matrix output file name prefix (counts_matrix.tsv)')
parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
default='', help='Path to salmon executable, specify if salmon quantification is desired')
parser.add_argument('--tpm', action='store_true', dest='tpm', default=False, \
help='specify this flag to output additional file with expression in TPM')
parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
help='specify if reads are generated from a long read method with minimal fragmentation')
parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
help='''directory to put temporary files. use "./" to indicate current directory
(default: python tempfile directory)''')
args, unknown = parser.parse_known_args()
if unknown:
sys.stderr.write('Quantify unrecognized arguments: {}\n'.format(' '.join(unknown)))
if isoform_sequences:
args.i = isoform_sequences
args.o += '.counts_matrix.tsv'
try:
import numpy as np
import codecs
except:
sys.stderr.write('Numpy import error. Please pip install numpy. Exiting.\n')
sys.exit(1)
if args.m[-8:] != 'minimap2':
if args.m[-1] == '/':
args.m += 'minimap2'
else:
args.m += '/minimap2'
args.t, args.quality = str(args.t), str(args.quality)
samData = list()
with codecs.open(args.r, "r", encoding='utf-8', errors='ignore') as lines:
for line in lines:
cols = line.rstrip().split('\t')
if len(cols)<4:
sys.stderr.write('Expected 4 columns in manifest.tsv, got %s. Exiting.\n' % len(cols))
return 1
sample, group, batch, readFile = cols
readFileRoot = tempfile.NamedTemporaryFile().name
if args.temp_dir != '':
if not os.path.isdir(args.temp_dir):
subprocess.call(['mkdir', args.temp_dir])
readFileRoot = args.temp_dir + '/' + readFileRoot[readFileRoot.rfind('/')+1:]
samData.append(cols + [readFileRoot + '.sam'])
for num,sample in enumerate(samData,0):
sys.stderr.write("Step 1/3. Aligning sample %s_%s: %s/%s \r" % (sample[0],sample[2],num+1,len(samData)))
mm2_command = [args.m, '-a', '-N', '4', '-t', args.t, args.i, sample[-2]]
try:
if subprocess.call(mm2_command, stdout=open(sample[-1], 'w'), \
stderr=open(sample[-1]+'.mm2_stderr.txt', 'w')):
sys.stderr.write('Check {} file\n'.format(sample[-1]+'.mm2_stderr.txt'))
return 1
except:
sys.stderr.write('''Possible minimap2 error, please check that all file, directory,
and executable paths exist\n''')
return 1
subprocess.call(['rm', sample[-1]+'.mm2_stderr.txt'])
sys.stderr.flush()
if args.quality != '0' and not args.trust_ends and not args.salmon:
if subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', sample[-1]], \
stdout=open(sample[-1]+'.qual.sam', 'w')):
return 1
subprocess.call(['mv', sample[-1]+'.qual.sam', sample[-1]])
countData = dict()
for num,data in enumerate(samData):
sample, group, batch, readFile, samOut = data
sys.stderr.write("Step 2/3. Quantifying isoforms for sample %s_%s: %s/%s \r" % (sample,batch,num+1,len(samData)))
if not args.salmon:
count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', samOut, \
'-o', samOut+'.counts.txt', '-t', args.t, '--quality', args.quality]
if args.trust_ends:
count_cmd += ['--trust_ends']
subprocess.call(count_cmd)
for line in open(samOut+'.counts.txt'):
line = line.rstrip().split('\t')
iso, numreads = line[0], line[1]
if iso not in countData: countData[iso] = np.zeros(len(samData))
countData[iso][num] = numreads
else:
subprocess.call([args.salmon, 'quant', '-t', args.i, '-o', samOut[:-4]+'.salmon', \
'-p', args.t, '-l', 'U', '-a', samOut], stderr=open('salmon_stderr.txt', 'w'))
salmonOut = open(samOut[:-4]+'.salmon/quant.sf')
salmonOut.readline() # header
for line in salmonOut:
line = line.rstrip().split('\t')
iso, tpm, numreads = line[0], line[3], line[4]
if iso not in countData: countData[iso] = np.zeros(len(samData))
if args.tpm:
countData[iso][num] = tpm
else:
countData[iso][num] = numreads
subprocess.call(['rm', '-r', samOut[:-4]+'.salmon/', 'salmon_stderr.txt'])
sys.stderr.flush()
subprocess.call(['rm', samOut])
sys.stderr.write("Step 3/3. Writing counts to {} \r".format(args.o))
countMatrix = open(args.o,'w')
countMatrix.write("ids\t%s\n" % "\t".join(["_".join(x[:3]) for x in samData]))
features = sorted(list(countData.keys()))
for f in features:
countMatrix.write("%s\t%s\n" % (f,"\t".join(str(x) for x in countData[f])))
countMatrix.close()
sys.stderr.flush()
sys.stderr.write("\n")
if args.tpm and not args.salmon:
subprocess.call([sys.executable, path+'bin/counts_to_tpm.py', args.o, args.o+'.tpm.tsv'])
return args.o
def diffExp(counts_matrix=''):
	"""Run the FLAIR differential expression module.

	Parses command-line options, then invokes bin/deFLAIR.py on an isoform
	counts matrix. When chained after quantify, counts_matrix carries the
	matrix file name so -q is not required on the command line.
	Returns None (deFLAIR.py writes its tables/plots under args.o).
	"""
	parser = argparse.ArgumentParser(description='flair-diffExp parse options', \
		usage='python flair.py diffExp -q counts_matrix.tsv --out_dir out_dir [options]')
	parser.add_argument('diffExp')  # consumes the positional mode word from sys.argv
	required = parser.add_argument_group('required named arguments')
	if not counts_matrix:
		required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
			type=str, required=True, help='Tab-delimited isoform count matrix from flair quantify module.')
	required.add_argument('-o', '--out_dir', action='store', dest='o', \
		type=str, required=True, help='Output directory for tables and plots.')
	parser.add_argument('-t', '--threads', action='store', dest='t', \
		type=int, required=False, default=4, help='Number of threads for parallel DRIMSeq.')
	parser.add_argument('-e', '--exp_thresh', action='store', dest='e', type=int, required=False, \
		default=10, help='Read count expression threshold. Isoforms in which \
		both conditions contain fewer than E reads are filtered out (Default E=10)')
	parser.add_argument('-of', '--out_dir_force', action='store_true', dest='of', \
		required=False, help='''Specify this argument to force overwriting of files in
		an existing output directory''')
	args, unknown = parser.parse_known_args()
	if unknown:
		sys.stderr.write('DiffExp unrecognized arguments: {}\n'.format(' '.join(unknown)))
	if counts_matrix:
		args.q = counts_matrix
		# FIX: previously this line read `args.o+'.diffExp'`, a no-op expression
		# whose result was discarded. Append the suffix instead, mirroring
		# quantify's `args.o += '.counts_matrix.tsv'`, so chained runs get a
		# distinct output directory name.
		args.o += '.diffExp'
	scriptsBin = path + "bin/"
	runDE = scriptsBin + "deFLAIR.py"
	# NOTE(review): '-W ignore' is one argv element; CPython's -W parser strips
	# the value, so this behaves like -Wignore — left unchanged.
	DEcommand = [sys.executable, '-W ignore', runDE, '--filter', str(args.e), '--threads', \
		str(args.t), '--outDir', args.o, '--matrix', args.q]
	if args.of:
		DEcommand += ['-of']
	subprocess.call(DEcommand)
	return
def diffSplice(isoforms='', counts_matrix=''):
	"""Run the FLAIR differential splicing module.

	Calls bin/call_diffsplice_events.py plus bin/es_as*.py to tabulate
	alternative-splicing events from the isoform bed, then bin/runDS.py
	for DRIMSeq testing of each event type (es, alt5, alt3, ir).
	isoforms/counts_matrix are filled in when chained from earlier modules.
	Returns None on success, 1 on error.
	"""
	parser = argparse.ArgumentParser(description='flair-diffSplice parse options', \
		usage='python flair.py diffSplice -i isoforms.bed|isoforms.psl -q counts_matrix.tsv [options]')
	# FIX: positional was 'diffExp' (copy-paste from the diffExp module); it only
	# consumes the mode word, so renaming it is caller-safe.
	parser.add_argument('diffSplice')
	required = parser.add_argument_group('required named arguments')
	if not isoforms:
		required.add_argument('-i', '--isoforms', action='store', dest='i', required=True, \
			type=str, help='isoforms in bed or psl format')
		required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
			type=str, required=True, help='tab-delimited isoform count matrix from flair quantify module')
	parser.add_argument('-o', '--output', action='store', dest='o', default='flair.diffsplice', type=str, \
		required=False, help='output file name base for FLAIR isoforms (default: flair.diffsplice)')
	parser.add_argument('--test', action='store_true', dest='test', \
		required=False, default=False, help='Run DRIMSeq statistical testing')
	parser.add_argument('-t', '--threads', action='store', dest='t', \
		type=int, required=False, default=1, help='Number of threads DRIMSeq (1)')
	parser.add_argument('--drim1', action='store', dest='drim1', type=int, required=False, default=6, \
		help='''The minimum number of samples that have coverage over an AS event inclusion/exclusion
		for DRIMSeq testing; events with too few samples are filtered out and not tested (6)''')
	parser.add_argument('--drim2', action='store', dest='drim2', type=int, required=False, default=3, \
		help='''The minimum number of samples expressing the inclusion of an AS event;
		events with too few samples are filtered out and not tested (3)''')
	parser.add_argument('--drim3', action='store', dest='drim3', type=int, required=False, default=15, \
		help='''The minimum number of reads covering an AS event inclusion/exclusion for DRIMSeq testing,
		events with too few samples are filtered out and not tested (15)''')
	parser.add_argument('--drim4', action='store', dest='drim4', type=int, required=False, default=5, \
		help='''The minimum number of reads covering an AS event inclusion for DRIMSeq testing,
		events with too few samples are filtered out and not tested (5)''')
	parser.add_argument('--batch', action='store_true', dest='batch', required=False, default=False, \
		help='''If specified with --test, DRIMSeq will perform batch correction''')
	parser.add_argument('--conditionA', action='store', dest='conditionA', required=False, default='', \
		help='''Specify one condition corresponding to samples in the counts_matrix to be compared against
		condition2; by default, the first two unique conditions are used''')
	parser.add_argument('--conditionB', action='store', dest='conditionB', required=False, default='', \
		help='''Specify another condition corresponding to samples in the counts_matrix to be compared against
		conditionA''')
	args, unknown = parser.parse_known_args()
	if unknown:
		sys.stderr.write('DiffSplice unrecognized arguments: {}\n'.format(' '.join(unknown)))
	if isoforms:
		args.i = isoforms
		args.q = counts_matrix
	if args.i[-3:].lower() == 'psl':
		# downstream event callers expect bed; convert psl input first
		subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.i, args.i+'.bed'])
		args.i = args.i+'.bed'
	subprocess.call([sys.executable, path+'bin/call_diffsplice_events.py', args.i, args.o, args.q])
	subprocess.call([sys.executable, path+'bin/es_as.py', args.i], stdout=open(args.o+'.es.events.tsv','w'))
	subprocess.call([sys.executable, path+'bin/es_as_inc_excl_to_counts.py', args.q, args.o+'.es.events.tsv'], \
		stdout=open(args.o+'.es.events.quant.tsv','w'))
	subprocess.call(['rm', args.o+'.es.events.tsv'])
	# FIX: the condition previously listed args.drim4 twice and omitted args.drim3.
	# NOTE(review): with the nonzero defaults this condition is always true, so
	# DRIMSeq runs even without --test; that existing behavior is preserved and
	# only the duplicated-operand typo is corrected.
	if args.test or args.drim1 or args.drim2 or args.drim3 or args.drim4:
		sys.stderr.write('DRIMSeq testing for each AS event type\n')
		drim1, drim2, drim3, drim4 = [str(x) for x in [args.drim1, args.drim2, args.drim3, args.drim4]]
		ds_command = [sys.executable, path+'bin/runDS.py', '--threads', str(args.t), \
			'--drim1', drim1, '--drim2', drim2, '--drim3', drim3, '--drim4', drim4]
		if args.batch:
			ds_command += ['--batch']
		if args.conditionA:
			# both conditions must be supplied together or not at all
			if not args.conditionB:
				sys.stderr.write('Both conditionA and conditionB must be specified, or both left unspecified\n')
				return 1
			ds_command += ['--conditionA', args.conditionA, '--conditionB', args.conditionB]
		with open(args.o+'.stderr.txt', 'w') as ds_stderr:
			subprocess.call(ds_command + ['--matrix', args.o+'.es.events.quant.tsv', '--prefix', args.o+'.es'], stderr=ds_stderr)
			subprocess.call(ds_command + ['--matrix', args.o+'.alt5.events.quant.tsv', '--prefix', args.o+'.alt5'], stderr=ds_stderr)
			subprocess.call(ds_command + ['--matrix', args.o+'.alt3.events.quant.tsv', '--prefix', args.o+'.alt3'], stderr=ds_stderr)
			subprocess.call(ds_command + ['--matrix', args.o+'.ir.events.quant.tsv', '--prefix', args.o+'.ir'], stderr=ds_stderr)
	return
# ---------------------------------------------------------------------------
# Script driver: dispatch to the requested FLAIR module(s).
# The first CLI argument selects modules either by name ('align', 'correct',
# ...) or by digits (e.g. '1234' runs align, correct, collapse, quantify).
# ---------------------------------------------------------------------------
# Directory containing this script, used by all modules to locate bin/ helpers.
path = '/'.join(os.path.realpath(__file__).split("/")[:-1])+'/'
if len(sys.argv) < 2:
	sys.stderr.write('usage: python flair.py <mode> --help \n')
	sys.stderr.write('modes: align, correct, collapse, quantify, diffExp, diffSplice\n')
	sys.stderr.write('Multiple modules can be run when specified using numbers, e.g.:\n')
	sys.stderr.write('python flair.py 1234 ...')
	sys.exit(1)
else:
	mode = sys.argv[1].lower()
# Results passed from one module to the next when several are chained;
# 0 means "not produced in this run".
aligned_reads, corrected_reads, isoforms, isoform_sequences, counts_matrix = [0]*5
if mode == 'align' or '1' in mode:
	# each module returns 1 on failure, otherwise its output file name(s)
	status = align()
	if status == 1:
		sys.exit(1)
	else:
		aligned_reads = status
if mode == 'correct' or '2' in mode:
	if aligned_reads:
		status = correct(aligned_reads=aligned_reads)
	else:
		status = correct()
	if status == 1:
		sys.exit(1)
	else:
		corrected_reads = status
# '3.5' must be excluded here so a mode string like '3.5' does not also
# trigger the plain collapse module.
if mode == 'collapse' or ('3' in mode and '3.5' not in mode):
	if corrected_reads:
		status = collapse(corrected_reads=corrected_reads)
	else:
		status = collapse()
	if status == 1:
		sys.exit(1)
	else:
		isoforms, isoform_sequences = status
if mode == 'collapse-range' or '3.5' in mode:
	from multiprocessing import Pool
	# run_id is a module-level name that collapse_range() reads to build
	# unique temp-file prefixes.
	tempfile_name = tempfile.NamedTemporaryFile().name
	run_id = tempfile_name[tempfile_name.rfind('/')+1:]
	if corrected_reads and not aligned_reads:
		sys.stderr.write('''Collapse 3.5 run consecutively without align module; will assume {}
			to be the name of the aligned reads bam file\n'''.format(corrected_reads[:-18]+'.bam'))
		status = collapse_range(corrected_reads=corrected_reads, \
			aligned_reads=corrected_reads[:-18]+'.bam')
	elif corrected_reads and aligned_reads:
		status = collapse_range(corrected_reads=corrected_reads, aligned_reads=aligned_reads)
	elif not corrected_reads and aligned_reads:
		sys.stderr.write('Correct module not run...\n')
		status = collapse_range(corrected_reads=aligned_reads, aligned_reads=aligned_reads)
	else:
		status = collapse_range()
	if status == 1:
		sys.exit(1)
	else:
		isoforms, isoform_sequences = status
	# strip '3.5' so its digits do not later trigger quantify/diffExp
	mode = mode.replace('3.5', 'x')
if mode == 'quantify' or '4' in mode:
	if isoform_sequences:
		status = quantify(isoform_sequences=isoform_sequences)
	else:
		status = quantify()
	if status == 1:
		sys.exit(1)
	else:
		counts_matrix = status
if mode == 'diffexp' or '5' in mode:
	if counts_matrix:
		status = diffExp(counts_matrix=counts_matrix)
	else:
		status = diffExp()
	# NOTE(review): diffExp ends with a bare `return`, so status is None here
	# and this check never fires — verify against the module's error handling.
	if status == 1:
		sys.exit(1)
if mode == 'diffsplice' or '6' in mode:
	if counts_matrix and isoforms:
		status = diffSplice(isoforms=isoforms, counts_matrix=counts_matrix)
	elif not isoforms and counts_matrix:
		sys.stderr.write('DiffSplice run consecutively without collapse module, exiting\n')
		sys.exit(1)
	else:
		status = diffSplice()
	if status == 1:
		sys.exit(1)
if mode == '--version':
	sys.stderr.write('FLAIR v1.5.1\n')
| 50.769401 | 129 | 0.694414 |
import sys, argparse, subprocess, os, tempfile, glob
def align():
	"""FLAIR align module: map raw reads to the genome with minimap2,
	MAPQ-filter the alignments, and produce a sorted/indexed BAM plus a
	bed12 file (and optionally a psl).

	Returns the bed12 file name on success, 1 on any error.
	"""
	parser = argparse.ArgumentParser(description='flair-align parse options', \
		usage='python flair.py align -g genome.fa -r <reads.fq>|<reads.fa> [options]')
	parser.add_argument('align')  # consumes the positional mode word
	required = parser.add_argument_group('required named arguments')
	required.add_argument('-r', '--reads', action='store', dest='r', \
		nargs='+', type=str, required=True, help='FastA/FastQ files of raw reads')
	required.add_argument('-g', '--genome', action='store', dest='g', \
		type=str, required=True, help='FastA of reference genome, can be minimap2 indexed')
	parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
		action='store', dest='m', help='path to minimap2 if not in $PATH')
	parser.add_argument('-o', '--output', \
		action='store', dest='o', default='flair.aligned', help='output file name base (default: flair.aligned)')
	parser.add_argument('-t', '--threads', type=str, \
		action='store', dest='t', default='4', help='minimap2 number of threads (4)')
	parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
		help='samtools executable path if not in $PATH')
	parser.add_argument('-c', '--chromsizes', type=str, action='store', dest='c', default='', \
		help='''chromosome sizes tab-separated file, used for converting sam to genome-browser
		compatible psl file''')
	parser.add_argument('--nvrna', action='store_true', dest='n', default=False, \
		help='specify this flag to use native-RNA specific alignment parameters for minimap2')
	parser.add_argument('--psl', action='store_true', dest='p', \
		help='also output sam-converted psl')
	parser.add_argument('-v1.3', '--version1.3', action='store_true', dest='v', \
		help='specify if samtools version 1.3+')
	parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
		help='minimum MAPQ of read alignment to the genome (1)')
	parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
		help='''Suppress progress statements from being printed''')
	args, unknown = parser.parse_known_args()
	if unknown and not args.quiet:
		sys.stderr.write('Align unrecognized arguments: {}\n'.format(' '.join(unknown)))
	# allow -m to be either the minimap2 binary or its containing directory
	if args.m[-8:] != 'minimap2':
		if args.m[-1] == '/':
			args.m += 'minimap2'
		else:
			args.m += '/minimap2'
	try:
		mm2_command = [args.m, '-ax', 'splice', '-t', args.t, '--secondary=no', args.g]+args.r
		if args.n:
			# native-RNA preset: forward-strand only, larger k-mer
			mm2_command[5:5] = ['-uf', '-k14']
		if args.quiet:
			if subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w'), \
				stderr=open(args.o+'.mm2_stderr', 'w')):
				return 1
		elif subprocess.call(mm2_command, stdout=open(args.o+'.sam', 'w')):
			return 1
	# FIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
	except Exception:
		sys.stderr.write('Possible minimap2 error, specify executable path with -m\n')
		return 1
	if args.quality != 0:
		# MAPQ filter; .samtools_stderr is only created on this path
		if subprocess.call([args.sam, 'view', '-q', str(args.quality), '-h', '-S', args.o+'.sam'], \
			stdout=open(args.o+'.q.sam', 'w'), stderr=open(args.o+'.samtools_stderr', 'w')):
			sys.stderr.write('Possible issue with samtools, see {}\n'.format(args.o+'.samtools_stderr'))
			return 1
		subprocess.call(['mv', args.o+'.q.sam', args.o+'.sam'])
	if args.p and subprocess.call([sys.executable, path+'bin/sam_to_psl.py', args.o+'.sam', \
		args.o+'.psl', args.c]):
		return 1
	if subprocess.call([args.sam, 'view', '-h', '-Sb', '-@', args.t, args.o+'.sam'], \
		stdout=open(args.o+'.unsorted.bam', 'w')):
		sys.stderr.write('Possible issue with samtools executable\n')
		return 1
	if not args.v:
		# sniff the samtools version from its usage text (printed on stderr)
		ver = subprocess.Popen([args.sam], stderr=subprocess.PIPE, universal_newlines=True)
		for line in ver.stderr:
			if 'Version:' in line:
				v = line.rstrip()[line.find('Version:')+9:line.find('Version:')+12]
				try:
					if float(v) >= 1.3:
						if not args.quiet: sys.stderr.write('Samtools version >= 1.3 detected\n')
						args.v = True
						break
				# FIX: was a bare `except:`; only float() parsing can fail here
				except ValueError:
					if not args.quiet: sys.stderr.write('Could not detect samtools version, assuming < 1.3\n')
	if args.v:
		# samtools >= 1.3 syntax: -o <out.bam>
		subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', '-o', args.o+'.bam'], \
			stderr=open(args.o+'.unsorted.bam.stderr', 'w'))
	elif subprocess.call([args.sam, 'sort', '-@', args.t, args.o+'.unsorted.bam', args.o], \
		stderr=open(args.o+'.unsorted.bam.stderr', 'w')):
		sys.stderr.write('If using samtools v1.3+, please specify -v1.3 argument\n')
		return 1
	subprocess.call([args.sam, 'index', args.o+'.bam'])
	subprocess.call([sys.executable, path+'bin/bam2Bed12.py', '-i', args.o+'.bam'], stdout=open(args.o+'.bed', 'w'))
	# FIX: only remove .samtools_stderr when it was actually created
	# (previously rm was asked to delete it even when --quality 0 skipped it)
	cleanup = [args.o+'.unsorted.bam', args.o+'.unsorted.bam.stderr']
	if args.quality != 0:
		cleanup.append(args.o+'.samtools_stderr')
	subprocess.call(['rm'] + cleanup)
	return args.o+'.bed'
def correct(aligned_reads=''):
	"""FLAIR correct module: snap read splice sites to annotated and/or
	short-read junctions via bin/ssCorrect.py.

	aligned_reads, when supplied by the driver, replaces the -q option.
	Returns the corrected bed file name on success, 1 on error.
	"""
	ap = argparse.ArgumentParser(description='flair-correct parse options', \
		usage='python flair.py correct -q query.bed12 [-f annotation.gtf]v[-j introns.tab] -g genome.fa [options]')
	ap.add_argument('correct')  # consumes the positional mode word
	req = ap.add_argument_group('required named arguments')
	one_of = ap.add_argument_group('at least one of the following arguments is required')
	if not aligned_reads:
		req.add_argument('-q', '--query', type=str, default='', required=True,
			action='store', dest='q', help='uncorrected bed12 file')
	req.add_argument('-g', '--genome', action='store', dest='g',
		type=str, required=True, help='FastA of reference genome')
	one_of.add_argument('-j', '--shortread', action='store', dest='j', type=str, default='',
		help='bed format splice junctions from short-read sequencing')
	one_of.add_argument('-f', '--gtf', default='',
		action='store', dest='f', help='GTF annotation file')
	ap.add_argument('-c', '--chromsizes', type=str,
		action='store', dest='c', default='', help='chromosome sizes tab-separated file')
	ap.add_argument('--nvrna', action='store_true', dest='n', default=False, help='specify this flag to keep \
		the strand of a read consistent after correction')
	ap.add_argument('-t', '--threads', type=str, action='store', dest='t', default='4',
		help='splice site correction script number of threads (4)')
	ap.add_argument('-w', '--ss_window', action='store', dest='w', default='10',
		help='window size for correcting splice sites (W=10)')
	ap.add_argument('-o', '--output',
		action='store', dest='o', default='flair', help='output name base (default: flair)')
	ap.add_argument('--print_check',
		action='store_true', dest='p', default=False, help='Print err.txt with step checking.')
	args, extra = ap.parse_known_args()
	if extra:
		sys.stderr.write('Correct unrecognized arguments: {}\n'.format(' '.join(extra)))
	if aligned_reads:
		args.q = aligned_reads
	if not (args.j or args.f):
		sys.stderr.write('Please specify at least one of the -f or -j arguments for correction\n')
		return 1
	# assemble the ssCorrect.py invocation from the parsed options
	cmd = [sys.executable, path+'bin/ssCorrect.py', '-i', args.q,
		'-w', args.w, '-p', args.t, '-o', args.o, '--progress', '-f', args.g]
	if not args.n:
		cmd.append('--correctStrand')
	if args.j:
		cmd.extend(['-j', args.j])
	if args.f:
		cmd.extend(['-g', args.f])
	if args.p:
		cmd.append('--print_check')
	if subprocess.call(cmd):
		sys.stderr.write('Correction command did not exit with success status\n')
	if args.c:
		# optional conversion of the corrected bed to psl for genome browsers
		if subprocess.call([sys.executable, path+'bin/bed_to_psl.py', args.c,
				args.o+'_all_corrected.bed', args.o+'_all_corrected.psl']):
			return 1
	return args.o+'_all_corrected.bed'
def collapse_range(corrected_reads='', aligned_reads=''):
	"""FLAIR collapse-range module: partition the genome into independent
	ranges (bedPartition), run collapse() on each range in parallel, and
	concatenate the per-range isoform outputs.

	NOTE(review): depends on module-level names set by the script driver —
	`run_id` (temp-file prefix), `Pool` (imported in the '3.5' branch) and
	the global `collapse` function handed to Pool.map. External tools
	bedPartition, bgzip, tabix and sort must be in $PATH — confirm.
	Returns (isoforms.bed, isoforms.fa) file names on success, 1 on error.
	"""
	parser = argparse.ArgumentParser(description='flair-collapse parse options', \
		usage='python flair.py collapse-range -g genome.fa -r reads.bam -q <query.psl>|<query.bed> [options]')
	parser.add_argument('collapse')
	required = parser.add_argument_group('required named arguments')
	required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
		type=str, required=True, help='bam file(s) of the aligned reads')
	if not corrected_reads:
		required.add_argument('-q', '--query', type=str, default='', required=True, \
			action='store', dest='q', help='bed or psl file of aligned/corrected reads')
	required.add_argument('-g', '--genome', action='store', dest='g', \
		type=str, required=True, help='FastA of reference genome')
	parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
		help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
	parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
		action='store', dest='m', help='path to minimap2 if not in $PATH')
	parser.add_argument('-t', '--threads', type=int, \
		action='store', dest='t', default=4, help='minimap2 number of threads (4)')
	parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
		help='promoter regions bed file to identify full-length reads')
	parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
		help='bedtools executable path, provide if promoter regions specified and bedtools is not in $PATH')
	parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
		help='samtools executable path if not in $PATH')
	parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
		help='window size for comparing TSS/TES (100)')
	parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
		help='minimum number of supporting reads for an isoform (3)')
	parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
		help='''specify if all supporting reads need to be full-length \
		(80%% coverage and spanning 25 bp of the first and last exons)''')
	parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
		help='''For each unique splice junction chain, report options include:
		none--best TSSs/TESs chosen for each unique set of splice junctions;
		longest--single TSS/TES chosen to maximize length;
		best_only--single most supported TSS/TES used in conjunction chosen (none)''')
	parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
		help='when specified, TSS/TES for each isoform will be determined from supporting reads \
		for individual isoforms (default: not specified, determined at the gene level)')
	parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
		help='maximum number of TSS/TES picked per isoform (2)')
	parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
		help='specify if reads are generated from a long read method with minimal fragmentation')
	parser.add_argument('--filter', default='default', action='store', dest='filter', \
		help='''Report options include:
		nosubset--any isoforms that are a proper set of another isoform are removed;
		default--subset isoforms are removed based on support;
		comprehensive--default set + all subset isoforms;
		ginormous--comprehensive set + single exon subset isoforms''')
	parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
		help='minimum MAPQ of read assignment to an isoform (1)')
	parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
		help='''specify if intermediate and temporary files are to be kept for debugging.
		Intermediate files include: promoter-supported reads file,
		read assignments to firstpass isoforms''')
	parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
		help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
		note: only works if the quantification method is not using salmon (default: not specified)''')
	parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
		help='''Suppress progress statements from being printed''')
	parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
		default='', help='Path to salmon executable, specify if salmon quantification is desired')
	parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
		help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
	parser.add_argument('-o', '--output', default='flair.collapse', \
		action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
	args, unknown = parser.parse_known_args()
	if unknown and not args.quiet:
		sys.stderr.write('Collapse-range unrecognized arguments: {}\n'.format(' '.join(unknown)))
	if corrected_reads:
		# chained invocation: reuse the corrected bed and derive the bam name
		# from the aligned-reads file name supplied by the driver
		args.q = corrected_reads
		args.r = [aligned_reads[:-3]+'bam']
	if args.r[0][-3:] != 'bam':
		sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
		return 1
	if args.temp_dir == '':
		args.temp_dir = tempfile.NamedTemporaryFile().name+'/'
	if not os.path.isdir(args.temp_dir):
		if subprocess.call(['mkdir', args.temp_dir]):
			sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
			return 1
	if args.temp_dir[-1] != '/':
		args.temp_dir += '/'
	if args.q[-3:].lower() == 'psl':
		# downstream steps expect bed; convert psl input first
		subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.q, args.q+'.bed'])
		args.q = args.q+'.bed'
	# sort the reads bed, then partition the genome into non-overlapping ranges
	subprocess.call(['sort','-k1,1', '-k2,2n', '--parallel='+str(args.t), args.q], \
		stdout=open(args.temp_dir+run_id+'.sorted.bed', 'w'))
	if subprocess.call(['bedPartition', '-parallel='+str(args.t), args.temp_dir+run_id+'.sorted.bed', args.o+'.ranges.bed']):
		sys.stderr.write('''Make sure bedPartition (http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/)
		is an executable in your $PATH\n''')
		return 1
	# collect each partition as a 'chrom:start-end' range string
	ranges = []
	for line in open(args.o+'.ranges.bed'):
		line = line.rstrip().split('\t')
		ranges += [line[0]+':'+line[1]+'-'+line[2]]
	# compress + index the sorted bed so each worker can tabix-extract its range
	subprocess.call(['bgzip', args.temp_dir+run_id+'.sorted.bed'])
	if subprocess.call(['tabix', '-f', '--preset', 'bed', '--zero-based', args.temp_dir+run_id+'.sorted.bed.gz']):
		return 1
	# one collapse() call per genomic range, run in parallel; a worker
	# returning 1 aborts the whole module
	p = Pool(args.t)
	if 1 in p.map(collapse, ranges):
		return 1
	p.terminate()
	# concatenate per-range outputs into the final isoform files
	subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.bed'), stdout=open(args.o+'.isoforms.bed', 'w'))
	subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.fa'), stdout=open(args.o+'.isoforms.fa', 'w'))
	if args.f:
		subprocess.call(['cat']+glob.glob(args.temp_dir+run_id+'*isoforms.gtf'), stdout=open(args.o+'.isoforms.gtf', 'w'))
	subprocess.call(['rm']+glob.glob(args.temp_dir+run_id+'*'))
	return args.o+'.isoforms.bed', args.o+'.isoforms.fa'
def collapse(genomic_range='', corrected_reads=''):
    """flair collapse: build first-pass isoforms from corrected reads, realign the
    raw reads to that first-pass reference with minimap2, and keep isoforms with
    sufficient read support.

    genomic_range -- optional 'chrom:start-end' (or tab-delimited) region; when set,
        -r must be genome-aligned BAM(s) and the query a sorted, bgzip-ed,
        tabix-indexed bed. Used by collapse_range worker processes.
    corrected_reads -- corrected query file from the correct module; overrides -q.

    Returns (isoforms bed/psl path, isoforms fasta path) on success, or 1 on failure.
    """
    parser = argparse.ArgumentParser(description='flair-collapse parse options', \
        usage='python flair.py collapse -g genome.fa -q <query.psl>|<query.bed> \
        -r <reads.fq>/<reads.fa> [options]')
    parser.add_argument('collapse')  # positional mode token consumed from sys.argv
    required = parser.add_argument_group('required named arguments')
    if not corrected_reads:
        required.add_argument('-q', '--query', type=str, default='', required=True, \
            action='store', dest='q', help='bed or psl file of aligned/corrected reads')
    required.add_argument('-g', '--genome', action='store', dest='g', \
        type=str, required=True, help='FastA of reference genome')
    required.add_argument('-r', '--reads', action='store', dest='r', nargs='+', \
        type=str, required=True, help='FastA/FastQ files of raw reads')
    parser.add_argument('-f', '--gtf', default='', action='store', dest='f', \
        help='GTF annotation file, used for renaming FLAIR isoforms to annotated isoforms and adjusting TSS/TESs')
    parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
        action='store', dest='m', help='path to minimap2 if not in $PATH')
    parser.add_argument('-t', '--threads', type=int, \
        action='store', dest='t', default=4, help='minimap2 number of threads (4)')
    parser.add_argument('-p', '--promoters', action='store', dest='p', default='', \
        help='promoter regions bed file to identify full-length reads')
    parser.add_argument('--3prime_regions', action='store', dest='threeprime', default='', \
        help='TES regions bed file to identify full-length reads')
    parser.add_argument('-b', '--bedtools', action='store', dest='b', default='bedtools', \
        help='bedtools executable path, provide if TSS/TES regions specified and bedtools is not in $PATH')
    parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
        help='samtools executable path if not in $PATH')
    parser.add_argument('-w', '--end_window', default='100', action='store', dest='w', \
        help='window size for comparing TSS/TES (100)')
    parser.add_argument('-s', '--support', default='3', action='store', dest='s', \
        help='minimum number of supporting reads for an isoform (3)')
    parser.add_argument('--stringent', default=False, action='store_true', dest='stringent', \
        help='''specify if all supporting reads need to be full-length \
        (80%% coverage and spanning 25 bp of the first and last exons)''')
    parser.add_argument('-n', '--no_redundant', default='none', action='store', dest='n', \
        help='''For each unique splice junction chain, report options include:
        none--best TSSs/TESs chosen for each unique set of splice junctions;
        longest--single TSS/TES chosen to maximize length;
        best_only--single most supported TSS/TES used in conjunction chosen (none)''')
    parser.add_argument('-i', '--isoformtss', default=False, action='store_true', dest='i', \
        help='when specified, TSS/TES for each isoform will be determined from supporting reads \
        for individual isoforms (default: not specified, determined at the gene level)')
    parser.add_argument('--no_end_adjustment', default=False, action='store_true', dest='no_end_adjustment', \
        help='''when specified, TSS/TES from the gtf provided with -f will not be used to adjust isoform
        TSSs/TESs each isoform will be determined from supporting reads''')
    parser.add_argument('--max_ends', default=2, action='store', dest='max_ends', \
        help='maximum number of TSS/TES picked per isoform (2)')
    parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
        help='specify if reads are generated from a long read method with minimal fragmentation')
    parser.add_argument('--filter', default='default', action='store', dest='filter', \
        help='''Report options include:
        nosubset--any isoforms that are a proper set of another isoform are removed;
        default--subset isoforms are removed based on support;
        comprehensive--default set + all subset isoforms;
        ginormous--comprehensive set + single exon subset isoforms''')
    parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
        help='minimum MAPQ of read assignment to an isoform (1)')
    parser.add_argument('--keep_intermediate', default=False, action='store_true', dest='keep_intermediate', \
        help='''specify if intermediate and temporary files are to be kept for debugging.
        Intermediate files include: promoter-supported reads file,
        read assignments to firstpass isoforms''')
    parser.add_argument('--generate_map', default=False, action='store_true', dest='generate_map', \
        help='''specify this argument to generate a txt file of which reads are assigned to each isoform.
        note: only works if the quantification method is not using salmon (default: not specified)''')
    parser.add_argument('--quiet', default=False, action='store_true', dest='quiet', \
        help='''Suppress progress statements from being printed''')
    parser.add_argument('--range', default='', action='store', dest='range', \
        help='''interval for which to collapse isoforms for, formatted chromosome:coord1-coord2 or tab-delimited;
        if a range is specified, then the aligned reads bam must be specified with -r
        and the query must be a sorted, bgzip-ed bed file''')
    parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
        default='', help='Path to salmon executable, specify if salmon quantification is desired')
    parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
        help='directory to put temporary files. use "./" to indicate current directory (default: python tempfile directory)')
    parser.add_argument('-o', '--output', default='flair.collapse', \
        action='store', dest='o', help='output file name base for FLAIR isoforms (default: flair.collapse)')
    args, unknown = parser.parse_known_args()
    if unknown and not args.quiet:
        sys.stderr.write('Collapse unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if corrected_reads:
        args.q = corrected_reads
    # Unique per-invocation prefix for all temporary files created below.
    tempfile_dir = tempfile.NamedTemporaryFile().name
    tempfile_name = tempfile_dir[tempfile_dir.rfind('/')+1:]+'.'
    if args.temp_dir == '':
        args.temp_dir = tempfile_dir+'/'
    if not os.path.isdir(args.temp_dir):
        if subprocess.call(['mkdir', args.temp_dir]):
            sys.stderr.write('Could not make temporary directory {}\n'.format(args.temp_dir))
            return 1
    if args.temp_dir[-1] != '/':
        args.temp_dir += '/'
    if genomic_range:
        # Invoked as a collapse_range worker: outputs and query live in temp_dir,
        # keyed by the module-level run_id, and progress chatter is suppressed.
        args.range = genomic_range
        args.o = args.temp_dir+run_id
        args.q = args.temp_dir+run_id+'.sorted.bed.gz'
        args.quiet = True
    if args.m[-8:] != 'minimap2':
        # Allow -m to be a directory containing the minimap2 binary.
        if args.m[-1] == '/':
            args.m += 'minimap2'
        else:
            args.m += '/minimap2'
    args.t, args.quality = str(args.t), str(args.quality)
    args.o += '.'
    if not os.path.exists(args.q):
        sys.stderr.write('Query file path does not exist\n')
        return 1
    if os.stat(args.q).st_size == 0:
        sys.stderr.write('Query file is empty\n')
        return 1
    if args.range:
        if '\t' in args.range:
            args.range = args.range.split('\t')
            args.range = args.range[0]+':'+args.range[1]+'-'+args.range[2]
        ext = '.bed'
        args.o += args.range+'.'
        if args.r[0][-3:] != 'bam':
            sys.stderr.write('Must provide genome alignment BAM with -r if range is specified\n')
            return 1
        # Extract the reads overlapping the range from each BAM, then convert to fasta.
        bams = []
        for i in range(len(args.r)):
            bams += [args.temp_dir+tempfile_name+args.range+str(i)+'.bam']
            if subprocess.call([args.sam, 'view', '-h', args.r[i], args.range], \
                stdout=open(bams[-1], 'w')):
                return 1
        args.r = []
        for i in range(len(bams)):
            args.r += [bams[i][:-3]+'fasta']
            subprocess.call([args.sam, 'fasta', bams[i]], \
                stdout=open(args.r[-1], 'w'), \
                stderr=open(args.temp_dir+tempfile_name+'bam2fq_stderr', 'w'))
        subprocess.call(['rm'] + bams)
        # Subset the tabix-indexed query bed to the requested range.
        chrom = args.range[:args.range.find(':')]
        coord1 = args.range[args.range.find(':')+1:args.range.find('-')]
        coord2 = args.range[args.range.find('-')+1:]
        precollapse = args.temp_dir+tempfile_name+args.range+'.bed'
        coordfile = open(args.temp_dir+tempfile_name+args.range+'.range.bed', 'wt')
        coordfile.write('\t'.join([chrom, coord1, coord2]))
        coordfile.close()
        if subprocess.call(['tabix', '-R', args.temp_dir+tempfile_name+args.range+'.range.bed', args.q], \
            stdout=open(precollapse, 'w')):
            sys.stderr.write('Query file needs to be a sorted, bgzip-ed, tabix-indexed bed file if range is specified\n')
            return 1
    else:
        ext = '.'+args.q[-3:]
        precollapse = args.q
        args.r = args.r[0].split(',') if ',' in args.r[0] else args.r
    intermediate = []
    if args.p:
        if not args.quiet: sys.stderr.write('Filtering out reads without promoter-supported TSS\n')
        if subprocess.call([sys.executable, path+'bin/pull_starts.py', args.q, args.temp_dir+tempfile_name+'tss.bed']):
            return 1
        if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tss.bed', '-b', args.p], \
            stdout=open(args.temp_dir+tempfile_name+'promoter_intersect.bed', 'w')):
            return 1
        precollapse = args.o+'promoter_supported'+ext
        subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'promoter_intersect.bed', \
            args.q, precollapse])
        intermediate += [args.temp_dir+tempfile_name+'tss.bed', precollapse]
    if args.threeprime:
        if not args.quiet: sys.stderr.write('Filtering out reads without TES support\n')
        if subprocess.call([sys.executable, path+'bin/pull_starts.py', precollapse, args.temp_dir+tempfile_name+'tes.bed', 'reverse']):
            return 1
        if subprocess.call([args.b, 'intersect', '-a', args.temp_dir+tempfile_name+'tes.bed', '-b', args.threeprime], \
            stdout=open(args.temp_dir+tempfile_name+'tes_intersect.bed', 'w')):
            return 1
        precollapse = args.o+'tes_supported'+ext
        subprocess.call([sys.executable, path+'bin/psl_reads_from_bed.py', args.temp_dir+tempfile_name+'tes_intersect.bed', \
            args.q, precollapse])
        intermediate += [args.temp_dir+tempfile_name+'tes.bed', precollapse]
    # First-pass isoform calling from the (optionally filtered) reads.
    collapse_cmd = [sys.executable, path+'bin/collapse_isoforms_precise.py', '-q', precollapse, \
        '-m', str(args.max_ends), '-w', args.w, '-n', args.n, '-o', args.o+'firstpass.unfiltered'+ext]
    if args.f and not args.no_end_adjustment:
        collapse_cmd += ['-f', args.f]
    if args.i:
        collapse_cmd += ['-i']
    if args.quiet:
        collapse_cmd += ['--quiet']
    if subprocess.call(collapse_cmd):
        return 1
    # filtering out subset isoforms with insufficient support
    if subprocess.call([sys.executable, path+'bin/filter_collapsed_isoforms.py', \
        args.o+'firstpass.unfiltered'+ext, args.filter, args.o+'firstpass'+ext, args.w]):
        return 1
    intermediate += [args.o+'firstpass.unfiltered'+ext]
    # rename first-pass isoforms to annotated transcript IDs if they match
    if args.f:
        if not args.quiet: sys.stderr.write('Renaming isoforms\n')
        if subprocess.call([sys.executable, path+'bin/identify_gene_isoform.py', \
            args.o+'firstpass'+ext, args.f, args.o+'firstpass.named'+ext]):
            # was sys.exit(1); return 1 keeps this function's error convention and
            # avoids hard-killing a multiprocessing Pool worker in collapse_range
            return 1
        subprocess.call(['mv', args.o+'firstpass.named'+ext, args.o+'firstpass'+ext])
    if subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'firstpass'+ext, \
        args.g, args.o+'firstpass.fa']):
        return 1
    # reassign reads to first-pass isoforms
    if not args.quiet: sys.stderr.write('Aligning reads to first-pass isoform reference\n')
    align_files = []
    alignout = args.temp_dir + tempfile_name + 'firstpass.'
    try:
        if subprocess.call([args.m, '-a', '-t', args.t, '-N', '4', args.o+'firstpass.fa'] + args.r, \
            stdout=open(alignout+'sam', 'w'), stderr=open(alignout+'mm2_stderr', 'w')):
            return 1
    except Exception as e:
        sys.stderr.write(str(e)+'\n\n\nMinimap2 error, please check that all file, directory, and executable paths exist\n')
        return 1
    # count the number of supporting reads for each first-pass isoform
    if args.salmon:  # use salmon to count
        if subprocess.call([args.sam, 'view', '-F', '4', '-h', '-S', alignout+'sam'], \
            stdout=open(alignout+'mapped.sam', 'w')):
            return 1
        subprocess.call(['mv', alignout+'mapped.sam', alignout+'sam'])
        subprocess.call([args.salmon, 'quant', '-t', args.o+'firstpass.fa', '-o', alignout+'salmon', \
            '-p', args.t, '-l', 'U', '-a', alignout+'sam'], stderr=open(alignout+'salmon_stderr.txt', 'w'))
        count_file = alignout+'salmon/quant.sf'
        align_files += [alignout+'sam', alignout+'salmon/quant.sf']
    else:
        # MAPQ filter is skipped entirely when the ends are trusted.
        args.quality = '0' if args.trust_ends else args.quality
        if args.quality != '0':
            subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', alignout+'sam'], \
                stdout=open(alignout+'q.sam', 'w'), stderr=open(alignout+'q.samtools_stderr', 'w'))
            align_files += [alignout+'sam']
        else:
            subprocess.call(['mv', alignout+'sam', alignout+'q.sam'])
        count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', alignout+'q.sam', \
            '-o', alignout+'q.counts', '-t', args.t, '--quality', args.quality]
        if args.stringent:
            count_cmd += ['--stringent', '-i', args.o+'firstpass'+ext]
        if args.trust_ends:
            count_cmd += ['--trust_ends']
        if args.generate_map:
            count_cmd += ['--generate_map', args.o+'isoform.read.map.txt']
        if subprocess.call(count_cmd):
            sys.stderr.write('Failed at counting step for isoform read support\n')
            return 1
        count_file = alignout+'q.counts'
        align_files += [alignout+'q.sam']
    subprocess.call([sys.executable, path+'bin/combine_counts.py', count_file, args.o+'firstpass.q.counts'])
    if not args.quiet: sys.stderr.write('Filtering isoforms by read coverage\n')
    subprocess.call([sys.executable, path+'bin/match_counts.py', args.o+'firstpass.q.counts', \
        args.o+'firstpass'+ext, args.s, args.o+'isoforms'+ext])
    subprocess.call([sys.executable, path+'bin/psl_to_sequence.py', args.o+'isoforms'+ext, \
        args.g, args.o+'isoforms.fa'])
    if args.f:
        subprocess.call([sys.executable, path+'bin/psl_to_gtf.py', args.o+'isoforms'+ext], \
            stdout=open(args.o+'isoforms.gtf', 'w'))
    subprocess.call(['rm', '-rf', args.o+'firstpass.fa', alignout+'q.counts'])
    if not args.keep_intermediate:
        subprocess.call(['rm', args.o+'firstpass.q.counts', args.o+'firstpass'+ext])
        subprocess.call(['rm', '-rf'] + glob.glob(args.temp_dir+'*'+tempfile_name+'*') + align_files + intermediate)
    # Return the file that was actually written: it carries the query's extension
    # (previously hardcoded '.bed', a path that does not exist for psl input).
    return args.o+'isoforms'+ext, args.o+'isoforms.fa'
def quantify(isoform_sequences=''):
    """flair quantify: align each sample's reads to the collapsed isoform fasta
    (minimap2) and assemble an isoform x sample counts matrix.

    isoform_sequences -- isoform fasta from the collapse module; overrides -i
        when the modules are chained.

    Returns the counts-matrix path on success, or 1 on failure.
    """
    parser = argparse.ArgumentParser(description='flair-quantify parse options', \
        usage='python flair.py quantify -r reads_manifest.tsv -i isoforms.fa [options]')
    parser.add_argument('quantify')  # positional mode token consumed from sys.argv
    required = parser.add_argument_group('required named arguments')
    if not isoform_sequences:
        required.add_argument('-r', '--reads_manifest', action='store', dest='r', type=str, \
            required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
        required.add_argument('-i', '--isoforms', action='store', dest='i', \
            type=str, required=True, help='FastA of FLAIR collapsed isoforms')
    else:
        required.add_argument('--reads_manifest', action='store', dest='r', type=str, \
            required=True, help='Tab delimited file containing sample id, condition, batch, reads.fq')
    parser.add_argument('-m', '--minimap2', type=str, default='minimap2', \
        action='store', dest='m', help='path to minimap2 if not in $PATH')
    parser.add_argument('-t', '--threads', type=int, \
        action='store', dest='t', default=4, help='minimap2 number of threads (4)')
    parser.add_argument('-sam', '--samtools', action='store', dest='sam', default='samtools', \
        help='specify a samtools executable path if not in $PATH if --quality is also used')
    parser.add_argument('--quality', type=int, action='store', dest='quality', default=1, \
        help='''minimum MAPQ of read assignment to an isoform. If using salmon, all alignments are
        used (1)''')
    parser.add_argument('-o', '--output', type=str, action='store', dest='o', \
        default='counts_matrix.tsv', help='Counts matrix output file name prefix (counts_matrix.tsv)')
    parser.add_argument('--salmon', type=str, action='store', dest='salmon', \
        default='', help='Path to salmon executable, specify if salmon quantification is desired')
    parser.add_argument('--tpm', action='store_true', dest='tpm', default=False, \
        help='specify this flag to output additional file with expression in TPM')
    parser.add_argument('--trust_ends', default=False, action='store_true', dest='trust_ends', \
        help='specify if reads are generated from a long read method with minimal fragmentation')
    parser.add_argument('--temp_dir', default='', action='store', dest='temp_dir', \
        help='''directory to put temporary files. use "./" to indicate current directory
        (default: python tempfile directory)''')
    args, unknown = parser.parse_known_args()
    if unknown:
        sys.stderr.write('Quantify unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if isoform_sequences:
        args.i = isoform_sequences
        args.o += '.counts_matrix.tsv'
    try:
        import numpy as np
        import codecs
    except ImportError:  # was a bare except; only an import failure can occur here
        sys.stderr.write('Numpy import error. Please pip install numpy. Exiting.\n')
        sys.exit(1)
    if args.m[-8:] != 'minimap2':
        # Allow -m to be a directory containing the minimap2 binary.
        if args.m[-1] == '/':
            args.m += 'minimap2'
        else:
            args.m += '/minimap2'
    args.t, args.quality = str(args.t), str(args.quality)
    # Parse the manifest; each entry becomes [sample, condition, batch, reads, sam_path].
    samData = list()
    with codecs.open(args.r, "r", encoding='utf-8', errors='ignore') as lines:
        for line in lines:
            cols = line.rstrip().split('\t')
            if len(cols) != 4:  # was < 4; extra columns previously crashed on unpacking
                sys.stderr.write('Expected 4 columns in manifest.tsv, got %s. Exiting.\n' % len(cols))
                return 1
            sample, group, batch, readFile = cols
            readFileRoot = tempfile.NamedTemporaryFile().name
            if args.temp_dir != '':
                if not os.path.isdir(args.temp_dir):
                    subprocess.call(['mkdir', args.temp_dir])
                readFileRoot = args.temp_dir + '/' + readFileRoot[readFileRoot.rfind('/')+1:]
            samData.append(cols + [readFileRoot + '.sam'])
    # Step 1: align every sample's reads against the isoform fasta.
    for num,sample in enumerate(samData,0):
        sys.stderr.write("Step 1/3. Aligning sample %s_%s: %s/%s \r" % (sample[0],sample[2],num+1,len(samData)))
        mm2_command = [args.m, '-a', '-N', '4', '-t', args.t, args.i, sample[-2]]
        try:
            if subprocess.call(mm2_command, stdout=open(sample[-1], 'w'), \
                stderr=open(sample[-1]+'.mm2_stderr.txt', 'w')):
                sys.stderr.write('Check {} file\n'.format(sample[-1]+'.mm2_stderr.txt'))
                return 1
        except Exception:  # was a bare except, which also swallowed KeyboardInterrupt
            sys.stderr.write('''Possible minimap2 error, please check that all file, directory,
            and executable paths exist\n''')
            return 1
        subprocess.call(['rm', sample[-1]+'.mm2_stderr.txt'])
        sys.stderr.flush()
        # Pre-filter by MAPQ unless ends are trusted or salmon will consume all alignments.
        if args.quality != '0' and not args.trust_ends and not args.salmon:
            if subprocess.call([args.sam, 'view', '-q', args.quality, '-h', '-S', sample[-1]], \
                stdout=open(sample[-1]+'.qual.sam', 'w')):
                return 1
            subprocess.call(['mv', sample[-1]+'.qual.sam', sample[-1]])
    # Step 2: count reads per isoform for each sample.
    countData = dict()  # isoform id -> per-sample count vector
    for num,data in enumerate(samData):
        sample, group, batch, readFile, samOut = data
        sys.stderr.write("Step 2/3. Quantifying isoforms for sample %s_%s: %s/%s \r" % (sample,batch,num+1,len(samData)))
        if not args.salmon:
            count_cmd = [sys.executable, path+'bin/count_sam_transcripts.py', '-s', samOut, \
                '-o', samOut+'.counts.txt', '-t', args.t, '--quality', args.quality]
            if args.trust_ends:
                count_cmd += ['--trust_ends']
            subprocess.call(count_cmd)
            for line in open(samOut+'.counts.txt'):
                line = line.rstrip().split('\t')
                iso, numreads = line[0], line[1]
                if iso not in countData: countData[iso] = np.zeros(len(samData))
                countData[iso][num] = numreads
        else:
            subprocess.call([args.salmon, 'quant', '-t', args.i, '-o', samOut[:-4]+'.salmon', \
                '-p', args.t, '-l', 'U', '-a', samOut], stderr=open('salmon_stderr.txt', 'w'))
            salmonOut = open(samOut[:-4]+'.salmon/quant.sf')
            salmonOut.readline()  # header
            for line in salmonOut:
                line = line.rstrip().split('\t')
                iso, tpm, numreads = line[0], line[3], line[4]
                if iso not in countData: countData[iso] = np.zeros(len(samData))
                if args.tpm:
                    countData[iso][num] = tpm
                else:
                    countData[iso][num] = numreads
            subprocess.call(['rm', '-r', samOut[:-4]+'.salmon/', 'salmon_stderr.txt'])
        sys.stderr.flush()
        subprocess.call(['rm', samOut])
    # Step 3: write the matrix; column headers are sample_condition_batch.
    sys.stderr.write("Step 3/3. Writing counts to {} \r".format(args.o))
    with open(args.o,'w') as countMatrix:
        countMatrix.write("ids\t%s\n" % "\t".join(["_".join(x[:3]) for x in samData]))
        for f in sorted(countData):
            countMatrix.write("%s\t%s\n" % (f,"\t".join(str(x) for x in countData[f])))
    sys.stderr.flush()
    sys.stderr.write("\n")
    if args.tpm and not args.salmon:
        # Salmon output can already be TPM; only convert the raw-count path.
        subprocess.call([sys.executable, path+'bin/counts_to_tpm.py', args.o, args.o+'.tpm.tsv'])
    return args.o
def diffExp(counts_matrix=''):
    """flair diffExp: run differential expression (bin/deFLAIR.py) on a counts
    matrix produced by the quantify module.

    counts_matrix -- matrix path from quantify; overrides -q when chained.
    Returns None (deFLAIR.py's exit status is not propagated).
    """
    parser = argparse.ArgumentParser(description='flair-diffExp parse options', \
        usage='python flair.py diffExp -q counts_matrix.tsv --out_dir out_dir [options]')
    parser.add_argument('diffExp')  # positional mode token consumed from sys.argv
    required = parser.add_argument_group('required named arguments')
    if not counts_matrix:
        required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
            type=str, required=True, help='Tab-delimited isoform count matrix from flair quantify module.')
    required.add_argument('-o', '--out_dir', action='store', dest='o', \
        type=str, required=True, help='Output directory for tables and plots.')
    parser.add_argument('-t', '--threads', action='store', dest='t', \
        type=int, required=False, default=4, help='Number of threads for parallel DRIMSeq.')
    parser.add_argument('-e', '--exp_thresh', action='store', dest='e', type=int, required=False, \
        default=10, help='Read count expression threshold. Isoforms in which \
        both conditions contain fewer than E reads are filtered out (Default E=10)')
    parser.add_argument('-of', '--out_dir_force', action='store_true', dest='of', \
        required=False, help='''Specify this argument to force overwriting of files in
        an existing output directory''')
    args, unknown = parser.parse_known_args()
    if unknown:
        sys.stderr.write('DiffExp unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if counts_matrix:
        args.q = counts_matrix
        # NOTE(review): removed the dead statement `args.o+'.diffExp'` — its result
        # was discarded, so it never affected the output directory name.
    scriptsBin = path + "bin/"
    runDE = scriptsBin + "deFLAIR.py"
    DEcommand = [sys.executable, '-W ignore', runDE, '--filter', str(args.e), '--threads', \
        str(args.t), '--outDir', args.o, '--matrix', args.q]
    if args.of:
        DEcommand += ['-of']
    subprocess.call(DEcommand)
    return
def diffSplice(isoforms='', counts_matrix=''):
    """flair diffSplice: call alternative-splicing events (ES, alt5, alt3, IR)
    from FLAIR isoforms and optionally test them with DRIMSeq (bin/runDS.py).

    isoforms -- bed/psl isoform file from collapse; overrides -i when chained.
    counts_matrix -- matrix from quantify; overrides -q when chained.
    Returns None on success, or 1 on a usage error.
    """
    parser = argparse.ArgumentParser(description='flair-diffSplice parse options', \
        usage='python flair.py diffSplice -i isoforms.bed|isoforms.psl -q counts_matrix.tsv [options]')
    # positional mode token; was misnamed 'diffExp' (copy-paste) — the attribute
    # it stores is never read, so renaming it is safe
    parser.add_argument('diffSplice')
    required = parser.add_argument_group('required named arguments')
    if not isoforms:
        required.add_argument('-i', '--isoforms', action='store', dest='i', required=True, \
            type=str, help='isoforms in bed or psl format')
        required.add_argument('-q', '--counts_matrix', action='store', dest='q', \
            type=str, required=True, help='tab-delimited isoform count matrix from flair quantify module')
    parser.add_argument('-o', '--output', action='store', dest='o', default='flair.diffsplice', type=str, \
        required=False, help='output file name base for FLAIR isoforms (default: flair.diffsplice)')
    parser.add_argument('--test', action='store_true', dest='test', \
        required=False, default=False, help='Run DRIMSeq statistical testing')
    parser.add_argument('-t', '--threads', action='store', dest='t', \
        type=int, required=False, default=1, help='Number of threads DRIMSeq (1)')
    parser.add_argument('--drim1', action='store', dest='drim1', type=int, required=False, default=6, \
        help='''The minimum number of samples that have coverage over an AS event inclusion/exclusion
        for DRIMSeq testing; events with too few samples are filtered out and not tested (6)''')
    parser.add_argument('--drim2', action='store', dest='drim2', type=int, required=False, default=3, \
        help='''The minimum number of samples expressing the inclusion of an AS event;
        events with too few samples are filtered out and not tested (3)''')
    parser.add_argument('--drim3', action='store', dest='drim3', type=int, required=False, default=15, \
        help='''The minimum number of reads covering an AS event inclusion/exclusion for DRIMSeq testing,
        events with too few samples are filtered out and not tested (15)''')
    parser.add_argument('--drim4', action='store', dest='drim4', type=int, required=False, default=5, \
        help='''The minimum number of reads covering an AS event inclusion for DRIMSeq testing,
        events with too few samples are filtered out and not tested (5)''')
    parser.add_argument('--batch', action='store_true', dest='batch', required=False, default=False, \
        help='''If specified with --test, DRIMSeq will perform batch correction''')
    parser.add_argument('--conditionA', action='store', dest='conditionA', required=False, default='', \
        help='''Specify one condition corresponding to samples in the counts_matrix to be compared against
        condition2; by default, the first two unique conditions are used''')
    parser.add_argument('--conditionB', action='store', dest='conditionB', required=False, default='', \
        help='''Specify another condition corresponding to samples in the counts_matrix to be compared against
        conditionA''')
    args, unknown = parser.parse_known_args()
    if unknown:
        sys.stderr.write('DiffSplice unrecognized arguments: {}\n'.format(' '.join(unknown)))
    if isoforms:
        args.i = isoforms
        args.q = counts_matrix
    if args.i[-3:].lower() == 'psl':
        subprocess.call([sys.executable, path+'bin/psl_to_bed.py', args.i, args.i+'.bed'])
        args.i = args.i+'.bed'
    # Call the AS events and quantify inclusion/exclusion per sample.
    subprocess.call([sys.executable, path+'bin/call_diffsplice_events.py', args.i, args.o, args.q])
    subprocess.call([sys.executable, path+'bin/es_as.py', args.i], stdout=open(args.o+'.es.events.tsv','w'))
    subprocess.call([sys.executable, path+'bin/es_as_inc_excl_to_counts.py', args.q, args.o+'.es.events.tsv'], \
        stdout=open(args.o+'.es.events.quant.tsv','w'))
    subprocess.call(['rm', args.o+'.es.events.tsv'])
    # Fixed copy-paste typo: drim4 was checked twice and drim3 never. NOTE(review):
    # with the non-zero drim defaults this condition is always true, so DRIMSeq
    # runs even without --test; behavior preserved as-is.
    if args.test or args.drim1 or args.drim2 or args.drim3 or args.drim4:
        sys.stderr.write('DRIMSeq testing for each AS event type\n')
        drim1, drim2, drim3, drim4 = [str(x) for x in [args.drim1, args.drim2, args.drim3, args.drim4]]
        ds_command = [sys.executable, path+'bin/runDS.py', '--threads', str(args.t), \
            '--drim1', drim1, '--drim2', drim2, '--drim3', drim3, '--drim4', drim4]
        if args.batch:
            ds_command += ['--batch']
        if args.conditionA:
            if not args.conditionB:
                sys.stderr.write('Both conditionA and conditionB must be specified, or both left unspecified\n')
                return 1
            ds_command += ['--conditionA', args.conditionA, '--conditionB', args.conditionB]
        with open(args.o+'.stderr.txt', 'w') as ds_stderr:
            subprocess.call(ds_command + ['--matrix', args.o+'.es.events.quant.tsv', '--prefix', args.o+'.es'], stderr=ds_stderr)
            subprocess.call(ds_command + ['--matrix', args.o+'.alt5.events.quant.tsv', '--prefix', args.o+'.alt5'], stderr=ds_stderr)
            subprocess.call(ds_command + ['--matrix', args.o+'.alt3.events.quant.tsv', '--prefix', args.o+'.alt3'], stderr=ds_stderr)
            subprocess.call(ds_command + ['--matrix', args.o+'.ir.events.quant.tsv', '--prefix', args.o+'.ir'], stderr=ds_stderr)
    return
# Directory containing this script, so the bin/ helper scripts can be located.
path = '/'.join(os.path.realpath(__file__).split("/")[:-1])+'/'

if len(sys.argv) < 2:
    sys.stderr.write('usage: python flair.py <mode> --help \n')
    sys.stderr.write('modes: align, correct, collapse, quantify, diffExp, diffSplice\n')
    sys.stderr.write('Multiple modules can be run when specified using numbers, e.g.:\n')
    sys.stderr.write('python flair.py 1234 ...\n')  # fixed: was missing the trailing newline
    sys.exit(1)
else:
    mode = sys.argv[1].lower()

# Modes can be chained by digits ('1234'); each module's return value feeds the
# next one. A sentinel of 0 means "module was not run / produced no output".
aligned_reads, corrected_reads, isoforms, isoform_sequences, counts_matrix = [0]*5
if mode == 'align' or '1' in mode:
    status = align()
    if status == 1:
        sys.exit(1)
    else:
        aligned_reads = status
if mode == 'correct' or '2' in mode:
    if aligned_reads:
        status = correct(aligned_reads=aligned_reads)
    else:
        status = correct()
    if status == 1:
        sys.exit(1)
    else:
        corrected_reads = status
# Plain collapse (mode 3), but not when the collapse-range variant (3.5) was asked for.
if mode == 'collapse' or ('3' in mode and '3.5' not in mode):
    if corrected_reads:
        status = collapse(corrected_reads=corrected_reads)
    else:
        status = collapse()
    if status == 1:
        sys.exit(1)
    else:
        isoforms, isoform_sequences = status
if mode == 'collapse-range' or '3.5' in mode:
    from multiprocessing import Pool
    # run_id is the module-level prefix used by collapse_range worker files.
    tempfile_name = tempfile.NamedTemporaryFile().name
    run_id = tempfile_name[tempfile_name.rfind('/')+1:]
    if corrected_reads and not aligned_reads:
        sys.stderr.write('''Collapse 3.5 run consecutively without align module; will assume {}
        to be the name of the aligned reads bam file\n'''.format(corrected_reads[:-18]+'.bam'))
        status = collapse_range(corrected_reads=corrected_reads, \
            aligned_reads=corrected_reads[:-18]+'.bam')
    elif corrected_reads and aligned_reads:
        status = collapse_range(corrected_reads=corrected_reads, aligned_reads=aligned_reads)
    elif not corrected_reads and aligned_reads:
        sys.stderr.write('Correct module not run...\n')
        status = collapse_range(corrected_reads=aligned_reads, aligned_reads=aligned_reads)
    else:
        status = collapse_range()
    if status == 1:
        sys.exit(1)
    else:
        isoforms, isoform_sequences = status
    # Strip '3.5' so its '5' does not spuriously trigger the diffExp mode below.
    mode = mode.replace('3.5', 'x')
if mode == 'quantify' or '4' in mode:
    if isoform_sequences:
        status = quantify(isoform_sequences=isoform_sequences)
    else:
        status = quantify()
    if status == 1:
        sys.exit(1)
    else:
        counts_matrix = status
if mode == 'diffexp' or '5' in mode:
    if counts_matrix:
        status = diffExp(counts_matrix=counts_matrix)
    else:
        status = diffExp()
    if status == 1:
        sys.exit(1)
if mode == 'diffsplice' or '6' in mode:
    if counts_matrix and isoforms:
        status = diffSplice(isoforms=isoforms, counts_matrix=counts_matrix)
    elif not isoforms and counts_matrix:
        # diffSplice needs the isoform models themselves, not just the matrix.
        sys.stderr.write('DiffSplice run consecutively without collapse module, exiting\n')
        sys.exit(1)
    else:
        status = diffSplice()
    if status == 1:
        sys.exit(1)
if mode == '--version':
    sys.stderr.write('FLAIR v1.5.1\n')
| true | true |
f715cce6602d941edd23731c966b3365ce9f1f13 | 1,536 | py | Python | polling_stations/apps/data_collection/management/commands/import_hertsmere.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_hertsmere.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_hertsmere.py | chris48s/UK-Polling-Stations | 4742b527dae94f0276d35c80460837be743b7d17 | [
"BSD-3-Clause"
] | null | null | null | from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
    """Import polling districts and stations for Hertsmere (GSS code E07000098)."""

    council_id = 'E07000098'
    srid = 27700
    districts_srid = 27700
    districts_name = 'PollingDistricts'
    stations_name = 'PollingStations.shp'
    elections = [
        'local.hertfordshire.2017-05-04',
        'parl.2017-06-08'
    ]

    def district_record_to_dict(self, record):
        # The district identifier doubles as the polling station id in this data.
        district_id = str(record[0]).strip()
        return {
            'internal_council_id': district_id,
            'name': str(record[1]).strip(),
            'polling_station_id': district_id,
        }

    def format_address(self, record):
        # Columns 3-6 of the record hold the address lines.
        parts = [record[idx].strip() for idx in range(3, 7)]
        # Empty byte-string fields become empty text fields.
        parts = ['' if part == b'' else part for part in parts]
        # Fold the first very short fragment (e.g. a house number) into the
        # following line, then stop looking.
        for idx, part in enumerate(parts):
            if 0 < len(part) <= 3:
                parts[idx + 1] = part + ' ' + parts[idx + 1]
                parts[idx] = ''
                break
        joined = "\n".join(parts)
        # Collapse runs of blank lines left by the emptied slots.
        while "\n\n" in joined:
            joined = joined.replace("\n\n", "\n")
        return joined.strip()

    def station_record_to_dict(self, record):
        raw_postcode = record[8].strip()
        return {
            'internal_council_id': str(record[1]).strip(),
            'address': self.format_address(record),
            'postcode': '' if raw_postcode == b'' else raw_postcode,
        }
| 34.133333 | 83 | 0.570313 | from data_collection.management.commands import BaseShpStationsShpDistrictsImporter
class Command(BaseShpStationsShpDistrictsImporter):
    """Import polling districts and stations for council E07000098.

    Both layers are supplied as ESRI shapefiles projected in the British
    National Grid (EPSG:27700).
    """
    council_id = 'E07000098'
    # Source shapefiles use the British National Grid projection.
    srid = 27700
    districts_srid = 27700
    districts_name = 'PollingDistricts'
    stations_name = 'PollingStations.shp'
    elections = [
        'local.hertfordshire.2017-05-04',
        'parl.2017-06-08'
    ]
    def district_record_to_dict(self, record):
        """Map one district record: field 0 is the district code (which
        doubles as the polling station id), field 1 the district name."""
        return {
            'internal_council_id': str(record[0]).strip(),
            'name': str(record[1]).strip(),
            'polling_station_id': str(record[0]).strip(),
        }
    def format_address(self, record):
        """Build a multi-line address from fields 3-6, folding the first
        1-3 character fragment into the following part and squeezing out
        blank lines."""
        address_parts = [record[x].strip() for x in range(3, 7)]
        # Blank fields may come back as empty bytes; normalise to ''.
        # NOTE(review): assumes non-empty values are str -- confirm.
        for i, part in enumerate(address_parts):
            if part == b'':
                address_parts[i] = ''
        for i, part in enumerate(address_parts):
            # Merge the first very short fragment into the next part.
            # NOTE(review): if the *last* part is 1-3 chars this indexes
            # past the end of address_parts (IndexError).
            if len(part) <= 3 and len(part) > 0:
                address_parts[i+1] = part + ' ' + address_parts[i+1]
                address_parts[i] = ''
                break
        address = "\n".join(address_parts)
        # Collapse blank lines left behind by emptied parts.
        while "\n\n" in address:
            address = address.replace("\n\n", "\n")
        return address.strip()
    def station_record_to_dict(self, record):
        """Map one station record: field 1 is the id, field 8 the
        postcode; the address comes from format_address."""
        postcode = record[8].strip()
        # Normalise an empty-bytes postcode to an empty str.
        if postcode == b'':
            postcode = ''
        return {
            'internal_council_id': str(record[1]).strip(),
            'address' : self.format_address(record),
            'postcode': postcode,
        }
| true | true |
f715cd43b332d5cf3dd6ecd996c97808734feaac | 1,497 | py | Python | superset/db_engine_specs/gsheets.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
] | 1 | 2022-01-03T08:36:11.000Z | 2022-01-03T08:36:11.000Z | superset/db_engine_specs/gsheets.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
] | 63 | 2021-06-12T18:25:14.000Z | 2022-03-21T07:57:02.000Z | superset/db_engine_specs/gsheets.py | ayuanty/superset | 132a8ef2cb55fa6692ea31d5c278f102d6c2886b | [
"Apache-2.0"
] | 1 | 2021-10-01T20:16:18.000Z | 2021-10-01T20:16:18.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from sqlalchemy.engine.url import URL
from superset import security_manager
from superset.db_engine_specs.sqlite import SqliteEngineSpec
class GSheetsEngineSpec(SqliteEngineSpec):
    """Engine spec for Google Sheets.

    Subclasses SqliteEngineSpec, so SQLite query semantics apply to the
    SQL generated for this engine.
    """
    engine = "gsheets"
    engine_name = "Google Sheets"
    # Joins are disabled for this engine; subqueries remain allowed.
    allows_joins = False
    allows_subqueries = True
    @classmethod
    def modify_url_for_impersonation(
        cls, url: URL, impersonate_user: bool, username: Optional[str]
    ) -> None:
        """Mutate ``url`` in place to impersonate the logged-in user.

        Adds the user's e-mail address as the ``subject`` query parameter
        (presumably consumed by the gsheets driver as the delegation
        subject -- confirm against the driver in use). No-op when
        impersonation is disabled or the user has no e-mail on record.
        """
        if impersonate_user and username is not None:
            user = security_manager.find_user(username=username)
            if user and user.email:
                url.query["subject"] = user.email
| 36.512195 | 70 | 0.741483 |
from typing import Optional
from sqlalchemy.engine.url import URL
from superset import security_manager
from superset.db_engine_specs.sqlite import SqliteEngineSpec
class GSheetsEngineSpec(SqliteEngineSpec):
engine = "gsheets"
engine_name = "Google Sheets"
allows_joins = False
allows_subqueries = True
@classmethod
def modify_url_for_impersonation(
cls, url: URL, impersonate_user: bool, username: Optional[str]
) -> None:
if impersonate_user and username is not None:
user = security_manager.find_user(username=username)
if user and user.email:
url.query["subject"] = user.email
| true | true |
f715cd46be73951aea27a4ea4d8cd743000fd4dd | 1,164 | py | Python | test/functional/p2p_mempool.py | Pirontechv/Bitchain | 7ca7b6a8090f221d6982b09891c19ca5b7ace1d0 | [
"MIT"
] | 1 | 2020-03-13T14:59:52.000Z | 2020-03-13T14:59:52.000Z | test/functional/p2p_mempool.py | Pirontechv/Bitchain | 7ca7b6a8090f221d6982b09891c19ca5b7ace1d0 | [
"MIT"
] | null | null | null | test/functional/p2p_mempool.py | Pirontechv/Bitchain | 7ca7b6a8090f221d6982b09891c19ca5b7ace1d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitchainTestFramework
from test_framework.util import *
class P2PMempoolTests(BitchainTestFramework):
    """Verify that a node disconnects peers sending 'mempool' messages
    when bloom filters are not enabled (see module docstring)."""
    def set_test_params(self):
        """Run a single node on a clean chain with bloom filters off."""
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Disabling peer bloom filters makes the 'mempool' p2p message a
        # disconnectable offence, which is what this test exercises.
        self.extra_args = [["-peerbloomfilters=0"]]
    def run_test(self):
        """Send 'mempool' and assert the node drops the connection."""
        # Add a p2p connection and complete the version handshake.
        self.nodes[0].add_p2p_connection(P2PInterface())
        network_thread_start()
        self.nodes[0].p2p.wait_for_verack()
        # Request the mempool; without bloom filters the node should
        # disconnect us rather than answer.
        self.nodes[0].p2p.send_message(msg_mempool())
        self.nodes[0].p2p.wait_for_disconnect()
        # The mininode must be disconnected at this point.
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 32.333333 | 73 | 0.717354 |
from test_framework.mininode import *
from test_framework.test_framework import BitchainTestFramework
from test_framework.util import *
class P2PMempoolTests(BitchainTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
self.nodes[0].p2p.send_message(msg_mempool())
self.nodes[0].p2p.wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| true | true |
f715ce83ac789168b60816ea12ee97e12d21dee1 | 5,162 | py | Python | tinyquery/repeated_util.py | graingert/tinyquery | f26940a2ad240911e278ef7c82e3f14e0f4c5e4e | [
"MIT"
] | 104 | 2015-02-21T22:54:15.000Z | 2022-03-21T11:08:02.000Z | tinyquery/repeated_util.py | graingert/tinyquery | f26940a2ad240911e278ef7c82e3f14e0f4c5e4e | [
"MIT"
] | 14 | 2018-01-30T16:32:09.000Z | 2022-03-02T12:57:11.000Z | tinyquery/repeated_util.py | graingert/tinyquery | f26940a2ad240911e278ef7c82e3f14e0f4c5e4e | [
"MIT"
] | 28 | 2015-09-16T22:42:44.000Z | 2022-01-15T11:51:45.000Z | """Helper functions for dealing with repeated fields.
It comes up in a few places that we need to flatten or unflatten repeated
columns when using them in conjunction with other repeated or scalar fields.
These functions allow us to flatten into non-repeated columns to apply various
operations and then unflatten back into repeated columns afterwards.
"""
from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
    """Rebuild a repeated column from flattened results.

    Args:
        repetitions: a list of how many repeated values go in a row for
            each of the rows to process.
        values: a list of all the values that need to be packed into lists
        result: a (partial) result list to which the rows will be appended.
    Returns:
        a list of lists of values representing len(repetitions) rows, each
        of which with a number of values corresponding to that row's
        entry in repetitions

    Note: this used to be recursive and rebuilt the accumulator with
    ``result + [row]`` on every step, which was quadratic in the number
    of rows and could hit Python's recursion limit on long columns; it is
    now a single iterative pass.
    """
    if not repetitions:
        return result
    rows = list(result)
    position = 0
    for repetition in repetitions:
        # Rows with no values were flattened with a single None
        # placeholder, so at least one value is consumed per row.
        take = max(repetition, 1)
        chunk = values[position:position + take]
        position += take
        # [None] is the placeholder for "no values": restore the
        # canonical empty-list representation (cf. normalize_repeated_null).
        rows.append([] if chunk == [None] else chunk)
    return rows
def normalize_column_to_length(col, desired_count):
    """Stretch a column cell to a desired number of values.

    A scalar is repeated in a list of the target length. A list must hold
    0, 1 or exactly the target number of elements: full-length lists pass
    through unchanged, while empty or single-element lists are padded by
    repeating None or the single element. The target is always at least 1.
    """
    target = max(desired_count, 1)
    if not isinstance(col, list):
        # Scalar cell: repeat the single value.
        return [col] * target
    if len(col) == target:
        return col
    assert len(col) in (0, 1), (
        'Unexpectedly got a row with the incorrect number of '
        'repeated values.')
    return (col if col else [None]) * target
def flatten_column_values(repeated_column_indices, column_values):
    """Flatten a set of columns, expanding repeated fields.

    Three things happen at once:
      * repeated fields are flattened to one value per output row,
      * the number of repeated values per input row is recorded so the
        flattening can later be undone,
      * scalar and short columns are duplicated so every column ends up
        with the same number of entries.

    Args:
        repeated_column_indices: indices of the repeated columns; when
            there is more than one, their lengths are assumed to have
            been validated already (or to hold 0/1 elements).
        column_values: one list of values per column.
    Returns:
        (repetition_counts, flattened_columns): repetition_counts has one
        number per input row; flattened_columns has one flat (non-nested)
        list per column.
    """
    rows = list(zip(*column_values))
    # How many flattened rows each input row expands to; at least one so
    # that rows whose repeated fields are empty still appear.
    repetition_counts = []
    for row in rows:
        longest = max(len(row[index]) for index in repeated_column_indices)
        repetition_counts.append(max(longest, 1))
    # Stretch every cell of every row to that row's repetition count.
    stretched_rows = [
        [normalize_column_to_length(cell, count) for cell in row]
        for row, count in zip(rows, repetition_counts)
    ]
    # Transpose back to columns and concatenate each column's per-row lists.
    flattened_columns = []
    for column in zip(*stretched_rows):
        flat = []
        for cell_values in column:
            flat.extend(cell_values)
        flattened_columns.append(flat)
    return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
    """Decide whether col may be selected alongside the repeated ref_col.

    Selection is allowed when col is not repeated, or when in every row
    either col has at most one element, ref_col has at most one element,
    or the two element counts match.
    """
    if col.mode != tq_modes.REPEATED:
        return True
    for ref_values, cur_values in zip(ref_col.values, col.values):
        ref_count = len(ref_values)
        cur_count = len(cur_values)
        # Reject only when both sides have multiple, mismatched elements.
        if (cur_count != ref_count and cur_count not in (0, 1)
                and ref_count not in (0, 1)):
            return False
    return True
def normalize_repeated_null(value):
    """Return the canonical empty representation for repeated nulls.

    None, [] and [None] all denote "no values" for a repeated field; []
    is the canonical form, so the other two are mapped onto it. Any other
    value passes through untouched.
    """
    return [] if value is None or value == [None] else value
| 38.522388 | 79 | 0.678419 | from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
if len(repetitions) == 0:
return result
curr_repetition = repetitions[0]
curr_values = normalize_repeated_null(values[:max(curr_repetition, 1)])
return rebuild_column_values(
repetitions[1:],
values[max(curr_repetition, 1):],
result + [curr_values])
def normalize_column_to_length(col, desired_count):
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count
def flatten_column_values(repeated_column_indices, column_values):
rows = list(zip(*column_values))
repetition_counts = [
max(max(len(row[idx]) for idx in repeated_column_indices), 1)
for row in rows
]
rows_with_repetition_normalized = [
[
normalize_column_to_length(col, count)
for col in row
]
for row, count in zip(rows, repetition_counts)
]
normalized_columns = zip(*rows_with_repetition_normalized)
flattened_columns = [
[val for arr in col for val in arr]
for col in normalized_columns]
return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
if col.mode != tq_modes.REPEATED:
return True
ref_counts = [len(val) for val in ref_col.values]
counts = [len(val) for val in col.values]
return all(
rc == c or c in (0, 1) or rc in (0, 1)
for rc, c in zip(ref_counts, counts))
def normalize_repeated_null(value):
if value is None or value == [None]:
return []
return value
| true | true |
f715ceb943279ec375261a9adc1d7aa35db8622f | 12,515 | py | Python | imputena/simple_imputation/linear_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | 6 | 2020-04-27T21:21:47.000Z | 2022-03-30T03:02:54.000Z | imputena/simple_imputation/linear_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | 1 | 2021-07-01T18:49:27.000Z | 2021-07-01T18:49:27.000Z | imputena/simple_imputation/linear_regression.py | macarro/imputena | 3a94ae1419a2af0d9707b20546ee078929ce99e8 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from sklearn import linear_model
import logging
def linear_regression(
        data=None, dependent=None, predictors=None, regressions='available',
        noise=False, inplace=False):
    """Performs simple or multiple linear regression imputation on the data.

    First, the regression equation for the dependent variable given the
    predictor variables is computed, ignoring (via pairwise deletion) all
    rows with a missing value in the dependent or any predictor variable.
    Missing values in the dependent column are then imputed from that
    equation. If a predictor is itself missing in a row to impute, a
    regression based on the remaining available predictors is fitted just
    for such rows, unless regressions='complete', in which case those
    rows are left unimputed. With noise=True (stochastic regression
    imputation) a random draw from a normal distribution scaled by the
    model's standard error is added to each imputed value. If predictors
    is omitted, all other variables are used as predictors; if dependent
    is omitted, every column containing missing values is imputed in turn.

    :param data: The data on which to perform the imputation.
    :type data: pandas.DataFrame
    :param dependent: The variable in which to impute missing values.
    :type dependent: String, optional
    :param predictors: The predictor variables on which the dependent
        variable is dependent.
    :type predictors: array-like, optional
    :param regressions: If 'available': fall back to regressions based on
        the available predictors when some predictors have missing values
        themselves. If 'complete': only impute with the full model.
    :type regressions: {'available', 'complete'}, default 'available'
    :param noise: Whether to add noise to the imputed values (stochastic
        regression imputation)
    :type noise: bool, default False
    :param inplace: If True, do operation inplace and return None.
    :type inplace: bool, default False
    :return: The dataframe with linear regression imputation performed for
        the incomplete variable(s) or None if inplace=True.
    :rtype: pandas.DataFrame or None
    :raises: TypeError, ValueError
    """
    # Validate all arguments up front so no partial imputation happens on
    # bad input:
    if not isinstance(data, pd.DataFrame):
        raise TypeError('The data has to be a DataFrame.')
    if dependent is not None and dependent not in data.columns:
        raise ValueError(
            '\'' + dependent + '\' is not a column of the data.')
    if predictors is not None:
        for column in predictors:
            if column not in data.columns:
                raise ValueError(
                    '\'' + column + '\' is not a column of the data.')
    if regressions == 'available':
        do_available_regressions = True
    elif regressions == 'complete':
        do_available_regressions = False
    else:
        # Bug fix: the original concatenated the value and message with no
        # separator, producing e.g. "boguscould not be understood".
        raise ValueError(
            '\'' + regressions + '\' could not be understood.')
    # Assign a reference or copy to res, depending on inplace:
    if inplace:
        res = data
    else:
        res = data.copy()
    # If dependent is not set, apply the operation to each column that
    # contains missing data:
    if dependent is None:
        for column in data.columns:
            if data[column].isna().any():
                res.loc[:, :] = linear_regression_one_dependent(
                    res, column, predictors, do_available_regressions,
                    noise)
    # Otherwise apply the operation to the dependent column only:
    else:
        res.loc[:, :] = linear_regression_one_dependent(
            data, dependent, predictors, do_available_regressions, noise)
    # Return dataframe if the operation is not to be performed inplace:
    if not inplace:
        return res
def linear_regression_one_dependent(
        data, dependent, predictors, do_available_regressions, noise):
    """Auxiliary function that performs linear regression imputation for the
    dependent column. The difference with linear_regression() is that in
    that function dependent can be None, in which case this function is
    called for each column containing missing values.

    Works through a queue of predictor combinations: it starts with the
    full predictor set, and linear_regression_iter() records (via the
    shared limited_predictors_combs set) every reduced combination needed
    for rows where some predictor is itself missing. When
    do_available_regressions is True those reduced combinations are
    processed too; otherwise only the full model is used.

    :param data: The data on which to perform the linear regression imputation.
    :type data: pandas.DataFrame
    :param dependent: The dependent variable in which the missing values
        should be imputed.
    :type dependent: String
    :param predictors: The predictor variables on which the dependent variable
        is dependent.
    :type predictors: array-like
    :param do_available_regressions: Whether to do regressions for all
        available predictor combinations or only on complete ones
    :type do_available_regressions: bool
    :param noise: Whether to add noise to the imputed values (stochastic
        regression imputation)
    :type noise: bool
    :return: The dataframe with linear regression imputation performed for the
        incomplete variable.
    :rtype: pandas.DataFrame
    """
    # This auxiliary function always returns a copy:
    res = data.copy()
    # If predictors is None, all variables except for the dependent one are
    # considered predictors:
    if predictors is None:
        predictors = list(data.columns)
        predictors.remove(dependent)
    # Worklist bookkeeping: combinations discovered (filled in by
    # linear_regression_iter), already processed, and still pending.
    limited_predictors_combs = set()
    predictors_combs_done = []
    predictors_combs_todo = [tuple(predictors)]
    # Perform the operation:
    while len(predictors_combs_todo) > 0:
        # Select iteration predictors
        it_predictors = predictors_combs_todo.pop(0)
        # Log iteration beginning:
        logging.info('Applying regression imputation with predictors: ' + str(
            it_predictors))
        # Perform iteration (may add new combinations to
        # limited_predictors_combs as a side effect):
        res.loc[:, :] = linear_regression_iter(
            res, dependent, list(it_predictors), noise,
            limited_predictors_combs)
        # Update predictor combinations done and to do
        predictors_combs_done.append(it_predictors)
        if do_available_regressions:
            predictors_combs_todo = list(
                set(limited_predictors_combs) - set(predictors_combs_done))
        # Log iteration end:
        logging.info('Predictor combinations done: ' + str(
            predictors_combs_done))
        logging.info('Predictor combinations to do: ' + str(
            predictors_combs_todo))
    return res
def linear_regression_iter(
        data, dependent, predictors, noise, limited_predictors_combs):
    """Auxiliary function that performs (simple or multiple) linear
    regression imputation on the data, for the dependent column only. In rows
    that contain a missing value for any predictor variable, the value of the
    dependent variable does not get imputed. The operation is always
    performed on a copy of the data, which is returned.

    :param data: The data on which to perform the linear regression imputation.
    :type data: pandas.DataFrame
    :param dependent: The dependent variable in which the missing values
        should be imputed.
    :type dependent: String
    :param predictors: The predictor variables on which the dependent variable
        is dependent.
    :type predictors: array-like
    :param noise: Whether to add noise to the imputed value (stochastic
        regression imputation)
    :type noise: bool
    :param limited_predictors_combs: Reference to the set which contains all
        limited predictor combinations that are necessary to use because
        some predictor had a missing value in some row.
    :type limited_predictors_combs: set
    :return: A copy of the dataframe with linear regression imputation
        performed for the incomplete variable.
    :rtype: pandas.DataFrame
    """
    # Pairwise deletion: fit only on rows complete in the dependent and
    # all predictor columns.
    data_pairwise_deleted = data.copy()
    variables = predictors.copy()
    variables.append(dependent)
    data_pairwise_deleted.dropna(subset=variables, inplace=True)
    # Calculate the regression:
    x = data_pairwise_deleted[predictors]
    y = data_pairwise_deleted[dependent]
    model = linear_model.LinearRegression()
    model.fit(x, y)
    # Extract the regression parameters from the model
    intercept = model.intercept_
    coefs = model.coef_
    # Log regression equation:
    eq = str(dependent) + ' = ' + str(intercept)
    for idx, coef in enumerate(coefs):
        eq += ' + ' + str(coef) + '*' + predictors[idx]
    logging.info('Regression equation: ' + eq)
    # Residual standard deviation of the fitted model; scales the noise
    # for stochastic regression imputation.
    std_error = (model.predict(x) - y).std()
    logging.info('Standard error: ' + str(std_error))
    # Impute row by row; result_type='broadcast' keeps the original
    # column layout of the frame.
    return data.apply(
        lambda row: get_imputed_row(
            row, dependent, predictors, intercept, coefs, noise, std_error,
            limited_predictors_combs),
        axis=1, result_type='broadcast')
def get_imputed_row(
        row, dependent, predictors, intercept, coefs, noise, std_error,
        limited_predictors_combs):
    """Return a copy of ``row``, imputing the dependent value if missing.

    The imputation applies the regression equation defined by
    ``intercept`` and ``coefs`` (one coefficient per predictor, in
    order). A row whose dependent value is present is returned unchanged.
    If any predictor is itself missing, nothing is imputed; instead the
    combination of the remaining predictors (if non-empty) is recorded in
    ``limited_predictors_combs`` so a dedicated model can be fitted for
    it later. With ``noise=True`` a normal draw scaled by ``std_error``
    is added to the imputed value (stochastic regression imputation).
    """
    imputed_row = row.copy()
    if not pd.isnull(imputed_row[dependent]):
        return imputed_row
    # Predictors whose value is missing in this row:
    missing_predictors = tuple(
        row[predictors][row[predictors].isnull()].index.to_list())
    if missing_predictors != ():
        # Cannot impute with the full model; remember which non-empty
        # predictor subset would be usable instead.
        usable_predictors = tuple(set(predictors) - set(missing_predictors))
        if usable_predictors != ():
            limited_predictors_combs.add(usable_predictors)
        return imputed_row
    # All predictors present: apply the regression equation.
    value = intercept
    for coefficient, predictor in zip(coefs, predictors):
        value += coefficient * row[predictor]
    if noise:
        value += std_error * np.random.randn()
    imputed_row[dependent] = value
    return imputed_row
| 46.180812 | 79 | 0.696524 | import pandas as pd
import numpy as np
from sklearn import linear_model
import logging
def linear_regression(
data=None, dependent=None, predictors=None, regressions='available',
noise=False, inplace=False):
if not isinstance(data, pd.DataFrame):
raise TypeError('The data has to be a DataFrame.')
if dependent is not None and dependent not in data.columns:
raise ValueError(
'\'' + dependent + '\' is not a column of the data.')
if predictors is not None:
for column in predictors:
if column not in data.columns:
raise ValueError(
'\'' + column + '\' is not a column of the data.')
if regressions == 'available':
do_available_regressions = True
elif regressions == 'complete':
do_available_regressions = False
else:
raise ValueError(regressions + 'could not be understood')
if inplace:
res = data
else:
res = data.copy()
if dependent is None:
for column in data.columns:
if data[column].isna().any():
res.loc[:, :] = linear_regression_one_dependent(
res, column, predictors, do_available_regressions,
noise)
else:
res.loc[:, :] = linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise)
if not inplace:
return res
def linear_regression_one_dependent(
data, dependent, predictors, do_available_regressions, noise):
res = data.copy()
if predictors is None:
predictors = list(data.columns)
predictors.remove(dependent)
limited_predictors_combs = set()
predictors_combs_done = []
predictors_combs_todo = [tuple(predictors)]
while len(predictors_combs_todo) > 0:
it_predictors = predictors_combs_todo.pop(0)
logging.info('Applying regression imputation with predictors: ' + str(
it_predictors))
res.loc[:, :] = linear_regression_iter(
res, dependent, list(it_predictors), noise,
limited_predictors_combs)
predictors_combs_done.append(it_predictors)
if do_available_regressions:
predictors_combs_todo = list(
set(limited_predictors_combs) - set(predictors_combs_done))
logging.info('Predictor combinations done: ' + str(
predictors_combs_done))
logging.info('Predictor combinations to do: ' + str(
predictors_combs_todo))
return res
def linear_regression_iter(
data, dependent, predictors, noise, limited_predictors_combs):
data_pairwise_deleted = data.copy()
variables = predictors.copy()
variables.append(dependent)
data_pairwise_deleted.dropna(subset=variables, inplace=True)
x = data_pairwise_deleted[predictors]
y = data_pairwise_deleted[dependent]
model = linear_model.LinearRegression()
model.fit(x, y)
intercept = model.intercept_
coefs = model.coef_
eq = str(dependent) + ' = ' + str(intercept)
for idx, coef in enumerate(coefs):
eq += ' + ' + str(coef) + '*' + predictors[idx]
logging.info('Regression equation: ' + eq)
std_error = (model.predict(x) - y).std()
logging.info('Standard error: ' + str(std_error))
return data.apply(
lambda row: get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs),
axis=1, result_type='broadcast')
def get_imputed_row(
row, dependent, predictors, intercept, coefs, noise, std_error,
limited_predictors_combs):
res = row.copy()
if pd.isnull(res[dependent]):
na_predictors = tuple(
row[predictors][row[predictors].isnull()].index.to_list())
if na_predictors != ():
limited_predictors = tuple(set(predictors) - set(na_predictors))
if limited_predictors != ():
limited_predictors_combs.add(limited_predictors)
# If the row doesn't contain missing values for any predictor, impute:
else:
value = intercept
for idx, coef in enumerate(coefs):
value += coef * row[predictors[idx]]
if noise:
value += std_error * np.random.randn()
res[dependent] = value
return res
| true | true |
f715d03e6e3bce6f65c548086393381517fcc295 | 143 | py | Python | packages/pyolite-kernel/py/piplite/piplite/__init__.py | luzpaz/jupyterlite | 4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665 | [
"BSD-3-Clause"
] | null | null | null | packages/pyolite-kernel/py/piplite/piplite/__init__.py | luzpaz/jupyterlite | 4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665 | [
"BSD-3-Clause"
] | null | null | null | packages/pyolite-kernel/py/piplite/piplite/__init__.py | luzpaz/jupyterlite | 4b9d9419918a4ac53bb45b78a3d44d0ca2cd9665 | [
"BSD-3-Clause"
] | null | null | null | """A configurable Python package backed by Pyodide's micropip"""
from .piplite import install
__version__ = "0.1.0a23"
__all__ = ["install"]
| 20.428571 | 64 | 0.734266 | from .piplite import install
__version__ = "0.1.0a23"
__all__ = ["install"]
| true | true |
f715d0e64ba7e66d2862699c039894e7931be245 | 7,609 | py | Python | custom_components/jlrincontrol/services.py | stefferber/homeassistant-jlrincontrol | d11d931e097cc011047b1ad128f9a4340822117c | [
"MIT"
] | 27 | 2020-04-16T06:47:41.000Z | 2022-01-06T01:55:54.000Z | custom_components/jlrincontrol/services.py | stefferber/homeassistant-jlrincontrol | d11d931e097cc011047b1ad128f9a4340822117c | [
"MIT"
] | 40 | 2020-04-16T07:13:08.000Z | 2022-02-08T21:27:49.000Z | custom_components/jlrincontrol/services.py | stefferber/homeassistant-jlrincontrol | d11d931e097cc011047b1ad128f9a4340822117c | [
"MIT"
] | 15 | 2020-04-16T07:09:19.000Z | 2022-03-02T07:06:49.000Z | import inspect
import logging
import asyncio
from urllib import error
from functools import partial
from .const import DOMAIN, JLR_DATA
from .util import convert_temp_value
_LOGGER = logging.getLogger(__name__)
class JLRService:
def __init__(self, hass, config_entry, vin):
self.hass = hass
self.data = hass.data[DOMAIN][config_entry.entry_id][JLR_DATA]
self.vin = vin
self.vehicle = self.data.vehicles[vin]
self.service_code = None
self.service_name = None
self.attributes = self.vehicle.attributes
self.nickname = self.attributes.get("nickname")
async def validate_service_call(self):
if self.service_code and self.service_name:
# Check this is a valid service
if self.check_service_enabled(self.service_code):
# Check no other service calls are awaiting
if not await self.async_get_services():
# OK to make service call
return True
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Another request is still processing. "
+ "Please try again later."
)
else:
_LOGGER.debug(
"Service {} is not available on vehicle {}".format(
self.service_name, self.nickname,
)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
return False
async def async_call_service(self, **kwargs):
self.service_code = kwargs.get("service_code")
self.service_name = kwargs.get("service_name")
if await self.validate_service_call():
service_kwargs = {}
# populate required parameters for service call
service = getattr(self.vehicle, self.service_name)
for param in inspect.signature(service).parameters:
if param in ["target_value", "target_temp"]:
# convert temp values to car requirements
service_kwargs[param] = convert_temp_value(
self.hass.config.units.temperature_unit,
self.service_code,
kwargs.get(param),
)
else:
service_kwargs[param] = kwargs.get(param)
# Call service
try:
status = await self.hass.async_add_executor_job(
partial(service, **service_kwargs)
)
_LOGGER.info(
"Service {} called on vehicle {}. ".format(
self.service_name, self.nickname,
)
+ "Awaiting feedback on success."
)
# monitor service for success / failure
monitor_status = await self.async_monitor_service_call(
status.get("customerServiceId")
)
return monitor_status
except error.HTTPError as ex:
if ex.code == 401:
_LOGGER.warning(
"Service: {} on vehicle {} ".format(
self.service_name, self.nickname,
)
+ "- not authorised error. Is your pin correct?"
)
else:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex.msg)
)
except Exception as ex:
_LOGGER.debug(
"Error calling service {} on vehicle {}. ".format(
self.service_name, self.nickname
)
+ "Error is {}".format(ex)
)
else:
_LOGGER.debug(
"Error calling service {}. Invalid parameters".format(
self.service_name
)
)
def check_service_enabled(self, service_code):
"""Check service code is capable and enabled"""
if service_code == "NA":
return True
else:
for service in self.attributes.get("availableServices"):
if service.get("serviceType") == service_code:
if service.get("vehicleCapable") and service.get(
"serviceEnabled"
):
return True
return False
async def async_get_services(self):
    """Check for an existing queued service call on this vehicle.

    Returns True when a queued request whose serviceType matches
    ``self.service_code`` is found, otherwise False.
    """
    response = await self.hass.async_add_executor_job(
        self.vehicle.get_services
    )
    if not response:
        return False

    # Service entries are URL paths; strip the prefix to get the bare id.
    prefix = "/vehicles/{}/services/".format(self.vin)
    # TODO: need to test for equivalents like RDL and RDU
    for entry in response.get("services"):
        service_id = entry.replace(prefix, "")
        try:
            status = await self.hass.async_add_executor_job(
                partial(self.vehicle.get_service_status, service_id)
            )
        except Exception:
            # Best effort: skip entries whose status lookup fails.
            continue
        if status and status.get("serviceType") == self.service_code:
            return True
    return False
async def async_check_service_status(self, service_id):
    """Fetch the current status dict of the given queued service call."""
    # The vehicle API is blocking; hand it off to an executor thread.
    job = partial(self.vehicle.get_service_status, service_id)
    return await self.hass.async_add_executor_job(job)
async def async_monitor_service_call(self, service_id):
    """Poll the service status until it leaves the "Started" state.

    Returns "Successful" on success, the terminal status string on
    failure, or None if a status lookup returned nothing. Fix: the
    original re-read ``result.get("status")`` inside the loop without
    checking for None, so a failed mid-poll lookup raised
    ``AttributeError``; that case now returns None like the initial one.
    """
    result = await self.async_check_service_status(service_id)
    if not result:
        return None

    status = result.get("status")
    while status == "Started":
        _LOGGER.info(
            "Checking for %s service call result status.", self.service_name
        )
        # Pause between polls so we don't hammer the InControl API.
        await asyncio.sleep(5)
        result = await self.async_check_service_status(service_id)
        if not result:
            # Status lookup failed mid-poll; bail out instead of crashing.
            return None
        status = result.get("status")

    if status and status in ["Successful", "MessageDelivered"]:
        _LOGGER.info(
            "Service call (%s) to vehicle %s was successful",
            self.service_name,
            self.nickname,
        )
        return "Successful"

    _LOGGER.info(
        "InControl service call (%s) to vehicle %s "
        "failed due to %s. \r\nFull return is %s",
        self.service_name,
        self.nickname,
        result.get("failureReason"),
        result,
    )
    return status
| 37.29902 | 76 | 0.490078 | import inspect
import logging
import asyncio
from urllib import error
from functools import partial
from .const import DOMAIN, JLR_DATA
from .util import convert_temp_value
_LOGGER = logging.getLogger(__name__)
class JLRService:
    """Handles validating, invoking and monitoring a JLR InControl
    remote service call for a single vehicle.

    One instance is built per call from the Home Assistant instance,
    the integration's config entry and the target vehicle VIN.
    """

    def __init__(self, hass, config_entry, vin):
        self.hass = hass
        # Shared integration data stored under hass.data by component setup.
        self.data = hass.data[DOMAIN][config_entry.entry_id][JLR_DATA]
        self.vin = vin
        self.vehicle = self.data.vehicles[vin]
        # Populated by async_call_service before a service is invoked.
        self.service_code = None
        self.service_name = None
        self.attributes = self.vehicle.attributes
        self.nickname = self.attributes.get("nickname")

    async def validate_service_call(self):
        """Return True if the pending service call may proceed.

        Valid means: service code and name are set, the vehicle is
        capable of (and enabled for) the service, and no matching
        request is already queued.
        """
        if self.service_code and self.service_name:
            if self.check_service_enabled(self.service_code):
                if not await self.async_get_services():
                    return True
                else:
                    _LOGGER.debug(
                        "Error calling service {} on vehicle {}. ".format(
                            self.service_name, self.nickname,
                        )
                        + "Another request is still processing. "
                        + "Please try again later."
                    )
            else:
                _LOGGER.debug(
                    "Service {} is not available on vehicle {}".format(
                        self.service_name, self.nickname,
                    )
                )
        else:
            _LOGGER.debug(
                "Error calling service {}. Invalid parameters".format(
                    self.service_name
                )
            )
        return False

    async def async_call_service(self, **kwargs):
        """Validate and invoke the requested service, then monitor it.

        Expects "service_code" and "service_name" in kwargs; remaining
        kwargs supply the service parameters. Returns the monitored
        result status, or None on validation failure or error.
        """
        self.service_code = kwargs.get("service_code")
        self.service_name = kwargs.get("service_name")
        if await self.validate_service_call():
            service_kwargs = {}
            # Populate required parameters for the service call, matched
            # against the target method's own signature.
            service = getattr(self.vehicle, self.service_name)
            for param in inspect.signature(service).parameters:
                if param in ["target_value", "target_temp"]:
                    # Convert HA temperature values to car requirements.
                    service_kwargs[param] = convert_temp_value(
                        self.hass.config.units.temperature_unit,
                        self.service_code,
                        kwargs.get(param),
                    )
                else:
                    service_kwargs[param] = kwargs.get(param)
            # The vehicle API is blocking; run it in an executor thread.
            try:
                status = await self.hass.async_add_executor_job(
                    partial(service, **service_kwargs)
                )
                _LOGGER.info(
                    "Service {} called on vehicle {}. ".format(
                        self.service_name, self.nickname,
                    )
                    + "Awaiting feedback on success."
                )
                # Monitor the queued service call for success / failure.
                monitor_status = await self.async_monitor_service_call(
                    status.get("customerServiceId")
                )
                return monitor_status
            except error.HTTPError as ex:
                if ex.code == 401:
                    # 401 from the service call suggests a wrong PIN.
                    _LOGGER.warning(
                        "Service: {} on vehicle {} ".format(
                            self.service_name, self.nickname,
                        )
                        + "- not authorised error. Is your pin correct?"
                    )
                else:
                    _LOGGER.debug(
                        "Error calling service {} on vehicle {}. ".format(
                            self.service_name, self.nickname
                        )
                        + "Error is {}".format(ex.msg)
                    )
            except Exception as ex:
                # Best effort: log any other failure and return None.
                _LOGGER.debug(
                    "Error calling service {} on vehicle {}. ".format(
                        self.service_name, self.nickname
                    )
                    + "Error is {}".format(ex)
                )
        else:
            _LOGGER.debug(
                "Error calling service {}. Invalid parameters".format(
                    self.service_name
                )
            )

    def check_service_enabled(self, service_code):
        """Check that service_code is vehicle-capable and enabled.

        The code "NA" is a sentinel meaning no capability check needed.
        """
        if service_code == "NA":
            return True
        else:
            for service in self.attributes.get("availableServices"):
                if service.get("serviceType") == service_code:
                    if service.get("vehicleCapable") and service.get(
                        "serviceEnabled"
                    ):
                        return True
            return False

    async def async_get_services(self):
        """Check for existing queued service calls on this vehicle.

        Returns True when a queued request with the same serviceType as
        self.service_code is found, else False.
        """
        services = await self.hass.async_add_executor_job(
            self.vehicle.get_services
        )
        if services:
            services = services.get("services")
            for service in services:
                # Service entries are URL paths; strip to the bare id.
                service_id = service.replace(
                    "/vehicles/{}/services/".format(self.vin), ""
                )
                # TODO: need to test for equivalents like RDL and RDU
                try:
                    status = await self.hass.async_add_executor_job(
                        partial(self.vehicle.get_service_status, service_id)
                    )
                    if status:
                        if status.get("serviceType") == self.service_code:
                            return True
                except Exception:
                    # Best effort: ignore entries whose lookup fails.
                    pass
            return False
        else:
            return False

    async def async_check_service_status(self, service_id):
        """Get the status of the given queued service call."""
        return await self.hass.async_add_executor_job(
            self.vehicle.get_service_status, service_id
        )

    async def async_monitor_service_call(self, service_id):
        """Poll the service status until it leaves the "Started" state.

        Returns "Successful" on success, the final status string on
        failure, or None if no status could be retrieved initially.
        """
        result = await self.async_check_service_status(service_id)
        if result:
            status = result.get("status")
            while status == "Started":
                _LOGGER.info(
                    "Checking for {} service call result status.".format(
                        self.service_name
                    )
                )
                # Pause between polls to avoid hammering the API.
                await asyncio.sleep(5)
                result = await self.async_check_service_status(service_id)
                status = result.get("status")
            if status and status in ["Successful", "MessageDelivered"]:
                _LOGGER.info(
                    "Service call ({}) to vehicle {} was successful".format(
                        self.service_name, self.nickname
                    )
                )
                return "Successful"
            else:
                _LOGGER.info(
                    "InControl service call ({}) to vehicle {} ".format(
                        self.service_name, self.nickname,
                    )
                    + "failed due to {}. \r\nFull return is {}".format(
                        result.get("failureReason"), result,
                    )
                )
                return status
        else:
            return None
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.